author		David S. Miller <davem@nuts.ninka.net>	2003-07-20 08:52:43 -0700
committer	David S. Miller <davem@nuts.ninka.net>	2003-07-20 08:52:43 -0700
commit		764cf56e6ddb2a5ed92b73cf6e0b71f611302273 (patch)
tree		e2a7855e76914435e3f91a16304b0b48590e919b /include
parent		d7cd83673b6a522b47586cf8d09144caadcef396 (diff)
parent		50692f8c6b6a27ffed9b5b7206ca6682a60ff354 (diff)
Merge nuts.ninka.net:/home/davem/src/BK/network-2.5
into nuts.ninka.net:/home/davem/src/BK/net-2.5
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/local.h	| 118
-rw-r--r--	include/asm-generic/percpu.h	|  33
-rw-r--r--	include/asm-generic/sections.h	|   9
-rw-r--r--	include/asm-i386/hw_irq.h	|   5
-rw-r--r--	include/asm-i386/local.h	|  70
-rw-r--r--	include/asm-sparc64/atomic.h	|  23
-rw-r--r--	include/asm-sparc64/local.h	|  40
-rw-r--r--	include/asm-sparc64/sections.h	|   7
-rw-r--r--	include/linux/module.h	|  17
9 files changed, 290 insertions(+), 32 deletions(-)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 000000000000..45c07dbc0a59
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/config.h>
+#include <linux/percpu.h>
+#include <asm/types.h>
+#include <asm/hardirq.h>
+
+/* An unsigned long type for operations which are atomic for a single
+ * CPU. Usually used in combination with per-cpu variables. */
+
+#if BITS_PER_LONG == 32 && !defined(CONFIG_SPARC32)
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct
+{
+ atomic_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_INIT(i) }
+
+#define local_read(l) ((unsigned long)atomic_read(&(l)->a))
+#define local_set(l,i) atomic_set((&(l)->a),(i))
+#define local_inc(l) atomic_inc(&(l)->a)
+#define local_dec(l) atomic_dec(&(l)->a)
+#define local_add(i,l) atomic_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_sub((i),(&(l)->a))
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) local_set((l), local_read(l) + 1)
+#define __local_dec(l) local_set((l), local_read(l) - 1)
+#define __local_add(i,l) local_set((l), local_read(l) + (i))
+#define __local_sub(i,l) local_set((l), local_read(l) - (i))
+
+#else /* ... can't use atomics. */
+/* Implement in terms of three variables.
+ Another option would be to use local_irq_save/restore. */
+
+typedef struct
+{
+ /* 0 = in hardirq, 1 = in softirq, 2 = usermode. */
+ unsigned long v[3];
+} local_t;
+
+#define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()])
+
+#define LOCAL_INIT(i) { { (i), 0, 0 } }
+
+static inline unsigned long local_read(local_t *l)
+{
+ return l->v[0] + l->v[1] + l->v[2];
+}
+
+static inline void local_set(local_t *l, unsigned long v)
+{
+ l->v[0] = v;
+ l->v[1] = l->v[2] = 0;
+}
+
+static inline void local_inc(local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l)++;
+ preempt_enable();
+}
+
+static inline void local_dec(local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l)--;
+ preempt_enable();
+}
+
+static inline void local_add(unsigned long v, local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l) += v;
+ preempt_enable();
+}
+
+static inline void local_sub(unsigned long v, local_t *l)
+{
+ preempt_disable();
+ _LOCAL_VAR(l) -= v;
+ preempt_enable();
+}
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) ((l)->v[0]++)
+#define __local_dec(l) ((l)->v[0]--)
+#define __local_add(i,l) ((l)->v[0] += (i))
+#define __local_sub(i,l) ((l)->v[0] -= (i))
+
+#endif /* Non-atomic implementation */
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable (eg. mystruct.foo), not an address.
+ */
+#define cpu_local_read(v) local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
+#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Non-atomic increments, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well.
+ */
+#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
+#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
+#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
+#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+
+#endif /* _ASM_GENERIC_LOCAL_H */
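
The cpu_local_*() wrappers above take the per-cpu variable itself (e.g. mystruct.foo), not its address. A minimal usage sketch, with hypothetical names that are not part of this patch, of how a driver might keep a cheap per-CPU event counter and fold it up at read time:

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu counter; names are illustrative only. */
static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);

static void note_event(void)
{
	/* Atomic w.r.t. interrupts on the CPU that runs this;
	 * no cross-CPU locking is taken. */
	cpu_local_inc(nr_events);
}

static unsigned long total_events(void)
{
	unsigned long sum = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_possible(cpu))
			sum += local_read(&per_cpu(nr_events, cpu));
	return sum;
}

The sum can be slightly stale while counters are being bumped, which is the usual trade-off for lock-free statistics.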
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index f13b44926e6a..96d909da9ae7 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -9,33 +9,34 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) __typeof__(type) name##__per_cpu
+ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*RELOC_HIDE(&var##__per_cpu, __per_cpu_offset[cpu]))
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-static inline void percpu_modcopy(void *pcpudst, const void *src,
- unsigned long size)
-{
- unsigned int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_possible(i))
- memcpy(pcpudst + __per_cpu_offset[i], src, size);
-}
+/* A macro to avoid #include hell... */
+#define percpu_modcopy(pcpudst, src, size) \
+do { \
+ unsigned int __i; \
+ for (__i = 0; __i < NR_CPUS; __i++) \
+ if (cpu_possible(__i)) \
+ memcpy((pcpudst)+__per_cpu_offset[__i], \
+ (src), (size)); \
+} while (0)
#else /* ! SMP */
#define DEFINE_PER_CPU(type, name) \
- __typeof__(type) name##__per_cpu
+ __typeof__(type) per_cpu__##name
-#define per_cpu(var, cpu) ((void)cpu, var##__per_cpu)
-#define __get_cpu_var(var) var##__per_cpu
+#define per_cpu(var, cpu) ((void)cpu, per_cpu__##var)
+#define __get_cpu_var(var) per_cpu__##var
#endif /* SMP */
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) name##__per_cpu
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu)
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#endif /* _ASM_GENERIC_PERCPU_H_ */
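
Two things change in this hunk: the mangled per-cpu symbol moves from name##__per_cpu to per_cpu__##name, presumably so every per-cpu symbol can be recognised by a common prefix, and percpu_modcopy() becomes a macro so the header does not have to pull in the declarations it would otherwise need (the "#include hell" the new comment mentions). A sketch of what the renaming means at a definition and its accessors, using a hypothetical variable name:

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical example: DEFINE_PER_CPU(int, hits) now emits the
 * linker symbol "per_cpu__hits" instead of "hits__per_cpu". */
static DEFINE_PER_CPU(int, hits);

static void bump_hits(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU */
	per_cpu(hits, cpu)++;	/* relocates per_cpu__hits to cpu's copy */
	put_cpu();
}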
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index fe290e3349be..ce400f39ba25 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,9 +3,10 @@
/* References to section boundaries */
-extern char _text, _etext;
-extern char _data, _edata;
-extern char __bss_start;
-extern char __init_begin, __init_end;
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 30e4cfa8395b..eaee6b7cd24f 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -16,6 +16,7 @@
#include <linux/profile.h>
#include <asm/atomic.h>
#include <asm/irq.h>
+#include <asm/sections.h>
/*
* Various low-level irq details needed by irq.c, process.c,
@@ -63,8 +64,6 @@ extern unsigned long io_apic_irqs;
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
-extern char _stext, _etext;
-
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
/*
@@ -95,7 +94,7 @@ static inline void x86_do_profile(struct pt_regs * regs)
if (!((1<<smp_processor_id()) & prof_cpu_mask))
return;
- eip -= (unsigned long) &_stext;
+ eip -= (unsigned long)_stext;
eip >>= prof_shift;
/*
* Don't ignore out-of-bounds EIP values silently,
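
Declaring the section boundary symbols as arrays rather than single chars means the symbol itself already denotes the address, so callers write (unsigned long)_stext instead of (unsigned long)&_stext, as the x86_do_profile() hunk above now does, and the compiler cannot make assumptions about a one-byte object living there. A small sketch of the usual pattern, with a hypothetical helper name:

#include <asm/sections.h>

/* Hypothetical helper, not part of this patch: test whether an
 * address lies inside the kernel text section. */
static int addr_in_kernel_text(unsigned long addr)
{
	return addr >= (unsigned long)_stext &&
	       addr <  (unsigned long)_etext;
}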
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
new file mode 100644
index 000000000000..0177da80dde3
--- /dev/null
+++ b/include/asm-i386/local.h
@@ -0,0 +1,70 @@
+#ifndef _ARCH_I386_LOCAL_H
+#define _ARCH_I386_LOCAL_H
+
+#include <linux/percpu.h>
+
+typedef struct
+{
+ volatile unsigned long counter;
+} local_t;
+
+#define LOCAL_INIT(i) { (i) }
+
+#define local_read(v) ((v)->counter)
+#define local_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void local_inc(local_t *v)
+{
+ __asm__ __volatile__(
+ "incl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+static __inline__ void local_dec(local_t *v)
+{
+ __asm__ __volatile__(
+ "decl %0"
+ :"=m" (v->counter)
+ :"m" (v->counter));
+}
+
+static __inline__ void local_add(unsigned long i, local_t *v)
+{
+ __asm__ __volatile__(
+ "addl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+static __inline__ void local_sub(unsigned long i, local_t *v)
+{
+ __asm__ __volatile__(
+ "subl %1,%0"
+ :"=m" (v->counter)
+ :"ir" (i), "m" (v->counter));
+}
+
+/* On x86, these are no better than the atomic variants. */
+#define __local_inc(l) local_inc(l)
+#define __local_dec(l) local_dec(l)
+#define __local_add(i,l) local_add((i),(l))
+#define __local_sub(i,l) local_sub((i),(l))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+#define cpu_local_read(v) local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
+#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+#define __cpu_local_inc(v) cpu_local_inc(v)
+#define __cpu_local_dec(v) cpu_local_dec(v)
+#define __cpu_local_add(i, v) cpu_local_add((i), (v))
+#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
+
+#endif /* _ARCH_I386_LOCAL_H */
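
On i386 a single incl/addl on a memory operand is already atomic with respect to interrupts on the CPU executing it, which is all local_t promises, so the implementation above can drop the bus-locking that the SMP atomic_t operations need. For contrast, a simplified sketch of a locked increment (illustrative only, not part of this patch):

/* Sketch for comparison: the SMP-safe increment needs a bus lock,
 * which local_inc() deliberately avoids. */
static __inline__ void locked_inc(volatile unsigned long *p)
{
	__asm__ __volatile__(
		"lock; incl %0"
		:"=m" (*p)
		:"m" (*p));
}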
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index fc10b924f42e..ea155897855c 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -9,25 +9,46 @@
#define __ARCH_SPARC64_ATOMIC__
typedef struct { volatile int counter; } atomic_t;
-#define ATOMIC_INIT(i) { (i) }
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
+#define atomic64_read(v) ((v)->counter)
+
#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic64_set(v, i) (((v)->counter) = i)
extern int __atomic_add(int, atomic_t *);
+extern int __atomic64_add(int, atomic64_t *);
+
extern int __atomic_sub(int, atomic_t *);
+extern int __atomic64_sub(int, atomic64_t *);
#define atomic_add(i, v) ((void)__atomic_add(i, v))
+#define atomic64_add(i, v) ((void)__atomic64_add(i, v))
+
#define atomic_sub(i, v) ((void)__atomic_sub(i, v))
+#define atomic64_sub(i, v) ((void)__atomic64_sub(i, v))
#define atomic_dec_return(v) __atomic_sub(1, v)
+#define atomic64_dec_return(v) __atomic64_sub(1, v)
+
#define atomic_inc_return(v) __atomic_add(1, v)
+#define atomic64_inc_return(v) __atomic64_add(1, v)
#define atomic_sub_and_test(i, v) (__atomic_sub(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (__atomic64_sub(i, v) == 0)
+
#define atomic_dec_and_test(v) (__atomic_sub(1, v) == 0)
+#define atomic64_dec_and_test(v) (__atomic64_sub(1, v) == 0)
#define atomic_inc(v) ((void)__atomic_add(1, v))
+#define atomic64_inc(v) ((void)__atomic64_add(1, v))
+
#define atomic_dec(v) ((void)__atomic_sub(1, v))
+#define atomic64_dec(v) ((void)__atomic64_sub(1, v))
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
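
sparc64 gains a 64-bit counterpart of atomic_t with the same set of operations; the new local.h below builds local_t on top of it. A brief usage sketch with a hypothetical counter:

#include <asm/atomic.h>

/* Hypothetical 64-bit statistics counter, illustrative only. */
static atomic64_t bytes_seen = ATOMIC64_INIT(0);

static void account_bytes(int len)
{
	atomic64_add(len, &bytes_seen);
}

static long bytes_total(void)
{
	return atomic64_read(&bytes_seen);
}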
diff --git a/include/asm-sparc64/local.h b/include/asm-sparc64/local.h
new file mode 100644
index 000000000000..49f543a8f11e
--- /dev/null
+++ b/include/asm-sparc64/local.h
@@ -0,0 +1,40 @@
+#ifndef _ARCH_SPARC64_LOCAL_H
+#define _ARCH_SPARC64_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+typedef atomic64_t local_t;
+
+#define LOCAL_INIT(i) ATOMIC64_INIT(i)
+#define local_read(v) atomic64_read(v)
+#define local_set(v,i) atomic64_set(v,i)
+
+#define local_inc(v) atomic64_inc(v)
+#define local_dec(v) atomic64_dec(v)
+#define local_add(i, v) atomic64_add(i, v)
+#define local_sub(i, v) atomic64_sub(i, v)
+
+#define __local_inc(v) ((v)->counter++)
+#define __local_dec(v) ((v)->counter--)
+#define __local_add(i,v) ((v)->counter+=(i))
+#define __local_sub(i,v) ((v)->counter-=(i))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+#define cpu_local_read(v) local_read(&__get_cpu_var(v))
+#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
+
+#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
+#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
+#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
+#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
+#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
+#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
+#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+
+#endif /* _ARCH_SPARC64_LOCAL_H */
diff --git a/include/asm-sparc64/sections.h b/include/asm-sparc64/sections.h
new file mode 100644
index 000000000000..e6dcceabffb2
--- /dev/null
+++ b/include/asm-sparc64/sections.h
@@ -0,0 +1,7 @@
+#ifndef _SPARC64_SECTIONS_H
+#define _SPARC64_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 1d48f23a8a6a..13ff244afdbf 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -16,6 +16,7 @@
#include <linux/kmod.h>
#include <linux/elf.h>
#include <linux/stringify.h>
+#include <asm/local.h>
#include <asm/module.h>
@@ -171,7 +172,7 @@ void *__symbol_get_gpl(const char *symbol);
struct module_ref
{
- atomic_t count;
+ local_t count;
} ____cacheline_aligned;
enum module_state
@@ -276,19 +277,17 @@ struct module *module_get_kallsym(unsigned int symnum,
char *type,
char namebuf[128]);
int is_exported(const char *name, const struct module *mod);
-#ifdef CONFIG_MODULE_UNLOAD
+extern void __module_put_and_exit(struct module *mod, long code)
+ __attribute__((noreturn));
+#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
+
+#ifdef CONFIG_MODULE_UNLOAD
unsigned int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
-/* We only need protection against local interrupts. */
-#ifndef __HAVE_ARCH_LOCAL_INC
-#define local_inc(x) atomic_inc(x)
-#define local_dec(x) atomic_dec(x)
-#endif
-
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
@@ -445,6 +444,8 @@ static inline int unregister_module_notifier(struct notifier_block * nb)
return 0;
}
+#define module_put_and_exit(code) do_exit(code)
+
#endif /* CONFIG_MODULES */
#ifdef MODULE
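
With the reference count held in a local_t, each CPU only ever touches its own cacheline-aligned module_ref entry, so module_refcount() can sum the per-CPU counters instead of hammering one shared atomic. The new module_put_and_exit() is intended to let a kernel thread started by a module drop its reference and terminate through a helper that lives outside the module, so the module text can be freed even though the thread was running inside it. A hedged sketch of the intended call site, with a hypothetical thread function:

#include <linux/module.h>

/* Hypothetical module kernel thread, illustrative only. */
static int my_worker_thread(void *unused)
{
	/* ... service requests until asked to stop ... */

	/* Drop this module's reference and exit in one step; the
	 * thread never returns into module code afterwards. */
	module_put_and_exit(0);
}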