diff options
| author | Rusty Russell <rusty@rustcorp.com.au> | 2002-03-04 23:04:15 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@penguin.transmeta.com> | 2002-03-04 23:04:15 -0800 |
| commit | 3d7af07825c07ddb3fbc27245ff01caae7ce764f (patch) | |
| tree | f151f055c01f6cbf1b4f0b5f6e261ed41f71743b /include | |
| parent | 09c1076ece2d7abd60356986436cc64c5253eb09 (diff) | |
[PATCH] per-cpu areas
This is the Richard Henderson-approved, cleaner, brighter per-cpu patch.
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/cache.h | 4 | ||||
| -rw-r--r-- | include/linux/compiler.h | 7 | ||||
| -rw-r--r-- | include/linux/smp.h | 16 |
3 files changed, 25 insertions, 2 deletions
diff --git a/include/linux/cache.h b/include/linux/cache.h index 14e9c0b89782..37d7a6fe4b6e 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -4,8 +4,10 @@ #include <linux/config.h> #include <asm/cache.h> +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + #ifndef L1_CACHE_ALIGN -#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) +#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES) #endif #ifndef SMP_CACHE_BYTES diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 05205c74bc4c..cd8cca0fef8f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -13,4 +13,11 @@ #define likely(x) __builtin_expect((x),1) #define unlikely(x) __builtin_expect((x),0) +/* This macro obfuscates arithmetic on a variable address so that gcc + shouldn't recognize the original var, and make assumptions about it, + e.g. strcpy(s, "xxx"+X) => memcpy(s, "xxx"+X, 4-X) */ +#define RELOC_HIDE(var, off) \ + ({ __typeof__(&(var)) __ptr; \ + __asm__ ("" : "=g"(__ptr) : "0"((void *)&(var) + (off))); \ + *__ptr; }) #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 43bef9087932..5290555e8101 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -11,6 +11,7 @@ #ifdef CONFIG_SMP #include <linux/kernel.h> +#include <linux/compiler.h> #include <asm/smp.h> /* @@ -71,7 +72,17 @@ extern volatile int smp_msg_id; #define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/ #define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */ -#else +#define __per_cpu_data __attribute__((section(".data.percpu"))) + +#ifndef __HAVE_ARCH_PER_CPU +extern unsigned long __per_cpu_offset[NR_CPUS]; + +/* var is in discarded region: offset to particular copy we want */ +#define per_cpu(var, cpu) RELOC_HIDE(var, per_cpu_offset(cpu)) + +#define this_cpu(var) per_cpu(var, smp_processor_id()) +#endif /* !__HAVE_ARCH_PER_CPU */ +#else /* !SMP */ /* * These macros fold the SMP functionality into a single CPU 
system @@ -90,6 +101,9 @@ extern volatile int smp_msg_id; #define cpu_online_map 1 static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule_all(void) { } +#define __per_cpu_data +#define per_cpu(var, cpu) var +#define this_cpu(var) var #endif #endif
