| author    | Andrew Morton <akpm@osdl.org>            | 2003-08-18 06:43:55 -0700 |
|-----------|------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@home.osdl.org>  | 2003-08-18 06:43:55 -0700 |
| commit    | bf8cb61f2839b8885077adae5020491ec3e7f453 |                           |
| tree      | 2cb32a3b80f9e21e0ca613c4fa63962a57940935 /include |                   |
| parent    | 068a96bff3db85e237643e0e719b0563c35af8ea |                           |
[PATCH] cpumask_t: allow more than BITS_PER_LONG CPUs
From: William Lee Irwin III <wli@holomorphy.com>
Contributions from:
Jan Dittmer <jdittmer@sfhq.hn.org>
Arnd Bergmann <arnd@arndb.de>
"Bryan O'Sullivan" <bos@serpentine.com>
"David S. Miller" <davem@redhat.com>
Badari Pulavarty <pbadari@us.ibm.com>
"Martin J. Bligh" <mbligh@aracnet.com>
Zwane Mwaikambo <zwane@linuxpower.ca>
It has been tested on x86, sparc64, x86_64, ia64 (I think), ppc, and ppc64.
cpumask_t enables systems with NR_CPUS > BITS_PER_LONG to utilize all their
cpus by introducing an abstract data type dedicated to representing cpu
bitmasks, similar to fd sets in userspace, and by sweeping the affected code
to update callers to the new access API. The fd set-like structure follows
Linus' own suggestion; the macro calling convention that keeps the underlying
representation opaque with minimal code impact is my own invention.
Specifically, a new set of inline functions for manipulating arbitrary-width
bitmaps is introduced with a relatively simple implementation, in tandem with
a new data type representing bitmaps of width NR_CPUS, cpumask_t, whose
accessor functions are defined in terms of the bitmap manipulation inlines.
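To make the shape of the API concrete, here is a minimal userspace sketch of
the array-backed representation (the _demo names are illustrative only; the
real definitions, shown in the asm-generic/cpumask_array.h hunk below, are
macros that take the mask object itself rather than a pointer):

	#include <stdio.h>
	#include <string.h>

	#define NR_CPUS       128  /* illustrative; the kernel gets this from config */
	#define BITS_PER_LONG (8 * sizeof(long))
	#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	/* An array of unsigned longs wrapped in a struct so the whole
	 * mask can be passed by value. */
	typedef struct {
		unsigned long mask[BITS_TO_LONGS(NR_CPUS)];
	} cpumask_t;

	static void cpu_set_demo(int cpu, cpumask_t *map)
	{
		map->mask[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
	}

	static int cpu_isset_demo(int cpu, const cpumask_t *map)
	{
		return (map->mask[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		cpumask_t map;

		memset(&map, 0, sizeof(map));
		cpu_set_demo(97, &map);	/* lands in the second word on 64-bit */
		printf("cpu 97 set: %d\n", cpu_isset_demo(97, &map));
		return 0;
	}

The struct wrapper is what makes pass-by-value possible: a bare array would
decay to a pointer at every call boundary.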
This bitmap ADT found an additional use in the i386 arch code that handles
sparse physical APIC IDs, where it was convenient because the accounting
structure needed to be wider to accommodate the physids consumed by larger
numbers of cpus.
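The physid flavor follows the same recipe; a hedged sketch in the spirit of
the physid_mask_t helpers added to asm-i386/mpspec.h below (the MAX_APICS
value and the _demo names are illustrative):

	#include <string.h>

	#define MAX_APICS     256  /* illustrative; really from mpspec_def.h */
	#define BITS_PER_LONG (8 * sizeof(long))
	#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	/* Wide accounting structure for sparse physical APIC ids. */
	typedef struct {
		unsigned long mask[BITS_TO_LONGS(MAX_APICS)];
	} physid_mask_t;

	/* Build a mask holding exactly one physical APIC id, in the
	 * spirit of the patch's physid_mask_of_physid(). */
	static physid_mask_t physid_mask_of_physid_demo(int physid)
	{
		physid_mask_t m;

		memset(&m, 0, sizeof(m));
		m.mask[physid / BITS_PER_LONG] |= 1UL << (physid % BITS_PER_LONG);
		return m;
	}

	/* Widen one unsigned long worth of ids, in the spirit of the
	 * patch's physids_promote(). */
	static physid_mask_t physids_promote_demo(unsigned long physids)
	{
		physid_mask_t m;

		memset(&m, 0, sizeof(m));
		m.mask[0] = physids;
		return m;
	}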
For the sake of simplicity and low code impact, these cpu bitmasks are passed
primarily by value; however, an additional set of accessors along with an
auxiliary data type with const call-by-reference semantics is provided to
address performance concerns raised in connection with very large systems,
such as SGI's larger models, where copying and call-by-value overhead would
be prohibitive. Few (if any) users of the call-by-reference API are
immediately introduced.
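A sketch of the call-by-reference flavor (mirroring
asm-generic/cpumask_const_reference.h below; NR_CPUS and the simplified
typedef are illustrative): passing a cpumask_const_t copies one pointer-sized
word no matter how wide the mask is, which is the point on the very large
systems mentioned above.

	#define NR_CPUS       512  /* illustrative large-box configuration */
	#define BITS_PER_LONG (8 * sizeof(long))
	#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	typedef struct {
		unsigned long mask[BITS_TO_LONGS(NR_CPUS)];
	} cpumask_t;

	/* The const variant wraps a pointer, so mk_cpumask_const() is
	 * cheap regardless of how wide cpumask_t gets. */
	typedef struct {
		const cpumask_t *val;
	} cpumask_const_t;

	#define mk_cpumask_const(map)	((cpumask_const_t){ &(map) })
	#define cpu_isset_const(cpu, m) \
		(((m).val->mask[(cpu) / BITS_PER_LONG] >> ((cpu) % BITS_PER_LONG)) & 1UL)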
Also, in order to avoid calling convention overhead on architectures where
structures are required to be passed by value, NR_CPUS <= BITS_PER_LONG is
special-cased so that cpumask_t falls back to an unsigned long and the
accessors perform the usual bit twiddling on unsigned longs as opposed to
arrays thereof. Audits were done with the structure overhead in place, and
the special-casing was restored only afterward, so that the API conversion
was as complete as possible while it underwent the majority of its end-user
exposure in -mm. More -mm releases were shipped after the restoration to
make sure that configuration was tested, too.
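The selection between the two representations is a compile-time dispatch.
The linux/cpumask.h header that performs it falls outside the truncated
excerpt below, so the following is only a plausible sketch of its structure,
inferred from the asm-generic headers that are shown (both constants here
are illustrative):

	#define NR_CPUS       4    /* illustrative; set by the arch/config */
	#define BITS_PER_LONG 64   /* illustrative; the kernel defines this per arch */

	#if NR_CPUS > BITS_PER_LONG
	/* Wide case: struct-wrapped array; operations come from the generic
	 * bitmap routines via asm-generic/cpumask_array.h. */
	typedef struct {
		unsigned long mask[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
	} cpumask_t;
	#else
	/* Narrow case: plain unsigned long; asm-generic/cpumask_arith.h maps
	 * the accessors to the same bit twiddling that was open-coded before. */
	typedef unsigned long cpumask_t;
	#endif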
The immediate users of this functionality are Sun sparc64 systems, SGI mips64
and ia64 systems, and IBM ia32, ppc64, and s390 systems. Of these, only the
ppc64 machines needing the functionality have yet to be released; all others
have had systems requiring it for full functionality for at least 6 months,
and in some cases, since the initial Linux port to the affected architecture.
Diffstat (limited to 'include')
57 files changed, 779 insertions, 419 deletions
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h index 29c697133046..093fcb435747 100644 --- a/include/asm-alpha/smp.h +++ b/include/asm-alpha/smp.h @@ -3,6 +3,7 @@ #include <linux/config.h> #include <linux/threads.h> +#include <linux/cpumask.h> #include <linux/bitops.h> #include <asm/pal.h> @@ -44,27 +45,12 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS]; #define hard_smp_processor_id() __hard_smp_processor_id() #define smp_processor_id() (current_thread_info()->cpu) -extern unsigned long cpu_present_mask; -extern volatile unsigned long cpu_online_map; +extern cpumask_t cpu_present_mask; +extern cpumask_t long cpu_online_map; extern int smp_num_cpus; -#define cpu_possible(cpu) (cpu_present_mask & (1UL << (cpu))) -#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu))) - -static inline int -num_online_cpus(void) -{ - return hweight64(cpu_online_map); -} - -extern inline int -any_online_cpu(unsigned int mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - - return -1; -} +#define cpu_possible(cpu) cpu_isset(cpu, cpu_present_mask) +#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map) extern int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, unsigned long cpu); diff --git a/include/asm-generic/cpumask_arith.h b/include/asm-generic/cpumask_arith.h new file mode 100644 index 000000000000..bd8712d38fee --- /dev/null +++ b/include/asm-generic/cpumask_arith.h @@ -0,0 +1,43 @@ +#ifndef __ASM_GENERIC_CPUMASK_ARITH_H +#define __ASM_GENERIC_CPUMASK_ARITH_H + +/* + * Arithmetic type -based cpu bitmaps. A single unsigned long is used + * to contain the whole cpu bitmap. + */ + +#define cpu_set(cpu, map) set_bit(cpu, &(map)) +#define cpu_clear(cpu, map) clear_bit(cpu, &(map)) +#define cpu_isset(cpu, map) test_bit(cpu, &(map)) +#define cpu_test_and_set(cpu, map) test_and_set_bit(cpu, &(map)) + +#define cpus_and(dst,src1,src2) do { dst = (src1) & (src2); } while (0) +#define cpus_or(dst,src1,src2) do { dst = (src1) | (src2); } while (0) +#define cpus_clear(map) do { map = 0; } while (0) +#define cpus_complement(map) do { map = ~(map); } while (0) +#define cpus_equal(map1, map2) ((map1) == (map2)) +#define cpus_empty(map) ((map) == 0) + +#if BITS_PER_LONG == 32 +#define cpus_weight(map) hweight32(map) +#elif BITS_PER_LONG == 64 +#define cpus_weight(map) hweight64(map) +#endif + +#define cpus_shift_right(dst, src, n) do { dst = (src) >> (n); } while (0) +#define cpus_shift_left(dst, src, n) do { dst = (src) << (n); } while (0) + +#define any_online_cpu(map) ({ (map) ? first_cpu(map) : NR_CPUS; }) + +#define CPU_MASK_ALL (~((cpumask_t)0) >> (8*sizeof(cpumask_t) - NR_CPUS)) +#define CPU_MASK_NONE ((cpumask_t)0) + +/* only ever use this for things that are _never_ used on large boxen */ +#define cpus_coerce(map) ((unsigned long)(map)) +#define cpus_promote(map) ({ map; }) +#define cpumask_of_cpu(cpu) ({ ((cpumask_t)1) << (cpu); }) + +#define first_cpu(map) __ffs(map) +#define next_cpu(cpu, map) find_next_bit(&(map), NR_CPUS, cpu + 1) + +#endif /* __ASM_GENERIC_CPUMASK_ARITH_H */ diff --git a/include/asm-generic/cpumask_array.h b/include/asm-generic/cpumask_array.h new file mode 100644 index 000000000000..991a04bf7062 --- /dev/null +++ b/include/asm-generic/cpumask_array.h @@ -0,0 +1,47 @@ +#ifndef __ASM_GENERIC_CPUMASK_ARRAY_H +#define __ASM_GENERIC_CPUMASK_ARRAY_H + +/* + * Array-based cpu bitmaps. An array of unsigned longs is used to contain + * the bitmap, and then contained in a structure so it may be passed by + * value. 
+ */ + +#define CPU_ARRAY_SIZE BITS_TO_LONGS(NR_CPUS) + +#define cpu_set(cpu, map) set_bit(cpu, (map).mask) +#define cpu_clear(cpu, map) clear_bit(cpu, (map).mask) +#define cpu_isset(cpu, map) test_bit(cpu, (map).mask) +#define cpu_test_and_set(cpu, map) test_and_set_bit(cpu, (map).mask) + +#define cpus_and(dst,src1,src2) bitmap_and((dst).mask,(src1).mask, (src2).mask, NR_CPUS) +#define cpus_or(dst,src1,src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, NR_CPUS) +#define cpus_clear(map) bitmap_clear((map).mask, NR_CPUS) +#define cpus_complement(map) bitmap_complement((map).mask, NR_CPUS) +#define cpus_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, NR_CPUS) +#define cpus_empty(map) bitmap_empty(map.mask, NR_CPUS) +#define cpus_weight(map) bitmap_weight((map).mask, NR_CPUS) +#define cpus_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, NR_CPUS) +#define cpus_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, NR_CPUS) +#define first_cpu(map) find_first_bit((map).mask, NR_CPUS) +#define next_cpu(cpu, map) find_next_bit((map).mask, NR_CPUS, cpu + 1) + +/* only ever use this for things that are _never_ used on large boxen */ +#define cpus_coerce(map) ((map).mask[0]) +#define cpus_promote(map) ({ cpumask_t __cpu_mask = CPU_MASK_NONE;\ + __cpu_mask.mask[0] = map; \ + __cpu_mask; \ + }) +#define cpumask_of_cpu(cpu) ({ cpumask_t __cpu_mask = CPU_MASK_NONE;\ + cpu_set(cpu, __cpu_mask); \ + __cpu_mask; \ + }) +#define any_online_cpu(map) find_first_bit((map).mask, NR_CPUS) + +/* + * um, these need to be usable as static initializers + */ +#define CPU_MASK_ALL { {[0 ... CPU_ARRAY_SIZE-1] = ~0UL} } +#define CPU_MASK_NONE { {[0 ... CPU_ARRAY_SIZE-1] = 0UL} } + +#endif /* __ASM_GENERIC_CPUMASK_ARRAY_H */ diff --git a/include/asm-generic/cpumask_const_reference.h b/include/asm-generic/cpumask_const_reference.h new file mode 100644 index 000000000000..e98da01bcfdf --- /dev/null +++ b/include/asm-generic/cpumask_const_reference.h @@ -0,0 +1,29 @@ +#ifndef __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H +#define __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H + +struct cpumask_ref { + const cpumask_t *val; +}; + +typedef const struct cpumask_ref cpumask_const_t; + +#define mk_cpumask_const(map) ((cpumask_const_t){ &(map) }) +#define cpu_isset_const(cpu, map) cpu_isset(cpu, *(map).val) + +#define cpus_and_const(dst,src1,src2) cpus_and(dst,*(src1).val,*(src2).val) +#define cpus_or_const(dst,src1,src2) cpus_or(dst,*(src1).val,*(src2).val) + +#define cpus_equal_const(map1, map2) cpus_equal(*(map1).val, *(map2).val) + +#define cpus_copy_const(map1, map2) bitmap_copy((map1).mask, (map2).val->mask, NR_CPUS) + +#define cpus_empty_const(map) cpus_empty(*(map).val) +#define cpus_weight_const(map) cpus_weight(*(map).val) +#define first_cpu_const(map) first_cpu(*(map).val) +#define next_cpu_const(cpu, map) next_cpu(cpu, *(map).val) + +/* only ever use this for things that are _never_ used on large boxen */ +#define cpus_coerce_const(map) cpus_coerce(*(map).val) +#define any_online_cpu_const(map) any_online_cpu(*(map).val) + +#endif /* __ASM_GENERIC_CPUMASK_CONST_REFERENCE_H */ diff --git a/include/asm-generic/cpumask_const_value.h b/include/asm-generic/cpumask_const_value.h new file mode 100644 index 000000000000..7a4caefa4066 --- /dev/null +++ b/include/asm-generic/cpumask_const_value.h @@ -0,0 +1,21 @@ +#ifndef __ASM_GENERIC_CPUMASK_CONST_VALUE_H +#define __ASM_GENERIC_CPUMASK_CONST_VALUE_H + +typedef const cpumask_t cpumask_const_t; + +#define mk_cpumask_const(map) ((cpumask_const_t)(map)) 
+#define cpu_isset_const(cpu, map) cpu_isset(cpu, map) +#define cpus_and_const(dst,src1,src2) cpus_and(dst, src1, src2) +#define cpus_or_const(dst,src1,src2) cpus_or(dst, src1, src2) +#define cpus_equal_const(map1, map2) cpus_equal(map1, map2) +#define cpus_empty_const(map) cpus_empty(map) +#define cpus_copy_const(map1, map2) do { map1 = (cpumask_t)map2; } while (0) +#define cpus_weight_const(map) cpus_weight(map) +#define first_cpu_const(map) first_cpu(map) +#define next_cpu_const(cpu, map) next_cpu(cpu, map) + +/* only ever use this for things that are _never_ used on large boxen */ +#define cpus_coerce_const(map) cpus_coerce(map) +#define any_online_cpu_const(map) any_online_cpu(map) + +#endif /* __ASM_GENERIC_CPUMASK_CONST_VALUE_H */ diff --git a/include/asm-generic/cpumask_up.h b/include/asm-generic/cpumask_up.h new file mode 100644 index 000000000000..13ef0d83816d --- /dev/null +++ b/include/asm-generic/cpumask_up.h @@ -0,0 +1,58 @@ +#ifndef __ASM_GENERIC_CPUMASK_UP_H +#define __ASM_GENERIC_CPUMASK_UP_H + +#define cpus_coerce(map) (map) + +#define cpu_set(cpu, map) do { cpus_coerce(map) = 1UL; } while (0) +#define cpu_clear(cpu, map) do { cpus_coerce(map) = 0UL; } while (0) +#define cpu_isset(cpu, map) (cpus_coerce(map) != 0UL) +#define cpu_test_and_set(cpu, map) test_and_set_bit(0, (map).mask) + +#define cpus_and(dst, src1, src2) \ + do { \ + if (cpus_coerce(src1) && cpus_coerce(src2)) \ + cpus_coerce(dst) = 1UL; \ + else \ + cpus_coerce(dst) = 0UL; \ + } while (0) + +#define cpus_or(dst, src1, src2) \ + do { \ + if (cpus_coerce(src1) || cpus_coerce(src2)) \ + cpus_coerce(dst) = 1UL; \ + else \ + cpus_coerce(dst) = 0UL; \ + } while (0) + +#define cpus_clear(map) do { cpus_coerce(map) = 0UL; } while (0) + +#define cpus_complement(map) \ + do { \ + cpus_coerce(map) = !cpus_coerce(map); \ + } while (0) + +#define cpus_equal(map1, map2) (cpus_coerce(map1) == cpus_coerce(map2)) +#define cpus_empty(map) (cpus_coerce(map) == 0UL) +#define cpus_weight(map) (cpus_coerce(map) ? 1UL : 0UL) +#define cpus_shift_right(d, s, n) do { cpus_coerce(d) = 0UL; } while (0) +#define cpus_shift_left(d, s, n) do { cpus_coerce(d) = 0UL; } while (0) +#define first_cpu(map) (cpus_coerce(map) ? 0 : 1) +#define next_cpu(cpu, map) 1 + +/* only ever use this for things that are _never_ used on large boxen */ +#define cpus_promote(map) \ + ({ \ + cpumask_t __tmp__; \ + cpus_coerce(__tmp__) = map; \ + __tmp__; \ + }) +#define cpumask_of_cpu(cpu) cpus_promote(1) +#define any_online_cpu(map) (cpus_coerce(map) ? 
0 : 1) + +/* + * um, these need to be usable as static initializers + */ +#define CPU_MASK_ALL 1UL +#define CPU_MASK_NONE 0UL + +#endif /* __ASM_GENERIC_CPUMASK_UP_H */ diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index b356b373944e..61a1aece830b 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h @@ -193,7 +193,7 @@ __asm__ __volatile__(LOCK "andl %0,%1" \ #define atomic_set_mask(mask, addr) \ __asm__ __volatile__(LOCK "orl %0,%1" \ -: : "r" (mask),"m" (*addr) : "memory") +: : "r" (mask),"m" (*(addr)) : "memory") /* Atomic operations are already serializing on x86 */ #define smp_mb__before_atomic_dec() barrier() diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index f854ed354038..a9a123b13ba2 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h @@ -270,7 +270,7 @@ static __inline__ int variable_test_bit(int nr, const volatile unsigned long * a * Returns the bit-number of the first zero bit, not the number of the byte * containing a bit. */ -static __inline__ int find_first_zero_bit(unsigned long * addr, unsigned size) +static __inline__ int find_first_zero_bit(const unsigned long *addr, unsigned size) { int d0, d1, d2; int res; @@ -302,7 +302,7 @@ static __inline__ int find_first_zero_bit(unsigned long * addr, unsigned size) * Returns the bit-number of the first set bit, not the number of the byte * containing a bit. */ -static __inline__ int find_first_bit(unsigned long * addr, unsigned size) +static __inline__ int find_first_bit(const unsigned long *addr, unsigned size) { int d0, d1; int res; @@ -328,7 +328,7 @@ static __inline__ int find_first_bit(unsigned long * addr, unsigned size) * @offset: The bitnumber to start searching at * @size: The maximum size to search */ -static __inline__ int find_next_zero_bit(unsigned long * addr, int size, int offset) +static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset) { unsigned long * p = ((unsigned long *) addr) + (offset >> 5); int set = 0, bit = offset & 31, res; @@ -361,9 +361,9 @@ static __inline__ int find_next_zero_bit(unsigned long * addr, int size, int off * @offset: The bitnumber to start searching at * @size: The maximum size to search */ -static __inline__ int find_next_bit(unsigned long *addr, int size, int offset) +static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) { - unsigned long * p = addr + (offset >> 5); + const unsigned long *p = addr + (offset >> 5); int set = 0, bit = offset & 31, res; if (bit) { @@ -430,7 +430,7 @@ static __inline__ unsigned long __ffs(unsigned long word) * unlikely to be set. It's guaranteed that at least one of the 140 * bits is cleared. */ -static inline int sched_find_first_bit(unsigned long *b) +static inline int sched_find_first_bit(const unsigned long *b) { if (unlikely(b[0])) return __ffs(b[0]); diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h index 9b85a2259b4a..cd4b708133c3 100644 --- a/include/asm-i386/genapic.h +++ b/include/asm-i386/genapic.h @@ -1,13 +1,13 @@ #ifndef _ASM_GENAPIC_H #define _ASM_GENAPIC_H 1 -/* +/* * Generic APIC driver interface. - * - * An straight forward mapping of the APIC related parts of the + * + * An straight forward mapping of the APIC related parts of the * x86 subarchitecture interface to a dynamic object. - * - * This is used by the "generic" x86 subarchitecture. + * + * This is used by the "generic" x86 subarchitecture. * * Copyright 2003 Andi Kleen, SuSE Labs. 
*/ @@ -22,23 +22,23 @@ struct genapic { int (*probe)(void); int (*apic_id_registered)(void); - unsigned long (*target_cpus)(void); + cpumask_t (*target_cpus)(void); int int_delivery_mode; int int_dest_mode; int apic_broadcast_id; int esr_disable; - unsigned long (*check_apicid_used)(unsigned long bitmap, int apicid); + unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); unsigned long (*check_apicid_present)(int apicid); int no_balance_irq; void (*init_apic_ldr)(void); - unsigned long (*ioapic_phys_id_map)(unsigned long map); + physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); void (*clustered_apic_check)(void); int (*multi_timer_check)(int apic, int irq); int (*apicid_to_node)(int logical_apicid); int (*cpu_to_logical_apicid)(int cpu); int (*cpu_present_to_apicid)(int mps_cpu); - unsigned long (*apicid_to_cpu_present)(int phys_apicid); + physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); int (*mpc_apic_id)(struct mpc_config_processor *m, struct mpc_config_translation *t); void (*setup_portio_remap)(void); @@ -59,11 +59,11 @@ struct genapic { int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); unsigned (*get_apic_id)(unsigned long x); - unsigned long apic_id_mask; - unsigned int (*cpu_mask_to_apicid)(unsigned long cpumask); + unsigned long apic_id_mask; + unsigned int (*cpu_mask_to_apicid)(cpumask_const_t cpumask); /* ipi */ - void (*send_IPI_mask)(int mask, int vector); + void (*send_IPI_mask)(cpumask_t mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); }; diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h index a143e85d521e..207fc9c5f43d 100644 --- a/include/asm-i386/highmem.h +++ b/include/asm-i386/highmem.h @@ -22,6 +22,7 @@ #include <linux/config.h> #include <linux/interrupt.h> +#include <linux/threads.h> #include <asm/kmap_types.h> #include <asm/tlbflush.h> @@ -39,7 +40,11 @@ extern void kmap_init(void); * easily, subsequent pte tables have to be allocated in one physical * chunk of RAM. 
*/ +#if NR_CPUS <= 32 #define PKMAP_BASE (0xff800000UL) +#else +#define PKMAP_BASE (0xff600000UL) +#endif #ifdef CONFIG_X86_PAE #define LAST_PKMAP 512 #else diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h index eaee6b7cd24f..0ed5a111d7ca 100644 --- a/include/asm-i386/hw_irq.h +++ b/include/asm-i386/hw_irq.h @@ -31,33 +31,33 @@ extern int irq_vector[NR_IRQS]; extern void (*interrupt[NR_IRQS])(void); #ifdef CONFIG_SMP -extern asmlinkage void reschedule_interrupt(void); -extern asmlinkage void invalidate_interrupt(void); -extern asmlinkage void call_function_interrupt(void); +asmlinkage void reschedule_interrupt(void); +asmlinkage void invalidate_interrupt(void); +asmlinkage void call_function_interrupt(void); #endif #ifdef CONFIG_X86_LOCAL_APIC -extern asmlinkage void apic_timer_interrupt(void); -extern asmlinkage void error_interrupt(void); -extern asmlinkage void spurious_interrupt(void); -extern asmlinkage void thermal_interrupt(struct pt_regs); +asmlinkage void apic_timer_interrupt(void); +asmlinkage void error_interrupt(void); +asmlinkage void spurious_interrupt(void); +asmlinkage void thermal_interrupt(struct pt_regs); #endif -extern void mask_irq(unsigned int irq); -extern void unmask_irq(unsigned int irq); -extern void disable_8259A_irq(unsigned int irq); -extern void enable_8259A_irq(unsigned int irq); -extern int i8259A_irq_pending(unsigned int irq); -extern void make_8259A_irq(unsigned int irq); -extern void init_8259A(int aeoi); -extern void FASTCALL(send_IPI_self(int vector)); -extern void init_VISWS_APIC_irqs(void); -extern void setup_IO_APIC(void); -extern void disable_IO_APIC(void); -extern void print_IO_APIC(void); -extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); -extern void send_IPI(int dest, int vector); -extern void setup_ioapic_dest(unsigned long mask); +void mask_irq(unsigned int irq); +void unmask_irq(unsigned int irq); +void disable_8259A_irq(unsigned int irq); +void enable_8259A_irq(unsigned int irq); +int i8259A_irq_pending(unsigned int irq); +void make_8259A_irq(unsigned int irq); +void init_8259A(int aeoi); +void FASTCALL(send_IPI_self(int vector)); +void init_VISWS_APIC_irqs(void); +void setup_IO_APIC(void); +void disable_IO_APIC(void); +void print_IO_APIC(void); +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); +void send_IPI(int dest, int vector); +void setup_ioapic_dest(cpumask_t mask); extern unsigned long io_apic_irqs; diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h index c00889f9e196..a63520b95a36 100644 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ b/include/asm-i386/mach-bigsmp/mach_apic.h @@ -20,7 +20,7 @@ static inline int apic_id_registered(void) } #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline unsigned long target_cpus(void) +static inline cpumask_t target_cpus(void) { return cpu_online_map; } @@ -29,14 +29,15 @@ static inline unsigned long target_cpus(void) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ -#define APIC_BROADCAST_ID (0x0f) -static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) -{ +#define APIC_BROADCAST_ID (0xff) +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +{ return 0; -} +} + static inline unsigned long check_apicid_present(int bit) { - return (phys_cpu_present_map & (1 << bit)); + return physid_isset(bit, phys_cpu_present_map); } #define apicid_cluster(apicid) (apicid & 0xF0) @@ -88,12 +89,12 @@ 
static inline int cpu_present_to_apicid(int mps_cpu) return (int) bios_cpu_apicid[mps_cpu]; } -static inline unsigned long apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { - return (1ul << phys_apicid); + return physid_mask_of_physid(phys_apicid); } -extern volatile u8 cpu_2_logical_apicid[]; +extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { @@ -108,13 +109,13 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, m->mpc_apicver); - return (m->mpc_apicid); + return m->mpc_apicid; } -static inline ulong ioapic_phys_id_map(ulong phys_map) +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ - return (0x0F); + return physids_promote(0xFUL); } #define WAKE_SECONDARY_VIA_INIT @@ -132,25 +133,25 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) return (1); } -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - num_bits_set = hweight32(cpumask); + num_bits_set = cpus_weight_const(cpumask); /* Return id to all */ - if (num_bits_set == 32) + if (num_bits_set == NR_CPUS) return (int) 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. */ - cpu = ffs(cpumask)-1; + cpu = first_cpu_const(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpumask & (1 << cpu)) { + if (cpu_isset_const(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ diff --git a/include/asm-i386/mach-bigsmp/mach_ipi.h b/include/asm-i386/mach-bigsmp/mach_ipi.h index 87bd9fc4add8..4cb4ba486362 100644 --- a/include/asm-i386/mach-bigsmp/mach_ipi.h +++ b/include/asm-i386/mach-bigsmp/mach_ipi.h @@ -1,18 +1,19 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -inline void send_IPI_mask_sequence(int mask, int vector); +inline void send_IPI_mask_sequence(cpumask_t mask, int vector); -static inline void send_IPI_mask(int mask, int vector) +static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - unsigned long mask = cpu_online_map & ~(1 << smp_processor_id()); + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); - if (mask) + if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h index 12edf5ed9c58..a96477cf10a0 100644 --- a/include/asm-i386/mach-default/mach_apic.h +++ b/include/asm-i386/mach-default/mach_apic.h @@ -5,12 +5,12 @@ #define APIC_DFR_VALUE (APIC_DFR_FLAT) -static inline unsigned long target_cpus(void) +static inline cpumask_t target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_map; #else - return 1; + return cpumask_of_cpu(0); #endif } #define TARGET_CPUS (target_cpus()) @@ -21,16 +21,20 @@ static inline unsigned long target_cpus(void) #define INT_DELIVERY_MODE dest_LowestPrio #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ +/* + * this isn't really broadcast, just a (potentially inaccurate) 
upper + * bound for valid physical APIC id's + */ #define APIC_BROADCAST_ID 0x0F -static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) -{ - return (bitmap & (1UL << apicid)); -} +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} -static inline unsigned long check_apicid_present(int bit) +static inline unsigned long check_apicid_present(int bit) { - return (phys_cpu_present_map & (1UL << bit)); + return physid_isset(bit, phys_cpu_present_map); } /* @@ -50,7 +54,7 @@ static inline void init_apic_ldr(void) apic_write_around(APIC_LDR, val); } -static inline unsigned long ioapic_phys_id_map(unsigned long phys_map) +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { return phys_map; } @@ -82,9 +86,9 @@ static inline int cpu_present_to_apicid(int mps_cpu) return mps_cpu; } -static inline unsigned long apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { - return (1ul << phys_apicid); + return physid_mask_of_physid(phys_apicid); } static inline int mpc_apic_id(struct mpc_config_processor *m, @@ -104,18 +108,17 @@ static inline void setup_portio_remap(void) static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { - return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map); + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); } static inline int apic_id_registered(void) { - return (test_bit(GET_APIC_ID(apic_read(APIC_ID)), - &phys_cpu_present_map)); + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); } -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask) { - return cpumask; + return cpus_coerce_const(cpumask); } static inline void enable_apic_mode(void) diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h index 43d78f307db5..d41949849fa8 100644 --- a/include/asm-i386/mach-default/mach_ipi.h +++ b/include/asm-i386/mach-default/mach_ipi.h @@ -1,10 +1,10 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -inline void send_IPI_mask_bitmask(int mask, int vector); +inline void send_IPI_mask_bitmask(cpumask_t mask, int vector); inline void __send_IPI_shortcut(unsigned int shortcut, int vector); -static inline void send_IPI_mask(int mask, int vector) +static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_bitmask(mask, vector); } diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h index e9414e7f3154..f83d03b0458f 100644 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ b/include/asm-i386/mach-es7000/mach_apic.h @@ -11,12 +11,12 @@ static inline int apic_id_registered(void) return (1); } -static inline unsigned long target_cpus(void) +static inline cpumask_t target_cpus(void) { #if defined CONFIG_ES7000_CLUSTERED_APIC - return (0xff); + return CPU_MASK_ALL; #else - return (bios_cpu_apicid[smp_processor_id()]); + return cpumask_of_cpu(bios_cpu_apicid[smp_processor_id()]); #endif } #define TARGET_CPUS (target_cpus()) @@ -40,13 +40,13 @@ static inline unsigned long target_cpus(void) #define APIC_BROADCAST_ID (0xff) -static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } static inline unsigned long check_apicid_present(int bit) { - return 
(phys_cpu_present_map & (1 << bit)); + return physid_isset(bit, phys_cpu_present_map); } #define apicid_cluster(apicid) (apicid & 0xF0) @@ -88,7 +88,7 @@ static inline void clustered_apic_check(void) int apic = bios_cpu_apicid[smp_processor_id()]; printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? - "Physical Cluster" : "Logical Cluster", nr_ioapics, TARGET_CPUS); + "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_coerce(TARGET_CPUS)); } static inline int multi_timer_check(int apic, int irq) @@ -110,20 +110,23 @@ static inline int cpu_present_to_apicid(int mps_cpu) return (int) bios_cpu_apicid[mps_cpu]; } -static inline unsigned long apicid_to_cpu_present(int phys_apicid) +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) { - static int cpu = 0; - return (1ul << cpu++); + static int id = 0; + physid_mask_t mask; + mask = physid_mask_of_physid(id); + ++id; + return mask; } -extern volatile u8 cpu_2_logical_apicid[]; +extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { return (int)cpu_2_logical_apicid[cpu]; } -static inline int mpc_apic_id(struct mpc_config_processor *m, int quad) +static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) { printk("Processor #%d %ld:%ld APIC version %d\n", m->mpc_apicid, @@ -133,10 +136,10 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad) return (m->mpc_apicid); } -static inline ulong ioapic_phys_id_map(ulong phys_map) +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* For clustered we don't have a good way to do this yet - hack */ - return (0xff); + return physids_promote(0xff); } @@ -151,32 +154,30 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid) return (1); } -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - if (cpumask == TARGET_CPUS) - return cpumask; - num_bits_set = hweight32(cpumask); + num_bits_set = cpus_weight_const(cpumask); /* Return id to all */ - if (num_bits_set == 32) - return TARGET_CPUS; + if (num_bits_set == NR_CPUS) + return 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - cpu = ffs(cpumask)-1; + cpu = first_cpu_const(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpumask & (1 << cpu)) { + if (cpu_isset_const(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ printk ("%s: Not a valid mask!\n",__FUNCTION__); - return TARGET_CPUS; + return 0xFF; } apicid = new_apicid; cpus_found++; diff --git a/include/asm-i386/mach-es7000/mach_ipi.h b/include/asm-i386/mach-es7000/mach_ipi.h index e0866134e624..979ac68643ba 100644 --- a/include/asm-i386/mach-es7000/mach_ipi.h +++ b/include/asm-i386/mach-es7000/mach_ipi.h @@ -1,18 +1,19 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -static inline void send_IPI_mask_sequence(int mask, int vector); +static inline void send_IPI_mask_sequence(cpumask_t mask, int vector); -static inline void send_IPI_mask(int mask, int vector) +static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - unsigned long mask = cpu_online_map & ~(1 << smp_processor_id()); - - if (mask) + cpumask_t mask = cpumask_of_cpu(smp_processor_id()); + cpus_complement(mask); + cpus_and(mask, mask, cpu_online_map); + if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h index 7a0bf41b4db1..ceca92723c0e 100644 --- a/include/asm-i386/mach-numaq/mach_apic.h +++ b/include/asm-i386/mach-numaq/mach_apic.h @@ -6,7 +6,13 @@ #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -#define TARGET_CPUS (~0UL) +static inline cpumask_t target_cpus(void) +{ + cpumask_t tmp = CPU_MASK_ALL; + return tmp; +} + +#define TARGET_CPUS (target_cpus()) #define NO_BALANCE_IRQ (1) #define esr_disable (1) @@ -15,13 +21,13 @@ #define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ #define APIC_BROADCAST_ID 0x0F -#define check_apicid_used(bitmap, apicid) ((bitmap) & (1 << (apicid))) -#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit)) +#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) +#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) #define apicid_cluster(apicid) (apicid & 0xF0) static inline int apic_id_registered(void) { - return (1); + return 1; } static inline void init_apic_ldr(void) @@ -41,17 +47,17 @@ static inline void clustered_apic_check(void) */ static inline int multi_timer_check(int apic, int irq) { - return (apic != 0 && irq == 0); + return apic != 0 && irq == 0; } -static inline ulong ioapic_phys_id_map(ulong phys_map) +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) { /* We don't have a good way to do this yet - hack */ - return 0xf; + return physids_promote(0xFUL); } /* Mapping from cpu number to logical apicid */ -extern volatile u8 cpu_2_logical_apicid[]; +extern u8 cpu_2_logical_apicid[]; static inline int cpu_to_logical_apicid(int cpu) { return (int)cpu_2_logical_apicid[cpu]; @@ -59,22 +65,25 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { - return ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) ); + return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); } static inline int generate_logical_apicid(int quad, int phys_apicid) { - return ( (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1) ); + return (quad << 4) + (phys_apicid ? 
phys_apicid << 1 : 1); } static inline int apicid_to_node(int logical_apicid) { - return (logical_apicid >> 4); + return logical_apicid >> 4; } -static inline unsigned long apicid_to_cpu_present(int logical_apicid) +static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) { - return ( (logical_apicid&0xf) << (4*apicid_to_node(logical_apicid)) ); + int node = apicid_to_node(logical_apicid); + int cpu = __ffs(logical_apicid & 0xf); + + return physid_mask_of_physid(cpu + 4*node); } static inline int mpc_apic_id(struct mpc_config_processor *m, @@ -115,7 +124,7 @@ static inline void enable_apic_mode(void) * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us */ -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask) { return (int) 0xF; } diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h index e0866134e624..29aabcd99524 100644 --- a/include/asm-i386/mach-numaq/mach_ipi.h +++ b/include/asm-i386/mach-numaq/mach_ipi.h @@ -1,18 +1,19 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -static inline void send_IPI_mask_sequence(int mask, int vector); +static inline void send_IPI_mask_sequence(cpumask_t, int vector); -static inline void send_IPI_mask(int mask, int vector) +static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - unsigned long mask = cpu_online_map & ~(1 << smp_processor_id()); + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); - if (mask) + if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h index 624db3a2d9ba..2247c7adca3d 100644 --- a/include/asm-i386/mach-summit/mach_apic.h +++ b/include/asm-i386/mach-summit/mach_apic.h @@ -18,17 +18,18 @@ static inline unsigned long xapic_phys_to_log_apicid(int phys_apic) #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline unsigned long target_cpus(void) +static inline cpumask_t target_cpus(void) { - return (~0UL); + cpumask_t tmp = CPU_MASK_ALL; + return tmp; } #define TARGET_CPUS (target_cpus()) #define INT_DELIVERY_MODE (dest_Fixed) #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ -#define APIC_BROADCAST_ID (0x0F) -static inline unsigned long check_apicid_used(unsigned long bitmap, int apicid) +#define APIC_BROADCAST_ID (0xFF) +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) { return 0; } @@ -72,11 +73,11 @@ static inline void clustered_apic_check(void) static inline int apicid_to_node(int logical_apicid) { - return (logical_apicid >> 5); /* 2 clusterids per CEC */ + return logical_apicid >> 5; /* 2 clusterids per CEC */ } /* Mapping from cpu number to logical apicid */ -extern volatile u8 cpu_2_logical_apicid[]; +extern u8 cpu_2_logical_apicid[]; static inline int cpu_to_logical_apicid(int cpu) { return (int)cpu_2_logical_apicid[cpu]; @@ -87,15 +88,15 @@ static inline int cpu_present_to_apicid(int mps_cpu) return (int) bios_cpu_apicid[mps_cpu]; } -static inline ulong ioapic_phys_id_map(ulong phys_map) +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) { /* For clustered we don't have a good way to do this yet - hack */ - return 0x0F; + return physids_promote(0x0F); } -static inline unsigned long apicid_to_cpu_present(int apicid) 
+static inline physid_mask_t apicid_to_cpu_present(int apicid) { - return 1; + return physid_mask_of_physid(0); } static inline int mpc_apic_id(struct mpc_config_processor *m, @@ -122,25 +123,25 @@ static inline void enable_apic_mode(void) { } -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - num_bits_set = hweight32(cpumask); + num_bits_set = cpus_weight_const(cpumask); /* Return id to all */ - if (num_bits_set == 32) + if (num_bits_set == NR_CPUS) return (int) 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. */ - cpu = ffs(cpumask)-1; + cpu = first_cpu_const(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpumask & (1 << cpu)) { + if (cpu_isset_const(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ diff --git a/include/asm-i386/mach-summit/mach_ipi.h b/include/asm-i386/mach-summit/mach_ipi.h index 87bd9fc4add8..4cb4ba486362 100644 --- a/include/asm-i386/mach-summit/mach_ipi.h +++ b/include/asm-i386/mach-summit/mach_ipi.h @@ -1,18 +1,19 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -inline void send_IPI_mask_sequence(int mask, int vector); +inline void send_IPI_mask_sequence(cpumask_t mask, int vector); -static inline void send_IPI_mask(int mask, int vector) +static inline void send_IPI_mask(cpumask_t mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - unsigned long mask = cpu_online_map & ~(1 << smp_processor_id()); + cpumask_t mask = cpu_online_map; + cpu_clear(smp_processor_id(), mask); - if (mask) + if (!cpus_empty(mask)) send_IPI_mask(mask, vector); } diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h index 0a66346ac13c..2376c50ba6e9 100644 --- a/include/asm-i386/mach-visws/mach_apic.h +++ b/include/asm-i386/mach-visws/mach_apic.h @@ -12,17 +12,16 @@ #ifdef CONFIG_SMP #define TARGET_CPUS cpu_online_map #else - #define TARGET_CPUS 0x01 + #define TARGET_CPUS cpumask_of_cpu(0) #endif #define APIC_BROADCAST_ID 0x0F -#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid)) -#define check_apicid_present(bit) (phys_cpu_present_map & (1 << bit)) +#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) +#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) static inline int apic_id_registered(void) { - return (test_bit(GET_APIC_ID(apic_read(APIC_ID)), - &phys_cpu_present_map)); + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); } /* @@ -61,9 +60,9 @@ static inline int cpu_present_to_apicid(int mps_cpu) return mps_cpu; } -static inline unsigned long apicid_to_cpu_present(int apicid) +static inline physid_mask_t apicid_to_cpu_present(int apicid) { - return (1ul << apicid); + return physid_mask_of_physid(apicid); } #define WAKE_SECONDARY_VIA_INIT @@ -78,11 +77,11 @@ static inline void enable_apic_mode(void) static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) { - return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map); + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); } -static inline unsigned int cpu_mask_to_apicid (unsigned long cpumask) +static inline unsigned int cpu_mask_to_apicid(cpumask_const_t 
cpumask) { - return cpumask; + return cpus_coerce_const(cpumask); } #endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h index 938fc1364344..99cff65069f5 100644 --- a/include/asm-i386/mmu_context.h +++ b/include/asm-i386/mmu_context.h @@ -31,12 +31,12 @@ static inline void switch_mm(struct mm_struct *prev, if (likely(prev != next)) { /* stop flush ipis for the previous mm */ - clear_bit(cpu, &prev->cpu_vm_mask); + cpu_clear(cpu, prev->cpu_vm_mask); #ifdef CONFIG_SMP cpu_tlbstate[cpu].state = TLBSTATE_OK; cpu_tlbstate[cpu].active_mm = next; #endif - set_bit(cpu, &next->cpu_vm_mask); + cpu_set(cpu, next->cpu_vm_mask); /* Re-load page tables */ load_cr3(next->pgd); @@ -52,7 +52,7 @@ static inline void switch_mm(struct mm_struct *prev, cpu_tlbstate[cpu].state = TLBSTATE_OK; BUG_ON(cpu_tlbstate[cpu].active_mm != next); - if (!test_and_set_bit(cpu, &next->cpu_vm_mask)) { + if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must reload %cr3. */ diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h index 816712955656..b596438496a1 100644 --- a/include/asm-i386/mpspec.h +++ b/include/asm-i386/mpspec.h @@ -1,6 +1,7 @@ #ifndef __ASM_MPSPEC_H #define __ASM_MPSPEC_H +#include <linux/cpumask.h> #include <asm/mpspec_def.h> #include <mach_mpspec.h> @@ -11,7 +12,6 @@ extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; extern unsigned int boot_cpu_physical_apicid; -extern unsigned long phys_cpu_present_map; extern int smp_found_config; extern void find_smp_config (void); extern void get_smp_config (void); @@ -41,5 +41,49 @@ extern void mp_config_ioapic_for_sci(int irq); extern void mp_parse_prt (void); #endif /*CONFIG_ACPI_BOOT*/ +#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) + +struct physid_mask +{ + unsigned long mask[PHYSID_ARRAY_SIZE]; +}; + +typedef struct physid_mask physid_mask_t; + +#define physid_set(physid, map) set_bit(physid, (map).mask) +#define physid_clear(physid, map) clear_bit(physid, (map).mask) +#define physid_isset(physid, map) test_bit(physid, (map).mask) +#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) + +#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) +#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) +#define physids_clear(map) bitmap_clear((map).mask, MAX_APICS) +#define physids_complement(map) bitmap_complement((map).mask, MAX_APICS) +#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) +#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) +#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) +#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) +#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) +#define physids_coerce(map) ((map).mask[0]) + +#define physids_promote(physids) \ + ({ \ + physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ + __physid_mask.mask[0] = physids; \ + __physid_mask; \ + }) + +#define physid_mask_of_physid(physid) \ + ({ \ + physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ + physid_set(physid, __physid_mask); \ + __physid_mask; \ + }) + +#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } +#define PHYSID_MASK_NONE { {[0 ... 
PHYSID_ARRAY_SIZE-1] = 0UL} } + +extern physid_mask_t phys_cpu_present_map; + #endif diff --git a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h index a6121a46e963..de52624c6e0b 100644 --- a/include/asm-i386/numaq.h +++ b/include/asm-i386/numaq.h @@ -28,7 +28,7 @@ #ifdef CONFIG_X86_NUMAQ -#define MAX_NUMNODES 8 +#define MAX_NUMNODES 16 extern void get_memcfg_numaq(void); #define get_memcfg_numa() get_memcfg_numaq() @@ -159,7 +159,7 @@ struct sys_cfg_data { static inline unsigned long *get_zholes_size(int nid) { - return 0; + return NULL; } #endif /* CONFIG_X86_NUMAQ */ #endif /* NUMAQ_H */ diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index ea83e1dc8660..9bca31a68fd4 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h @@ -8,6 +8,7 @@ #include <linux/config.h> #include <linux/kernel.h> #include <linux/threads.h> +#include <linux/cpumask.h> #endif #ifdef CONFIG_X86_LOCAL_APIC @@ -31,9 +32,7 @@ */ extern void smp_alloc_memory(void); -extern unsigned long phys_cpu_present_map; -extern unsigned long cpu_online_map; -extern volatile unsigned long smp_invalidate_needed; +extern physid_mask_t phys_cpu_present_map; extern int pic_mode; extern int smp_num_siblings; extern int cpu_sibling_map[]; @@ -54,37 +53,19 @@ extern void zap_low_mappings (void); */ #define smp_processor_id() (current_thread_info()->cpu) -extern volatile unsigned long cpu_callout_map; +extern cpumask_t cpu_callout_map; -#define cpu_possible(cpu) (cpu_callout_map & (1<<(cpu))) -#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) - -#define for_each_cpu(cpu, mask) \ - for(mask = cpu_online_map; \ - cpu = __ffs(mask), mask != 0; \ - mask &= ~(1<<cpu)) - -extern inline unsigned int num_online_cpus(void) -{ - return hweight32(cpu_online_map); -} +#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map) /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { - return hweight32(cpu_callout_map); + return cpus_weight(cpu_callout_map); } extern void map_cpu_to_logical_apicid(void); extern void unmap_cpu_to_logical_apicid(int cpu); -extern inline unsigned int any_online_cpu(unsigned int mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - - return NR_CPUS; -} #ifdef CONFIG_X86_LOCAL_APIC #ifdef APIC_DEFINITION diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index db289bc5bcb1..9190a4f3408f 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h @@ -31,9 +31,11 @@ #include <asm/mpspec.h> +#include <linux/cpumask.h> + /* Mappings between logical cpu number and node number */ -extern volatile unsigned long node_2_cpu_mask[]; -extern volatile int cpu_2_node[]; +extern cpumask_t node_2_cpu_mask[]; +extern int cpu_2_node[]; /* Returns the number of the node containing CPU 'cpu' */ static inline int cpu_to_node(int cpu) @@ -49,7 +51,7 @@ static inline int cpu_to_node(int cpu) #define parent_node(node) (node) /* Returns a bitmask of CPUs on Node 'node'. */ -static inline unsigned long node_to_cpumask(int node) +static inline cpumask_t node_to_cpumask(int node) { return node_2_cpu_mask[node]; } @@ -57,14 +59,15 @@ static inline unsigned long node_to_cpumask(int node) /* Returns the number of the first CPU on Node 'node'. 
*/ static inline int node_to_first_cpu(int node) { - return __ffs(node_to_cpumask(node)); + cpumask_t mask = node_to_cpumask(node); + return first_cpu(mask); } /* Returns the number of the first MemBlk on Node 'node' */ #define node_to_memblk(node) (node) /* Returns the number of the node containing PCI bus 'bus' */ -static inline unsigned long pcibus_to_cpumask(int bus) +static inline cpumask_t pcibus_to_cpumask(int bus) { return node_to_cpumask(mp_bus_id_to_node[bus]); } diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index b133b67609eb..af15c6694522 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -409,7 +409,7 @@ found_middle: * Find next bit in a bitmap reasonably efficiently.. */ static inline int -find_next_bit (void *addr, unsigned long size, unsigned long offset) +find_next_bit(const void *addr, unsigned long size, unsigned long offset) { unsigned long *p = ((unsigned long *) addr) + (offset >> 6); unsigned long result = offset & ~63UL; diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 0b9a1253845c..0f114d98c2ee 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/threads.h> #include <linux/kernel.h> +#include <linux/cpumask.h> #include <asm/bitops.h> #include <asm/io.h> @@ -37,8 +38,8 @@ extern struct smp_boot_data { extern char no_int_routing __initdata; -extern unsigned long phys_cpu_present_map; -extern volatile unsigned long cpu_online_map; +extern cpumask_t phys_cpu_present_map; +extern cpumask_t cpu_online_map; extern unsigned long ipi_base_addr; extern unsigned char smp_int_redirect; @@ -47,22 +48,7 @@ extern volatile int ia64_cpu_to_sapicid[]; extern unsigned long ap_wakeup_vector; -#define cpu_possible(cpu) (phys_cpu_present_map & (1UL << (cpu))) -#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu))) - -static inline unsigned int -num_online_cpus (void) -{ - return hweight64(cpu_online_map); -} - -static inline unsigned int -any_online_cpu (unsigned int mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - return NR_CPUS; -} +#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map) /* * Function to map hard smp processor id to logical id. Slow, so don't use this in diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index f2cc964ff429..1c7e4eea094d 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -17,6 +17,7 @@ #include <linux/bitops.h> #include <linux/threads.h> +#include <linux/cpumask.h> #include <asm/atomic.h> #define smp_processor_id() (current_thread_info()->cpu) @@ -45,56 +46,22 @@ extern struct call_data_struct *call_data; #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ #define SMP_CALL_FUNCTION 0x2 -#if (NR_CPUS <= _MIPS_SZLONG) - -typedef unsigned long cpumask_t; - -#define CPUMASK_CLRALL(p) (p) = 0 -#define CPUMASK_SETB(p, bit) (p) |= 1UL << (bit) -#define CPUMASK_CLRB(p, bit) (p) &= ~(1UL << (bit)) -#define CPUMASK_TSTB(p, bit) ((p) & (1UL << (bit))) - -#elif (NR_CPUS <= 128) - -/* - * The foll should work till 128 cpus. 
- */ -#define CPUMASK_SIZE (NR_CPUS/_MIPS_SZLONG) -#define CPUMASK_INDEX(bit) ((bit) >> 6) -#define CPUMASK_SHFT(bit) ((bit) & 0x3f) - -typedef struct { - unsigned long _bits[CPUMASK_SIZE]; -} cpumask_t; - -#define CPUMASK_CLRALL(p) (p)._bits[0] = 0, (p)._bits[1] = 0 -#define CPUMASK_SETB(p, bit) (p)._bits[CPUMASK_INDEX(bit)] |= \ - (1UL << CPUMASK_SHFT(bit)) -#define CPUMASK_CLRB(p, bit) (p)._bits[CPUMASK_INDEX(bit)] &= \ - ~(1UL << CPUMASK_SHFT(bit)) -#define CPUMASK_TSTB(p, bit) ((p)._bits[CPUMASK_INDEX(bit)] & \ - (1UL << CPUMASK_SHFT(bit))) - -#else -#error cpumask macros only defined for 128p kernels -#endif - extern cpumask_t phys_cpu_present_map; extern cpumask_t cpu_online_map; -#define cpu_possible(cpu) (phys_cpu_present_map & (1<<(cpu))) -#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) +#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map) +#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map) static inline unsigned int num_online_cpus(void) { - return hweight32(cpu_online_map); + return cpus_weight(cpu_online_map); } -extern volatile unsigned long cpu_callout_map; +extern cpumask_t cpu_callout_map; /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { - return hweight32(cpu_callout_map); + return cpus_weight(cpu_callout_map); } #endif /* CONFIG_SMP */ diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h index e03ccc7732c2..0877e8c9bbf7 100644 --- a/include/asm-parisc/smp.h +++ b/include/asm-parisc/smp.h @@ -14,9 +14,10 @@ #ifndef ASSEMBLY #include <linux/bitops.h> #include <linux/threads.h> /* for NR_CPUS */ +#include <linux/cpumask.h> typedef unsigned long address_t; -extern volatile unsigned long cpu_online_map; +extern cpumask_t cpu_online_map; /* @@ -51,22 +52,10 @@ extern void smp_send_reschedule(int cpu); extern unsigned long cpu_present_mask; #define smp_processor_id() (current_thread_info()->cpu) -#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) +#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map) -#define cpu_possible(cpu) (cpu_present_mask & (1<<(cpu))) +#define cpu_possible(cpu) cpu_isset(cpu, cpu_present_mask) -extern inline unsigned int num_online_cpus(void) -{ - return hweight32(cpu_online_map); -} - -extern inline unsigned int any_online_cpu(unsigned int mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - - return NR_CPUS; -} #endif /* CONFIG_SMP */ #define NO_PROC_ID 0xFF /* No processor magic marker */ diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index baabf274f815..8d27f08eeb6e 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/errno.h> +#include <linux/cpumask.h> #include <linux/threads.h> #ifdef CONFIG_SMP @@ -29,8 +30,8 @@ struct cpuinfo_PPC { }; extern struct cpuinfo_PPC cpu_data[]; -extern unsigned long cpu_online_map; -extern unsigned long cpu_possible_map; +extern cpumask_t cpu_online_map; +extern cpumask_t cpu_possible_map; extern unsigned long smp_proc_in_lock[]; extern volatile unsigned long cpu_callin_map[]; extern int smp_tb_synchronized; @@ -46,21 +47,8 @@ extern void smp_local_timer_interrupt(struct pt_regs *); #define smp_processor_id() (current_thread_info()->cpu) -#define cpu_online(cpu) (cpu_online_map & (1<<(cpu))) -#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu))) - -extern inline unsigned int num_online_cpus(void) -{ - return hweight32(cpu_online_map); -} - -extern inline unsigned int 
any_online_cpu(unsigned int mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - - return NR_CPUS; -} +#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map) +#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map) extern int __cpu_up(unsigned int cpu); diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h index a84ee5812e9f..cfe130bbd33d 100644 --- a/include/asm-ppc64/mmu_context.h +++ b/include/asm-ppc64/mmu_context.h @@ -143,7 +143,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { flush_stab(tsk, next); - set_bit(smp_processor_id(), &next->cpu_vm_mask); + cpu_set(smp_processor_id(), next->cpu_vm_mask); } #define deactivate_mm(tsk,mm) do { } while (0) diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h index 865f83ad1f55..467e8cfc42b6 100644 --- a/include/asm-ppc64/smp.h +++ b/include/asm-ppc64/smp.h @@ -19,6 +19,7 @@ #include <linux/config.h> #include <linux/threads.h> +#include <linux/cpumask.h> #include <linux/kernel.h> #ifdef CONFIG_SMP @@ -27,31 +28,14 @@ #include <asm/paca.h> -extern unsigned long cpu_online_map; - extern void smp_message_pass(int target, int msg, unsigned long data, int wait); extern void smp_send_tlb_invalidate(int); extern void smp_send_xmon_break(int cpu); struct pt_regs; extern void smp_message_recv(int, struct pt_regs *); -#define cpu_online(cpu) test_bit((cpu), &cpu_online_map) - #define cpu_possible(cpu) paca[cpu].active -static inline unsigned int num_online_cpus(void) -{ - return hweight64(cpu_online_map); -} - -static inline unsigned int any_online_cpu(unsigned long mask) -{ - if (mask & cpu_online_map) - return __ffs(mask & cpu_online_map); - - return NR_CPUS; -} - #define smp_processor_id() (get_paca()->xPacaIndex) /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 
diff --git a/include/asm-ppc64/tlb.h b/include/asm-ppc64/tlb.h
index 67ac2b480b2f..0a63e2811eee 100644
--- a/include/asm-ppc64/tlb.h
+++ b/include/asm-ppc64/tlb.h
@@ -49,6 +49,7 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
 	unsigned long i = batch->index;
 	pte_t pte;
+	cpumask_t local_cpumask = cpumask_of_cpu(cpu);
 
 	if (pte_val(*ptep) & _PAGE_HASHPTE) {
 		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
@@ -61,7 +62,7 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 		if (i == PPC64_TLB_BATCH_NR) {
 			int local = 0;
 
-			if (tlb->mm->cpu_vm_mask == (1UL << cpu))
+			if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 				local = 1;
 
 			flush_hash_range(tlb->mm->context, i, local);
@@ -78,8 +79,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 	int cpu = smp_processor_id();
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
 	int local = 0;
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
 
-	if (tlb->mm->cpu_vm_mask == (1UL << smp_processor_id()))
+	if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 		local = 1;
 
 	flush_hash_range(tlb->mm->context, batch->index, local);
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index b844b8b6fdcd..d68d059e9118 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -505,7 +505,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
 	unsigned char ch;
 
 	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
-	ch = *(unsigned char *) addr;
+	ch = *(volatile unsigned char *) addr;
 	return (ch >> (nr & 7)) & 1;
 }
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 0e63fd521893..9bf56465a196 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                               : : "m" (pgd) );
 #endif /* __s390x__ */
 	}
-	set_bit(smp_processor_id(), &next->cpu_vm_mask);
+	cpu_set(smp_processor_id(), next->cpu_vm_mask);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 5a5ca3f07e58..0ff59e370a39 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -11,6 +11,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 
 #if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
@@ -28,8 +29,8 @@ typedef struct
 	__u16      cpu;
 } sigp_info;
 
-extern volatile unsigned long cpu_online_map;
-extern volatile unsigned long cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_possible_map;
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
@@ -47,25 +48,8 @@ extern volatile unsigned long cpu_possible_map;
 
 #define smp_processor_id() (current_thread_info()->cpu)
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-#define cpu_possible(cpu) (cpu_possible_map & (1<<(cpu)))
-
-extern inline unsigned int num_online_cpus(void)
-{
-#ifndef __s390x__
-	return hweight32(cpu_online_map);
-#else /* __s390x__ */
-	return hweight64(cpu_online_map);
-#endif /* __s390x__ */
-}
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_possible_map)
 
 extern __inline__ __u16 hard_smp_processor_id(void)
 {
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index b4bb4c1205b4..71908df445b7 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -98,13 +98,15 @@ static inline void global_flush_tlb(void)
 
 static inline void __flush_tlb_mm(struct mm_struct * mm)
 {
+	cpumask_t local_cpumask;
 	preempt_disable();
-	if (mm->cpu_vm_mask != (1UL << smp_processor_id())) {
+	local_cpumask = cpumask_of_cpu(smp_processor_id());
+	if (!cpus_equal(mm->cpu_vm_mask, local_cpumask)) {
 		/* mm was active on more than one cpu. */
 		if (mm == current->active_mm &&
 		    atomic_read(&mm->mm_users) == 1)
 			/* this cpu is the only one using the mm. */
-			mm->cpu_vm_mask = 1UL << smp_processor_id();
+			mm->cpu_vm_mask = local_cpumask;
 		global_flush_tlb();
 	} else
 		local_flush_tlb();
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index b9959928eabf..7df16addb58e 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -8,6 +8,7 @@
 
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <asm/head.h>
 #include <asm/btfixup.h>
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 79adf5b5384b..9deee1ab7120 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -156,6 +156,14 @@ static __inline__ int ffs(int x)
 
 #ifdef ULTRA_HAS_POPULATION_COUNT
 
+static __inline__ unsigned int hweight64(unsigned long w)
+{
+	unsigned int res;
+
+	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
+	return res;
+}
+
 static __inline__ unsigned int hweight32(unsigned int w)
 {
 	unsigned int res;
@@ -182,6 +190,7 @@ static __inline__ unsigned int hweight8(unsigned int w)
 
 #else
 
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
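The tlb-flush hunks above all replace the word-sized test mm->cpu_vm_mask == (1UL << cpu) with cpus_equal() against cpumask_of_cpu(cpu); note that the s390 condition needs the negation to preserve the sense of the original != test (restored above). A userspace sketch of that "is this mm local to just this CPU?" check in the scalar representation, illustrative only and not the kernel code:

#include <stdio.h>

typedef unsigned long cpumask_t;

#define cpumask_of_cpu(cpu)	(1UL << (cpu))
#define cpus_equal(m1, m2)	((m1) == (m2))

int main(void)
{
	int cpu = 2;
	cpumask_t cpu_vm_mask = cpumask_of_cpu(2);	/* mm used on cpu 2 only */

	if (!cpus_equal(cpu_vm_mask, cpumask_of_cpu(cpu)))
		printf("mm was active on more than one cpu: global flush\n");
	else
		printf("mm is local to cpu %d: local flush\n", cpu);

	cpu_vm_mask |= cpumask_of_cpu(5);		/* now also used on cpu 5 */
	if (!cpus_equal(cpu_vm_mask, cpumask_of_cpu(cpu)))
		printf("mm was active on more than one cpu: global flush\n");
	return 0;
}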
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 292757aa3176..cd6712997198 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -125,7 +125,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	}
 
 	{
-		unsigned long vm_mask = (1UL << smp_processor_id());
+		int cpu = smp_processor_id();
 
 		/* Even if (mm == old_mm) we _must_ check
 		 * the cpu_vm_mask.  If we do not we could
@@ -133,8 +133,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 		 * smp_flush_tlb_{page,range,mm} on sparc64
 		 * and lazy tlb switches work. -DaveM
 		 */
-		if (!ctx_valid || !(mm->cpu_vm_mask & vm_mask)) {
-			mm->cpu_vm_mask |= vm_mask;
+		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+			cpu_set(cpu, mm->cpu_vm_mask);
 			__flush_tlb_mm(CTX_HWBITS(mm->context),
 				       SECONDARY_CONTEXT);
 		}
 	}
@@ -148,14 +148,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 
 /* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
-	unsigned long vm_mask;
+	int cpu;
 
 	spin_lock(&mm->page_table_lock);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
-	vm_mask = (1UL << smp_processor_id());
-	if (!(mm->cpu_vm_mask & vm_mask))
-		mm->cpu_vm_mask |= vm_mask;
+	cpu = smp_processor_id();
+	if (!cpu_isset(cpu, mm->cpu_vm_mask))
+		cpu_set(cpu, mm->cpu_vm_mask);
 	spin_unlock(&mm->page_table_lock);
 
 	load_secondary_context(mm);
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 533bbe729a1b..faf98d375f2a 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -14,6 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/cpumask.h>
 #include <linux/cache.h>
 
 /* PROM provided per-processor information we need
@@ -68,25 +69,14 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
 
 extern unsigned char boot_cpu_id;
 
-extern unsigned long phys_cpu_present_map;
-#define cpu_possible(cpu) (phys_cpu_present_map & (1UL << (cpu)))
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible(cpu) cpu_isset(cpu, phys_cpu_present_map)
 
-extern unsigned long cpu_online_map;
-#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu)))
-
-extern atomic_t sparc64_num_cpus_online;
-#define num_online_cpus() (atomic_read(&sparc64_num_cpus_online))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 extern atomic_t sparc64_num_cpus_possible;
 #define num_possible_cpus() (atomic_read(&sparc64_num_cpus_possible))
 
-static inline unsigned int any_online_cpu(unsigned long mask)
-{
-	if ((mask &= cpu_online_map) != 0UL)
-		return __ffs(mask);
-	return NR_CPUS;
-}
-
 /*
  * General functions that each host system must provide.
  */
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index a991b9feea3f..846461275b29 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -1,13 +1,14 @@
 #ifndef __UM_SMP_H
 #define __UM_SMP_H
 
-extern unsigned long cpu_online_map;
-
 #ifdef CONFIG_SMP
 
 #include "linux/config.h"
 #include "linux/bitops.h"
 #include "asm/current.h"
+#include "linux/cpumask.h"
+
+extern cpumask_t cpu_online_map;
 
 #define smp_processor_id() (current->thread_info->cpu)
 #define cpu_logical_map(n) (n)
@@ -16,16 +17,11 @@ extern unsigned long cpu_online_map;
 extern int hard_smp_processor_id(void);
 #define NO_PROC_ID -1
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 extern int ncpus;
 #define cpu_possible(cpu) (cpu < ncpus)
 
-extern inline unsigned int num_online_cpus(void)
-{
-	return(hweight32(cpu_online_map));
-}
-
 extern inline void smp_cpus_done(unsigned int maxcpus)
 {
 }
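Several 64-bit architectures gain an hweight64() in this patch, falling back to generic_hweight64() when no population-count instruction is available. The generic version is the classic SWAR reduction; a self-contained sketch of the idea follows (the kernel's exact generic_hweight64() may differ in constants and style):

#include <stdio.h>
#include <stdint.h>

static unsigned int hweight64_sketch(uint64_t w)
{
	w = w - ((w >> 1) & 0x5555555555555555ULL);		/* 2-bit sums */
	w = (w & 0x3333333333333333ULL) +
	    ((w >> 2) & 0x3333333333333333ULL);			/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0fULL;		/* byte sums */
	return (unsigned int)((w * 0x0101010101010101ULL) >> 56); /* total */
}

int main(void)
{
	printf("%u\n", hweight64_sketch(0xffffffffffffffffULL));	/* 64 */
	printf("%u\n", hweight64_sketch(0x8000000000000001ULL));	/* 2 */
	return 0;
}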
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index 92260e080c5a..6830b9ee6174 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -477,6 +477,7 @@ static __inline__ int ffs(int x)
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
+#define hweight64(x) generic_hweight64(x)
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x) generic_hweight8(x)
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 3f4b2a4a6787..bf783aa470ea 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -166,10 +166,10 @@ enum mp_bustype {
 };
 extern unsigned char mp_bus_id_to_type [MAX_MP_BUSSES];
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern unsigned long mp_bus_to_cpumask [MAX_MP_BUSSES];
+extern cpumask_t mp_bus_to_cpumask [MAX_MP_BUSSES];
 
 extern unsigned int boot_cpu_physical_apicid;
-extern unsigned long phys_cpu_present_map;
+extern cpumask_t phys_cpu_present_map;
 extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 13ab8fe70da1..7959579136bd 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -7,6 +7,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 extern int disable_apic;
 #endif
@@ -35,8 +36,8 @@ struct pt_regs;
  */
 extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
+extern cpumask_t phys_cpu_present_map;
+extern cpumask_t cpu_online_map;
 extern volatile unsigned long smp_invalidate_needed;
 extern int pic_mode;
 extern void smp_flush_tlb(void);
@@ -56,36 +57,16 @@ void smp_stop_cpu(void);
 * compresses data structures.
 */
 
-extern volatile unsigned long cpu_callout_map;
+extern cpumask_t cpu_callout_map;
 
-#define cpu_possible(cpu) (cpu_callout_map & (1<<(cpu)))
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
-
-#define for_each_cpu(cpu, mask) \
-	for(mask = cpu_online_map; \
-	    cpu = __ffs(mask), mask != 0; \
-	    mask &= ~(1UL<<cpu))
-
-extern inline unsigned int any_online_cpu(unsigned int mask)
-{
-	if (mask & cpu_online_map)
-		return __ffs(mask & cpu_online_map);
-
-	return NR_CPUS;
-}
-
-extern inline unsigned int num_online_cpus(void)
-{
-	return hweight32(cpu_online_map);
-}
+#define cpu_possible(cpu) cpu_isset(cpu, cpu_callout_map)
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 
 static inline int num_booting_cpus(void)
 {
-	return hweight32(cpu_callout_map);
+	return cpus_weight(cpu_callout_map);
 }
 
-extern volatile unsigned long cpu_callout_map;
-
 #define smp_processor_id() read_pda(cpunumber)
 
 extern __inline int hard_smp_processor_id(void)
@@ -104,7 +85,7 @@ extern inline int safe_smp_processor_id(void)
 	return hard_smp_processor_id();
 }
 
-#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
+#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
 #endif /* !ASSEMBLY */
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
@@ -113,11 +94,16 @@ extern inline int safe_smp_processor_id(void)
 #define INT_DELIVERY_MODE 1     /* logical delivery */
 #define TARGET_CPUS 1
 
+#ifndef ASSEMBLY
+static inline unsigned int cpu_mask_to_apicid(cpumask_const_t cpumask)
+{
+	return cpus_coerce_const(cpumask);
+}
+#endif
 
 #ifndef CONFIG_SMP
 #define stack_smp_processor_id() 0
 #define safe_smp_processor_id() 0
-#define for_each_cpu(x,mask) (void)(mask), (x)=0;
 #define cpu_logical_map(x) (x)
 #else
 #include <asm/thread_info.h>
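The new cpu_mask_to_apicid() above works because in flat logical delivery mode each CPU owns one bit of the 8-bit logical destination, so the APIC destination is literally the scalar value of the mask, which cpus_coerce_const() exposes. A userspace sketch of the idea (not the kernel header), using the plain scalar cpus_coerce() rather than the const variant:

#include <stdio.h>

typedef unsigned long cpumask_t;

#define cpumask_of_cpu(cpu)	(1UL << (cpu))
#define cpus_coerce(map)	((unsigned long)(map))	/* scalar case only */

static unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
	return cpus_coerce(cpumask);	/* mask bit i == logical APIC id bit i */
}

int main(void)
{
	cpumask_t dest = cpumask_of_cpu(1) | cpumask_of_cpu(3);

	printf("APIC destination: 0x%x\n", cpu_mask_to_apicid(dest));	/* 0xa */
	return 0;
}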
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 5ffea4777d3d..c4f359c87973 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -10,18 +10,21 @@
 /* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
 
 extern int fake_node;
 
-extern unsigned long cpu_online_map;
+extern cpumask_t cpu_online_map;
 #define cpu_to_node(cpu) (fake_node ? 0 : (cpu))
 #define memblk_to_node(memblk) (fake_node ? 0 : (memblk))
 #define parent_node(node) (node)
 #define node_to_first_cpu(node) (fake_node ? 0 : (node))
-#define node_to_cpu_mask(node) (fake_node ? cpu_online_map : (1UL << (node)))
+#define node_to_cpu_mask(node) (fake_node ? cpu_online_map : cpumask_of_cpu(node))
 #define node_to_memblk(node) (node)
 
-static inline unsigned long pcibus_to_cpumask(int bus)
+static inline cpumask_t pcibus_to_cpumask(int bus)
 {
-	return mp_bus_to_cpumask[bus] & cpu_online_map;
+	cpumask_t ret;
+
+	cpus_and(ret, mp_bus_to_cpumask[bus], cpu_online_map);
+	return ret;
 }
 
 #define NODE_BALANCE_RATE 30	/* CHECKME */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
new file mode 100644
index 000000000000..74b89ea0aae5
--- /dev/null
+++ b/include/linux/bitmap.h
@@ -0,0 +1,159 @@
+#ifndef __LINUX_BITMAP_H
+#define __LINUX_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+static inline int bitmap_empty(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_full(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (~bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (~bitmap[k] & ((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline int bitmap_equal(const unsigned long *bitmap1,
+		unsigned long *bitmap2, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap1[k] != bitmap2[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if ((bitmap1[k] ^ bitmap2[k]) &
+				((1UL << (bits % BITS_PER_LONG)) - 1))
+			return 0;
+
+	return 1;
+}
+
+static inline void bitmap_complement(unsigned long *bitmap, int bits)
+{
+	int k;
+
+	for (k = 0; k < BITS_TO_LONGS(bits); ++k)
+		bitmap[k] = ~bitmap[k];
+}
+
+static inline void bitmap_clear(unsigned long *bitmap, int bits)
+{
+	CLEAR_BITMAP((unsigned long *)bitmap, bits);
+}
+
+static inline void bitmap_fill(unsigned long *bitmap, int bits)
+{
+	memset(bitmap, 0xff, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_copy(unsigned long *dst,
+		const unsigned long *src, int bits)
+{
+	memcpy(dst, src, BITS_TO_LONGS(bits)*sizeof(unsigned long));
+}
+
+static inline void bitmap_shift_right(unsigned long *dst,
+		const unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shr_tmp, bits);
+
+	bitmap_clear(__shr_tmp, bits);
+	for (k = 0; k < bits - shift; ++k)
+		if (test_bit(k + shift, src))
+			set_bit(k, __shr_tmp);
+	bitmap_copy(dst, __shr_tmp, bits);
+}
+
+static inline void bitmap_shift_left(unsigned long *dst,
+		const unsigned long *src, int shift, int bits)
+{
+	int k;
+	DECLARE_BITMAP(__shl_tmp, bits);
+
+	bitmap_clear(__shl_tmp, bits);
+	for (k = bits - 1; k >= shift; --k)
+		if (test_bit(k - shift, src))
+			set_bit(k, __shl_tmp);
+	bitmap_copy(dst, __shl_tmp, bits);
+}
+
+static inline void bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+		const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] & bitmap2[k];
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+		const unsigned long *bitmap2, int bits)
+{
+	int k;
+	int nr = BITS_TO_LONGS(bits);
+
+	for (k = 0; k < nr; k++)
+		dst[k] = bitmap1[k] | bitmap2[k];
+}
+
+#if BITS_PER_LONG == 32
+static inline int bitmap_weight(const unsigned long *bitmap, int bits)
+{
+	int k, w = 0, lim = bits/BITS_PER_LONG;
+
+	for (k = 0; k < lim; k++)
+		w += hweight32(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight32(bitmap[k] &
+				((1UL << (bits % BITS_PER_LONG)) - 1));
+
+	return w;
+}
+#else
+static inline int bitmap_weight(const unsigned long *bitmap, int bits)
+{
+	int k, w = 0, lim = bits/BITS_PER_LONG;
+
+	for (k = 0; k < lim; k++)
+		w += hweight64(bitmap[k]);
+
+	if (bits % BITS_PER_LONG)
+		w += hweight64(bitmap[k] &
+				((1UL << (bits % BITS_PER_LONG)) - 1));
+
+	return w;
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __LINUX_BITMAP_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
new file mode 100644
index 000000000000..c43921ec27fe
--- /dev/null
+++ b/include/linux/cpumask.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#if NR_CPUS > BITS_PER_LONG && NR_CPUS != 1
+#define CPU_ARRAY_SIZE		BITS_TO_LONGS(NR_CPUS)
+
+struct cpumask
+{
+	unsigned long mask[CPU_ARRAY_SIZE];
+};
+
+typedef struct cpumask cpumask_t;
+
+#else
+typedef unsigned long cpumask_t;
+#endif
+
+#ifdef CONFIG_SMP
+#if NR_CPUS > BITS_PER_LONG
+#include <asm-generic/cpumask_array.h>
+#else
+#include <asm-generic/cpumask_arith.h>
+#endif
+#else
+#include <asm-generic/cpumask_up.h>
+#endif
+
+#if NR_CPUS <= 4*BITS_PER_LONG
+#include <asm-generic/cpumask_const_value.h>
+#else
+#include <asm-generic/cpumask_const_reference.h>
+#endif
+
+
+#ifdef CONFIG_SMP
+
+extern cpumask_t cpu_online_map;
+
+#define num_online_cpus()	cpus_weight(cpu_online_map)
+#define cpu_online(cpu)		cpu_isset(cpu, cpu_online_map)
+#else
+#define	cpu_online_map		cpumask_of_cpu(0)
+#define num_online_cpus()	1
+#define cpu_online(cpu)		({ BUG_ON((cpu) != 0); 1; })
+#endif
+
+static inline int next_online_cpu(int cpu, cpumask_t map)
+{
+	do
+		cpu = next_cpu_const(cpu, map);
+	while (cpu < NR_CPUS && !cpu_online(cpu));
+	return cpu;
+}
+
+#define for_each_cpu(cpu, map)						\
+	for (cpu = first_cpu_const(map);				\
+		cpu < NR_CPUS;						\
+		cpu = next_cpu_const(cpu,map))
+
+#define for_each_online_cpu(cpu, map)					\
+	for (cpu = first_cpu_const(map);				\
+		cpu < NR_CPUS;						\
+		cpu = next_online_cpu(cpu,map))
+
+#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7b34a416ad05..59d4d291d455 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -70,7 +70,7 @@
 	.prio		= MAX_PRIO-20,					\
 	.static_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
-	.cpus_allowed	= ~0UL,						\
+	.cpus_allowed	= CPU_MASK_ALL,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
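The new bitmap helpers deliberately mask off bits beyond the requested width in the final word, so a bitmap of, say, 100 bits behaves the same whether or not the width fills the backing array exactly. A userspace model of bitmap_weight() showing that final-word masking; this is a sketch, not the kernel header, and it assumes GCC's __builtin_popcountl in place of hweight64():

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static int bitmap_weight(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits / BITS_PER_LONG;

	for (k = 0; k < lim; k++)
		w += __builtin_popcountl(bitmap[k]);
	if (bits % BITS_PER_LONG)	/* ignore bits past the width */
		w += __builtin_popcountl(bitmap[k] &
				((1UL << (bits % BITS_PER_LONG)) - 1));
	return w;
}

int main(void)
{
	enum { NBITS = 100 };	/* e.g. NR_CPUS = 100 > BITS_PER_LONG */
	unsigned long map[BITS_TO_LONGS(NBITS)] = { 0 };

	map[0] = ~0UL;		/* bits 0..63 */
	map[1] = ~0UL;		/* only bits up to 99 may count */
	printf("weight: %d\n", bitmap_weight(map, NBITS));	/* 100 */
	return 0;
}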
void (*end)(unsigned int irq);
-	void (*set_affinity)(unsigned int irq, unsigned long mask);
+	void (*set_affinity)(unsigned int irq, cpumask_t dest);
 };
 
 typedef struct hw_interrupt_type  hw_irq_controller;
diff --git a/include/linux/node.h b/include/linux/node.h
index d1c28e6a0b18..6b8d64c16e5e 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -20,9 +20,10 @@
 #define _LINUX_NODE_H_
 
 #include <linux/sysdev.h>
+#include <linux/cpumask.h>
 
 struct node {
-	unsigned long cpumap;	/* Bitmap of CPUs on the Node */
+	cpumask_t cpumap;	/* Bitmap of CPUs on the Node */
 	struct sys_device	sysdev;
 };
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e9e2287e1e1c..0287576eea0d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
+#include <linux/cpumask.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -67,7 +68,7 @@ struct rcu_ctrlblk {
 	spinlock_t	mutex;		/* Guard this struct              */
 	long		curbatch;	/* Current batch number.	  */
 	long		maxbatch;	/* Max requested batch number.    */
-	unsigned long	rcu_cpu_mask;	/* CPUs that need to switch in order    */
+	cpumask_t	rcu_cpu_mask;	/* CPUs that need to switch in order    */
 					/* for current batch to proceed.        */
 };
 
@@ -114,7 +115,7 @@ static inline int rcu_pending(int cpu)
 	     rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
 	    (list_empty(&RCU_curlist(cpu)) &&
 			 !list_empty(&RCU_nxtlist(cpu))) ||
-	    test_bit(cpu, &rcu_ctrlblk.rcu_cpu_mask))
+	    cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
 		return 1;
 	else
 		return 0;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c96f1df2e5c..61ec12b5b77a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -12,6 +12,7 @@
 #include <linux/jiffies.h>
 #include <linux/rbtree.h>
 #include <linux/thread_info.h>
+#include <linux/cpumask.h>
 #include <asm/system.h>
 #include <asm/semaphore.h>
@@ -203,7 +204,7 @@ struct mm_struct {
 	unsigned long arg_start, arg_end, env_start, env_end;
 	unsigned long rss, total_vm, locked_vm;
 	unsigned long def_flags;
-	unsigned long cpu_vm_mask;
+	cpumask_t cpu_vm_mask;
 	unsigned long swap_address;
 
 	unsigned dumpable:1;
@@ -342,7 +343,7 @@ struct task_struct {
 	unsigned long last_run;
 
 	unsigned long policy;
-	unsigned long cpus_allowed;
+	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
 	struct list_head tasks;
@@ -489,9 +490,9 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, unsigned long new_mask)
+static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
 	return 0;
 }
diff --git a/include/linux/smp.h b/include/linux/smp.h
index ade73fe7969b..c1a74b2345bc 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -102,9 +102,6 @@ void smp_prepare_boot_cpu(void);
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
-#define cpu_online_map				1
-#define cpu_online(cpu)				({ BUG_ON((cpu) != 0); 1; })
-#define num_online_cpus()			1
 #define num_booting_cpus()			1
 #define cpu_possible(cpu)			({ BUG_ON((cpu) != 0); 1; })
 #define smp_prepare_boot_cpu()			do {} while (0)
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a7af2524ee7e..5f7543009717 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -27,6 +27,7 @@
 #ifndef _LINUX_TOPOLOGY_H
 #define _LINUX_TOPOLOGY_H
 
+#include <linux/cpumask.h>
 #include <linux/bitops.h>
 #include <linux/mmzone.h>
 #include <linux/smp.h>
@@ -34,7 +35,12 @@
 #include <asm/topology.h>
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node) (hweight_long(node_to_cpumask(node)))
+#define nr_cpus_node(node)						\
+	({								\
+		cpumask_t __tmp__;					\
+		__tmp__ = node_to_cpumask(node);			\
+		cpus_weight(__tmp__);					\
+	})
#endif
 
 static inline int __next_node_with_cpus(int node)
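The reworked nr_cpus_node() copies the node's mask into a local cpumask_t before counting, so the macro works unchanged whether cpumask_t is a scalar or a struct returned by value. A userspace model with a made-up two-node topology (node_masks and node_to_cpumask here are hypothetical stand-ins, not the kernel's; the macro relies on the same GCC statement-expression extension the kernel version uses):

#include <stdio.h>

typedef unsigned long cpumask_t;	/* scalar flavor for the demo */

#define cpus_weight(map)	__builtin_popcountl(map)

static cpumask_t node_masks[2] = { 0x0fUL, 0xf0UL };	/* invented topology */
#define node_to_cpumask(node)	(node_masks[node])

#define nr_cpus_node(node)				\
	({						\
		cpumask_t __tmp__;			\
		__tmp__ = node_to_cpumask(node);	\
		cpus_weight(__tmp__);			\
	})

int main(void)
{
	printf("cpus on node 0: %d\n", nr_cpus_node(0));	/* 4 */
	printf("cpus on node 1: %d\n", nr_cpus_node(1));	/* 4 */
	return 0;
}

The temporary matters because node_to_cpumask() may expand to an expression that cannot have its address taken; copying it into __tmp__ gives cpus_weight() a real object to inspect regardless of representation.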
