author     Andrew Morton <akpm@osdl.org>          2004-06-23 18:52:44 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2004-06-23 18:52:44 -0700
commit     a3dcb7f41eced06d4e43365fefd98a3b9b48e340
tree       6c07c11b706109477508c6fa1c0551a38a4e5f2b
parent     02d7effd5c525290aad9c0fa491dd9a3f035380e
[PATCH] clean up cpumask_t temporaries
From: Rusty Russell <rusty@rustcorp.com.au>

Paul Jackson's cpumask tour-de-force lets us get rid of the local
cpumask_t temporaries we used to declare just to hold CPU_MASK_ALL so it
could be handed to functions.  Passing CPU_MASK_ALL directly used to
break when NR_CPUS > BITS_PER_LONG.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
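For context, a minimal standalone sketch of the pattern involved, assuming
a simplified mask type and a stand-in set_cpus_allowed() (these are
illustrative, not the kernel's actual cpumask.h definitions): when NR_CPUS
exceeds BITS_PER_LONG the mask is a struct wrapping an array of longs, and
defining the "all CPUs" constant as a GNU C compound literal is what allows
it to be passed to a function directly, making the temporaries removed by
this patch unnecessary.

/* Sketch only, GNU C (range designators); not the real kernel headers. */
#include <stdio.h>

#define NR_CPUS        128
#define BITS_PER_LONG  64
#define MASK_LONGS     ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef struct { unsigned long bits[MASK_LONGS]; } cpumask_t;

/* Compound literal: usable both as an initializer and as a bare expression. */
#define CPU_MASK_ALL ((cpumask_t){ { [0 ... MASK_LONGS - 1] = ~0UL } })

/* Stand-in for the real API, which takes a task pointer as well. */
static void set_cpus_allowed(cpumask_t mask)
{
	printf("first word of mask: %lx\n", mask.bits[0]);
}

int main(void)
{
	/* Old style: a temporary declared only so it could be passed. */
	cpumask_t all = CPU_MASK_ALL;
	set_cpus_allowed(all);

	/* New style after the cpumask rework: pass the macro directly. */
	set_cpus_allowed(CPU_MASK_ALL);
	return 0;
}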
-rw-r--r--   arch/ppc64/kernel/irq.c                      3
-rw-r--r--   arch/ppc64/kernel/rtasd.c                    3
-rw-r--r--   arch/ppc64/kernel/xics.c                     6
-rw-r--r--   include/asm-i386/mach-numaq/mach_apic.h      3
-rw-r--r--   include/asm-i386/mach-summit/mach_apic.h     3
-rw-r--r--   kernel/kmod.c                                3
-rw-r--r--   kernel/kthread.c                             3
-rw-r--r--   kernel/sched.c                               5
8 files changed, 10 insertions(+), 19 deletions(-)
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index 7335442e4178..004c3c1a034e 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -738,7 +738,6 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
irq_desc_t *desc = get_irq_desc(irq);
int ret;
cpumask_t new_value, tmp;
- cpumask_t allcpus = CPU_MASK_ALL;
if (!desc->handler->set_affinity)
return -EIO;
@@ -753,7 +752,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
* NR_CPUS == 32 and cpumask is a long), so we mask it here to
* be consistent.
*/
- cpus_and(new_value, new_value, allcpus);
+ cpus_and(new_value, new_value, CPU_MASK_ALL);
/*
* Grab lock here so cpu_online_map can't change, and also
diff --git a/arch/ppc64/kernel/rtasd.c b/arch/ppc64/kernel/rtasd.c
index 243d2ee8de8a..aa649a24a947 100644
--- a/arch/ppc64/kernel/rtasd.c
+++ b/arch/ppc64/kernel/rtasd.c
@@ -364,7 +364,6 @@ static int rtasd(void *unused)
unsigned int err_type;
int cpu = 0;
int event_scan = rtas_token("event-scan");
- cpumask_t all = CPU_MASK_ALL;
int rc;
daemonize("rtasd");
@@ -419,7 +418,7 @@ static int rtasd(void *unused)
for (;;) {
set_cpus_allowed(current, cpumask_of_cpu(cpu));
do_event_scan(event_scan);
- set_cpus_allowed(current, all);
+ set_cpus_allowed(current, CPU_MASK_ALL);
/* Drop hotplug lock, and sleep for a bit (at least
* one second since some machines have problems if we
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
index 1d9cf20a2900..32adc8c22953 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/ppc64/kernel/xics.c
@@ -240,14 +240,13 @@ static unsigned int real_irq_to_virt(unsigned int real_irq)
static int get_irq_server(unsigned int irq)
{
cpumask_t cpumask = irq_affinity[irq];
- cpumask_t allcpus = CPU_MASK_ALL;
cpumask_t tmp = CPU_MASK_NONE;
unsigned int server;
#ifdef CONFIG_IRQ_ALL_CPUS
/* For the moment only implement delivery to all cpus or one cpu */
if (smp_threads_ready) {
- if (cpus_equal(cpumask, allcpus)) {
+ if (cpus_equal(cpumask, CPU_MASK_ALL)) {
server = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask);
@@ -616,7 +615,6 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
long status;
unsigned long xics_status[2];
unsigned long newmask;
- cpumask_t allcpus = CPU_MASK_ALL;
cpumask_t tmp = CPU_MASK_NONE;
irq = virt_irq_to_real(irq_offset_down(virq));
@@ -632,7 +630,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
}
/* For the moment only implement delivery to all cpus or one cpu */
- if (cpus_equal(cpumask, allcpus)) {
+ if (cpus_equal(cpumask, CPU_MASK_ALL)) {
newmask = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask);
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h
index e40c308b15d3..b852593a1c7b 100644
--- a/include/asm-i386/mach-numaq/mach_apic.h
+++ b/include/asm-i386/mach-numaq/mach_apic.h
@@ -8,8 +8,7 @@
static inline cpumask_t target_cpus(void)
{
- cpumask_t tmp = CPU_MASK_ALL;
- return tmp;
+ return CPU_MASK_ALL;
}
#define TARGET_CPUS (target_cpus())
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 4bf36ddba96d..214263a48f71 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -19,8 +19,7 @@
static inline cpumask_t target_cpus(void)
{
- cpumask_t tmp = CPU_MASK_ALL;
- return tmp;
+ return CPU_MASK_ALL;
}
#define TARGET_CPUS (target_cpus())
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ea62192b7597..579269c38a3b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -154,7 +154,6 @@ static int ____call_usermodehelper(void *data)
{
struct subprocess_info *sub_info = data;
int retval;
- cpumask_t mask = CPU_MASK_ALL;
/* Unblock all signals. */
flush_signals(current);
@@ -165,7 +164,7 @@ static int ____call_usermodehelper(void *data)
spin_unlock_irq(&current->sighand->siglock);
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, CPU_MASK_ALL);
retval = -EPERM;
if (current->fs->root)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index da0ec5b25cdf..5689ebb1a250 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -65,7 +65,6 @@ static int kthread(void *_create)
void *data;
sigset_t blocked;
int ret = -EINTR;
- cpumask_t mask = CPU_MASK_ALL;
kthread_exit_files();
@@ -79,7 +78,7 @@ static int kthread(void *_create)
flush_signals(current);
/* By default we can run anywhere, unlike keventd. */
- set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, CPU_MASK_ALL);
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/sched.c b/kernel/sched.c
index 017b59b8de5e..95f18cf8a5b6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3913,16 +3913,15 @@ void __init sched_init(void)
/* Set up an initial dummy domain for early boot */
static struct sched_domain sched_domain_init;
static struct sched_group sched_group_init;
- cpumask_t cpu_mask_all = CPU_MASK_ALL;
memset(&sched_domain_init, 0, sizeof(struct sched_domain));
- sched_domain_init.span = cpu_mask_all;
+ sched_domain_init.span = CPU_MASK_ALL;
sched_domain_init.groups = &sched_group_init;
sched_domain_init.last_balance = jiffies;
sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
memset(&sched_group_init, 0, sizeof(struct sched_group));
- sched_group_init.cpumask = cpu_mask_all;
+ sched_group_init.cpumask = CPU_MASK_ALL;
sched_group_init.next = &sched_group_init;
sched_group_init.cpu_power = SCHED_LOAD_SCALE;
#endif