Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile        |  18
-rw-r--r--  kernel/auditsc.c       |   7
-rw-r--r--  kernel/configs.c       |  25
-rw-r--r--  kernel/cpu.c           |   8
-rw-r--r--  kernel/kmod.c          |   3
-rw-r--r--  kernel/kthread.c       |   3
-rw-r--r--  kernel/power/process.c |   1
-rw-r--r--  kernel/rcupdate.c      | 229
-rw-r--r--  kernel/sched.c         |  39
-rw-r--r--  kernel/sysctl.c        |  12
10 files changed, 207 insertions(+), 138 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2b7b352b1a38..47f98594e9e5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -33,23 +33,7 @@ ifneq ($(CONFIG_IA64),y)
CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
endif
-# configs.o uses generated files - dependecies must be listed explicitly
-$(obj)/configs.o: $(obj)/ikconfig.h
-
-ifdef CONFIG_IKCONFIG_PROC
$(obj)/configs.o: $(obj)/config_data.h
-endif
-
-# ikconfig.h contains all the selected config entries - generated
-# from top-level Makefile and .config. Info from ikconfig.h can
-# be extracted from the kernel binary.
-
-quiet_cmd_ikconfig = IKCFG $@
- cmd_ikconfig = $(CONFIG_SHELL) $< .config $(srctree)/Makefile > $@
-
-targets += ikconfig.h
-$(obj)/ikconfig.h: scripts/mkconfigs .config $(srctree)/Makefile FORCE
- $(call if_changed,ikconfig)
# config_data.h contains the same information as ikconfig.h but gzipped.
# Info from config_data can be extracted from /proc/config*
@@ -58,7 +42,7 @@ $(obj)/config_data.gz: .config FORCE
$(call if_changed,gzip)
quiet_cmd_ikconfiggz = IKCFG $@
- cmd_ikconfiggz = cat $< | scripts/bin2c kernel_config_data > $@
+ cmd_ikconfiggz = (echo "const char kernel_config_data[] = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;") > $@
targets += config_data.h
$(obj)/config_data.h: $(obj)/config_data.gz FORCE
$(call if_changed,ikconfiggz)
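
The new rule wraps the scripts/bin2c output in a single char array. As a rough sketch (illustrative escape bytes, not literal bin2c output), the generated kernel/config_data.h now expands to something like:

/* Sketch of the generated kernel/config_data.h (illustrative bytes only) */
const char kernel_config_data[] = MAGIC_START
	"\037\213\010"	/* gzip header: start of the compressed .config */
	/* ... remaining string literals emitted by scripts/bin2c ... */
MAGIC_END;

MAGIC_START and MAGIC_END are deliberately left undefined by the generated file; the includer decides what brackets the payload, which is what kernel/configs.c below uses to plant the IKCFG_ST/IKCFG_ED markers.
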
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 342b57141fd9..e688c73f6a9e 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -177,9 +177,10 @@ static inline int audit_add_rule(struct audit_entry *entry,
return 0;
}
-static void audit_free_rule(void *arg)
+static void audit_free_rule(struct rcu_head *head)
{
- kfree(arg);
+ struct audit_entry *e = container_of(head, struct audit_entry, rcu);
+ kfree(e);
}
/* Note that audit_add_rule and audit_del_rule are called via
@@ -195,7 +196,7 @@ static inline int audit_del_rule(struct audit_rule *rule,
list_for_each_entry(e, list, list) {
if (!audit_compare_rule(rule, &e->rule)) {
list_del_rcu(&e->list);
- call_rcu(&e->rcu, audit_free_rule, e);
+ call_rcu(&e->rcu, audit_free_rule);
return 0;
}
}
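
The auditsc.c hunks show the conversion pattern the new call_rcu() prototype (see kernel/rcupdate.c below) requires everywhere: the callback now receives the rcu_head itself and recovers the enclosing object with container_of(), instead of being handed an opaque void *arg. A minimal sketch for a hypothetical struct foo (not part of this patch):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	struct rcu_head rcu;
	/* payload ... */
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_del(struct foo *f)
{
	list_del_rcu(&f->list);
	call_rcu(&f->rcu, foo_free_rcu);	/* old form was call_rcu(&f->rcu, foo_free_rcu, f) */
}
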
diff --git a/kernel/configs.c b/kernel/configs.c
index 326ab7b214f6..d18a944ad249 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -34,13 +34,26 @@
/**************************************************/
/* the actual current config file */
-/* This one is for extraction from the kernel binary file image. */
-#include "ikconfig.h"
+/*
+ * Define kernel_config_data and kernel_config_data_size, which contains the
+ * wrapped and compressed configuration file. The file is first compressed
+ * with gzip and then bounded by two eight byte magic numbers to allow
+ * extraction from a binary kernel image:
+ *
+ * IKCFG_ST
+ * <image>
+ * IKCFG_ED
+ */
+#define MAGIC_START "IKCFG_ST"
+#define MAGIC_END "IKCFG_ED"
+#include "config_data.h"
-#ifdef CONFIG_IKCONFIG_PROC
-/* This is the data that can be read from /proc/config.gz. */
-#include "config_data.h"
+#define MAGIC_SIZE (sizeof(MAGIC_START) - 1)
+#define kernel_config_data_size \
+ (sizeof(kernel_config_data) - 1 - MAGIC_SIZE * 2)
+
+#ifdef CONFIG_IKCONFIG_PROC
/**************************************************/
/* globals and useful constants */
@@ -58,7 +71,7 @@ ikconfig_read_current(struct file *file, char __user *buf,
return 0;
count = min(len, (size_t)(kernel_config_data_size - pos));
- if(copy_to_user(buf, kernel_config_data + pos, count))
+ if (copy_to_user(buf, kernel_config_data + MAGIC_SIZE + pos, count))
return -EFAULT;
*offset += count;
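
Because the gzipped configuration is now bracketed by fixed eight-byte markers, it can be recovered from an uncompressed kernel image even without /proc. A userspace sketch (hypothetical helper, not the in-tree extraction script) that dumps the payload between IKCFG_ST and IKCFG_ED for piping into gunzip:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *find(const char *buf, size_t len, const char *pat)
{
	size_t plen = strlen(pat);

	for (; len >= plen; buf++, len--)
		if (memcmp(buf, pat, plen) == 0)
			return buf;
	return NULL;
}

int main(int argc, char **argv)
{
	const char *start, *end;
	char *buf;
	long size;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	buf = malloc(size);
	if (!buf || fread(buf, 1, size, f) != (size_t)size)
		return 1;

	start = find(buf, size, "IKCFG_ST");
	if (!start)
		return 1;
	start += strlen("IKCFG_ST");

	end = find(start, size - (start - buf), "IKCFG_ED");
	if (!end)
		return 1;

	/* everything in between is the gzip stream of .config */
	fwrite(start, 1, end - start, stdout);
	return 0;
}

Usage would be along the lines of ./dump_ikcfg vmlinux | gunzip, assuming the image itself is not compressed.
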
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 72b984c67eb3..083521327e64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,14 +20,6 @@
DECLARE_MUTEX(cpucontrol);
static struct notifier_block *cpu_chain;
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map;
-EXPORT_SYMBOL(cpu_present_map);
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ea62192b7597..579269c38a3b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -154,7 +154,6 @@ static int ____call_usermodehelper(void *data)
{
struct subprocess_info *sub_info = data;
int retval;
- cpumask_t mask = CPU_MASK_ALL;
/* Unblock all signals. */
flush_signals(current);
@@ -165,7 +164,7 @@ static int ____call_usermodehelper(void *data)
spin_unlock_irq(&current->sighand->siglock);
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, CPU_MASK_ALL);
retval = -EPERM;
if (current->fs->root)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index da0ec5b25cdf..5689ebb1a250 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -65,7 +65,6 @@ static int kthread(void *_create)
void *data;
sigset_t blocked;
int ret = -EINTR;
- cpumask_t mask = CPU_MASK_ALL;
kthread_exit_files();
@@ -79,7 +78,7 @@ static int kthread(void *_create)
flush_signals(current);
/* By default we can run anywhere, unlike keventd. */
- set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, CPU_MASK_ALL);
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 347435415eaf..0d1f63ec7287 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,7 +109,6 @@ void thaw_processes(void)
wake_up_process(p);
} else
printk(KERN_INFO " Strange, %s not stopped\n", p->comm );
- wake_up_process(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 13a1b5a5825f..f35f944abe3d 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -47,8 +47,17 @@
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
- { .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
- .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
+ { .cur = -300, .completed = -300, .lock = SEQCNT_ZERO };
+
+/* Bookkeeping of the progress of the grace period */
+struct {
+ spinlock_t mutex; /* Guard this struct and writes to rcu_ctrlblk */
+ cpumask_t rcu_cpu_mask; /* CPUs that need to switch in order */
+ /* for current batch to proceed. */
+} rcu_state ____cacheline_maxaligned_in_smp =
+ {.mutex = SPIN_LOCK_UNLOCKED, .rcu_cpu_mask = CPU_MASK_NONE };
+
+
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
/* Fake initialization required by compiler */
@@ -59,23 +68,24 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
* call_rcu - Queue an RCU update request.
* @head: structure to be used for queueing the RCU updates.
* @func: actual update function to be invoked after the grace period
- * @arg: argument to be passed to the update function
*
* The update function will be invoked as soon as all CPUs have performed
* a context switch or been seen in the idle loop or in a user process.
* The read-side of critical section that use call_rcu() for updation must
* be protected by rcu_read_lock()/rcu_read_unlock().
*/
-void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
{
int cpu;
unsigned long flags;
head->func = func;
- head->arg = arg;
+ head->next = NULL;
local_irq_save(flags);
cpu = smp_processor_id();
- list_add_tail(&head->list, &RCU_nxtlist(cpu));
+ *RCU_nxttail(cpu) = head;
+ RCU_nxttail(cpu) = &head->next;
local_irq_restore(flags);
}
@@ -83,39 +93,70 @@ void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg
* Invoke the completed RCU callbacks. They are expected to be in
* a per-cpu list.
*/
-static void rcu_do_batch(struct list_head *list)
+static void rcu_do_batch(struct rcu_head *list)
{
- struct list_head *entry;
- struct rcu_head *head;
+ struct rcu_head *next;
- while (!list_empty(list)) {
- entry = list->next;
- list_del(entry);
- head = list_entry(entry, struct rcu_head, list);
- head->func(head->arg);
+ while (list) {
+ next = list->next;
+ list->func(list);
+ list = next;
}
}
/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ * This is done by rcu_start_batch. The start is not broadcasted to
+ * all cpus, they must pick this up by comparing rcu_ctrlblk.cur with
+ * RCU_quiescbatch(cpu). All cpus are recorded in the
+ * rcu_state.rcu_cpu_mask bitmap.
+ * - All cpus must go through a quiescent state.
+ * Since the start of the grace period is not broadcasted, at least two
+ * calls to rcu_check_quiescent_state are required:
+ * The first call just notices that a new grace period is running. The
+ * following calls check if there was a quiescent state since the beginning
+ * of the grace period. If so, it updates rcu_state.rcu_cpu_mask. If
+ * the bitmap is empty, then the grace period is completed.
+ * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
+ * period (if necessary).
+ */
+/*
* Register a new batch of callbacks, and start it up if there is currently no
* active batch and the batch to be registered has not already occurred.
- * Caller must hold the rcu_ctrlblk lock.
+ * Caller must hold rcu_state.mutex.
*/
-static void rcu_start_batch(long newbatch)
+static void rcu_start_batch(int next_pending)
{
- cpumask_t active;
-
- if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch)) {
- rcu_ctrlblk.maxbatch = newbatch;
+ if (next_pending)
+ rcu_ctrlblk.next_pending = 1;
+
+ if (rcu_ctrlblk.next_pending &&
+ rcu_ctrlblk.completed == rcu_ctrlblk.cur) {
+ /* Can't change, since spin lock held. */
+ cpus_andnot(rcu_state.rcu_cpu_mask, cpu_online_map,
+ nohz_cpu_mask);
+ write_seqcount_begin(&rcu_ctrlblk.lock);
+ rcu_ctrlblk.next_pending = 0;
+ rcu_ctrlblk.cur++;
+ write_seqcount_end(&rcu_ctrlblk.lock);
}
- if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
- !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
- return;
+}
+
+/*
+ * cpu went through a quiescent state since the beginning of the grace period.
+ * Clear it from the cpu mask and complete the grace period if it was the last
+ * cpu. Start another grace period if someone has further entries pending
+ */
+static void cpu_quiet(int cpu)
+{
+ cpu_clear(cpu, rcu_state.rcu_cpu_mask);
+ if (cpus_empty(rcu_state.rcu_cpu_mask)) {
+ /* batch completed ! */
+ rcu_ctrlblk.completed = rcu_ctrlblk.cur;
+ rcu_start_batch(0);
}
- /* Can't change, since spin lock held. */
- active = nohz_cpu_mask;
- cpus_complement(active);
- cpus_and(rcu_ctrlblk.rcu_cpu_mask, cpu_online_map, active);
}
/*
@@ -127,7 +168,19 @@ static void rcu_check_quiescent_state(void)
{
int cpu = smp_processor_id();
- if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
+ if (RCU_quiescbatch(cpu) != rcu_ctrlblk.cur) {
+ /* new grace period: record qsctr value. */
+ RCU_qs_pending(cpu) = 1;
+ RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
+ RCU_quiescbatch(cpu) = rcu_ctrlblk.cur;
+ return;
+ }
+
+ /* Grace period already completed for this cpu?
+ * qs_pending is checked instead of the actual bitmap to avoid
+ * cacheline thrashing.
+ */
+ if (!RCU_qs_pending(cpu))
return;
/*
@@ -135,27 +188,19 @@ static void rcu_check_quiescent_state(void)
* we may miss one quiescent state of that CPU. That is
* tolerable. So no need to disable interrupts.
*/
- if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
- RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
- return;
- }
if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
return;
+ RCU_qs_pending(cpu) = 0;
- spin_lock(&rcu_ctrlblk.mutex);
- if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
- goto out_unlock;
-
- cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
- RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
- if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
- goto out_unlock;
-
- rcu_ctrlblk.curbatch++;
- rcu_start_batch(rcu_ctrlblk.maxbatch);
+ spin_lock(&rcu_state.mutex);
+ /*
+ * RCU_quiescbatch/batch.cur and the cpu bitmap can come out of sync
+ * during cpu startup. Ignore the quiescent state.
+ */
+ if (likely(RCU_quiescbatch(cpu) == rcu_ctrlblk.cur))
+ cpu_quiet(cpu);
-out_unlock:
- spin_unlock(&rcu_ctrlblk.mutex);
+ spin_unlock(&rcu_state.mutex);
}
@@ -185,25 +230,11 @@ static void rcu_offline_cpu(int cpu)
* we can block indefinitely waiting for it, so flush
* it here
*/
- spin_lock_irq(&rcu_ctrlblk.mutex);
- if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
- goto unlock;
-
- cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
- if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
- rcu_ctrlblk.curbatch++;
- /* We may avoid calling start batch if
- * we are starting the batch only
- * because of the DEAD CPU (the current
- * CPU will start a new batch anyway for
- * the callbacks we will move to current CPU).
- * However, we will avoid this optimisation
- * for now.
- */
- rcu_start_batch(rcu_ctrlblk.maxbatch);
- }
+ spin_lock_bh(&rcu_state.mutex);
+ if (rcu_ctrlblk.cur != rcu_ctrlblk.completed)
+ cpu_quiet(cpu);
unlock:
- spin_unlock_irq(&rcu_ctrlblk.mutex);
+ spin_unlock_bh(&rcu_state.mutex);
rcu_move_batch(&RCU_curlist(cpu));
rcu_move_batch(&RCU_nxtlist(cpu));
@@ -213,39 +244,59 @@ unlock:
#endif
+void rcu_restart_cpu(int cpu)
+{
+ spin_lock_bh(&rcu_state.mutex);
+ RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
+ RCU_qs_pending(cpu) = 0;
+ spin_unlock_bh(&rcu_state.mutex);
+}
+
/*
* This does the RCU processing work from tasklet context.
*/
static void rcu_process_callbacks(unsigned long unused)
{
int cpu = smp_processor_id();
- LIST_HEAD(list);
+ struct rcu_head *rcu_list = NULL;
- if (!list_empty(&RCU_curlist(cpu)) &&
- rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
- __list_splice(&RCU_curlist(cpu), &list);
- INIT_LIST_HEAD(&RCU_curlist(cpu));
+ if (RCU_curlist(cpu) &&
+ !rcu_batch_before(rcu_ctrlblk.completed, RCU_batch(cpu))) {
+ rcu_list = RCU_curlist(cpu);
+ RCU_curlist(cpu) = NULL;
}
local_irq_disable();
- if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
- __list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
- INIT_LIST_HEAD(&RCU_nxtlist(cpu));
+ if (RCU_nxtlist(cpu) && !RCU_curlist(cpu)) {
+ int next_pending, seq;
+
+ RCU_curlist(cpu) = RCU_nxtlist(cpu);
+ RCU_nxtlist(cpu) = NULL;
+ RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
local_irq_enable();
/*
* start the next batch of callbacks
*/
- spin_lock(&rcu_ctrlblk.mutex);
- RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
- rcu_start_batch(RCU_batch(cpu));
- spin_unlock(&rcu_ctrlblk.mutex);
+ do {
+ seq = read_seqcount_begin(&rcu_ctrlblk.lock);
+ /* determine batch number */
+ RCU_batch(cpu) = rcu_ctrlblk.cur + 1;
+ next_pending = rcu_ctrlblk.next_pending;
+ } while (read_seqcount_retry(&rcu_ctrlblk.lock, seq));
+
+ if (!next_pending) {
+ /* and start it/schedule start if it's a new batch */
+ spin_lock(&rcu_state.mutex);
+ rcu_start_batch(1);
+ spin_unlock(&rcu_state.mutex);
+ }
} else {
local_irq_enable();
}
rcu_check_quiescent_state();
- if (!list_empty(&list))
- rcu_do_batch(&list);
+ if (rcu_list)
+ rcu_do_batch(rcu_list);
}
void rcu_check_callbacks(int cpu, int user)
@@ -261,8 +312,9 @@ static void __devinit rcu_online_cpu(int cpu)
{
memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
- INIT_LIST_HEAD(&RCU_nxtlist(cpu));
- INIT_LIST_HEAD(&RCU_curlist(cpu));
+ RCU_nxttail(cpu) = &RCU_nxtlist(cpu);
+ RCU_quiescbatch(cpu) = rcu_ctrlblk.completed;
+ RCU_qs_pending(cpu) = 0;
}
static int __devinit rcu_cpu_notify(struct notifier_block *self,
@@ -302,11 +354,18 @@ void __init rcu_init(void)
register_cpu_notifier(&rcu_nb);
}
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
/* Because of FASTCALL declaration of complete, we use this wrapper */
-static void wakeme_after_rcu(void *completion)
+static void wakeme_after_rcu(struct rcu_head *head)
{
- complete(completion);
+ struct rcu_synchronize *rcu;
+
+ rcu = container_of(head, struct rcu_synchronize, head);
+ complete(&rcu->completion);
}
/**
@@ -315,14 +374,14 @@ static void wakeme_after_rcu(void *completion)
*/
void synchronize_kernel(void)
{
- struct rcu_head rcu;
- DECLARE_COMPLETION(completion);
+ struct rcu_synchronize rcu;
+ init_completion(&rcu.completion);
/* Will wake me after RCU finished */
- call_rcu(&rcu, wakeme_after_rcu, &completion);
+ call_rcu(&rcu.head, wakeme_after_rcu);
/* Wait for it */
- wait_for_completion(&completion);
+ wait_for_completion(&rcu.completion);
}
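
The locking split above lets the grace-period counter be read locklessly: writers advance rcu_ctrlblk.cur under rcu_state.mutex inside a write_seqcount_begin()/end() pair, while rcu_process_callbacks() samples cur and next_pending in a retry loop instead of taking the spinlock. A generic sketch of that seqcount pattern (hypothetical fields, not the actual rcu_ctrlblk layout):

#include <linux/seqlock.h>
#include <linux/spinlock.h>

static struct {
	long cur;		/* current grace-period number */
	int next_pending;	/* another batch already requested? */
	seqcount_t lock;
} ctrl = { .lock = SEQCNT_ZERO };

static spinlock_t ctrl_mutex = SPIN_LOCK_UNLOCKED;	/* serializes writers */

static void writer_advance(void)
{
	spin_lock(&ctrl_mutex);
	write_seqcount_begin(&ctrl.lock);
	ctrl.next_pending = 0;
	ctrl.cur++;
	write_seqcount_end(&ctrl.lock);
	spin_unlock(&ctrl_mutex);
}

static long reader_snapshot(int *next_pending)
{
	unsigned seq;
	long cur;

	do {
		seq = read_seqcount_begin(&ctrl.lock);
		cur = ctrl.cur;
		*next_pending = ctrl.next_pending;
	} while (read_seqcount_retry(&ctrl.lock, seq));

	return cur;	/* cur and *next_pending come from one consistent snapshot */
}

This keeps the common path in rcu_process_callbacks() off rcu_state.mutex; only a CPU that actually needs to kick off a new batch takes the lock.
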
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f49ba1202c3..9e676db0267b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -696,10 +696,9 @@ static int wake_idle(int cpu, task_t *p)
return cpu;
cpus_and(tmp, sd->span, cpu_online_map);
- for_each_cpu_mask(i, tmp) {
- if (!cpu_isset(i, p->cpus_allowed))
- continue;
+ cpus_and(tmp, tmp, p->cpus_allowed);
+ for_each_cpu_mask(i, tmp) {
if (idle_cpu(i))
return i;
}
@@ -2941,6 +2940,21 @@ out_unlock:
return retval;
}
+/*
+ * Represents all cpu's present in the system
+ * In systems capable of hotplug, this map could dynamically grow
+ * as new cpu's are detected in the system via any platform specific
+ * method, such as ACPI for e.g.
+ */
+
+cpumask_t cpu_present_map;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+cpumask_t cpu_online_map = CPU_MASK_ALL;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
+#endif
+
/**
* sys_sched_getaffinity - get the cpu affinity of a process
* @pid: pid of the process
@@ -3320,7 +3334,7 @@ int set_cpus_allowed(task_t *p, cpumask_t new_mask)
runqueue_t *rq;
rq = task_rq_lock(p, &flags);
- if (any_online_cpu(new_mask) == NR_CPUS) {
+ if (!cpus_intersects(new_mask, cpu_online_map)) {
ret = -EINVAL;
goto out;
}
@@ -3495,8 +3509,7 @@ static void migrate_all_tasks(int src_cpu)
if (dest_cpu == NR_CPUS)
dest_cpu = any_online_cpu(tsk->cpus_allowed);
if (dest_cpu == NR_CPUS) {
- cpus_clear(tsk->cpus_allowed);
- cpus_complement(tsk->cpus_allowed);
+ cpus_setall(tsk->cpus_allowed);
dest_cpu = any_online_cpu(tsk->cpus_allowed);
/* Don't tell them about moving exiting tasks
@@ -3558,6 +3571,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
if (IS_ERR(p))
return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
kthread_bind(p, cpu);
/* Must be high prio: stop_machine expects to yield to it. */
rq = task_rq_lock(p, &flags);
@@ -3812,7 +3826,7 @@ void sched_domain_debug(void)
int j;
char str[NR_CPUS];
struct sched_group *group = sd->groups;
- cpumask_t groupmask, tmp;
+ cpumask_t groupmask;
cpumask_scnprintf(str, NR_CPUS, sd->span);
cpus_clear(groupmask);
@@ -3842,8 +3856,7 @@ void sched_domain_debug(void)
if (!cpus_weight(group->cpumask))
printk(" ERROR empty group:");
- cpus_and(tmp, groupmask, group->cpumask);
- if (cpus_weight(tmp) > 0)
+ if (cpus_intersects(groupmask, group->cpumask))
printk(" ERROR repeated CPUs:");
cpus_or(groupmask, groupmask, group->cpumask);
@@ -3862,8 +3875,7 @@ void sched_domain_debug(void)
sd = sd->parent;
if (sd) {
- cpus_and(tmp, groupmask, sd->span);
- if (!cpus_equal(tmp, groupmask))
+ if (!cpus_subset(groupmask, sd->span))
printk(KERN_DEBUG "ERROR parent span is not a superset of domain->span\n");
}
@@ -3902,16 +3914,15 @@ void __init sched_init(void)
/* Set up an initial dummy domain for early boot */
static struct sched_domain sched_domain_init;
static struct sched_group sched_group_init;
- cpumask_t cpu_mask_all = CPU_MASK_ALL;
memset(&sched_domain_init, 0, sizeof(struct sched_domain));
- sched_domain_init.span = cpu_mask_all;
+ sched_domain_init.span = CPU_MASK_ALL;
sched_domain_init.groups = &sched_group_init;
sched_domain_init.last_balance = jiffies;
sched_domain_init.balance_interval = INT_MAX; /* Don't balance */
memset(&sched_group_init, 0, sizeof(struct sched_group));
- sched_group_init.cpumask = cpu_mask_all;
+ sched_group_init.cpumask = CPU_MASK_ALL;
sched_group_init.next = &sched_group_init;
sched_group_init.cpu_power = SCHED_LOAD_SCALE;
#endif
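
Several sched.c hunks drop a temporary cpumask_t in favour of the dedicated predicates: cpus_intersects() replaces cpus_and() followed by a cpus_weight() or any_online_cpu() test, cpus_subset() replaces cpus_and() plus cpus_equal(), and cpus_setall() replaces cpus_clear() plus cpus_complement(). A rough before/after sketch (hypothetical helpers, not the real cpumask.h implementations):

#include <linux/cpumask.h>

/* old-style test as in sched_domain_debug(): needs a scratch mask */
static int overlap_old(cpumask_t a, cpumask_t b)
{
	cpumask_t tmp;

	cpus_and(tmp, a, b);
	return cpus_weight(tmp) > 0;
}

/* new style: no temporary on the stack, same result */
static int overlap_new(cpumask_t a, cpumask_t b)
{
	return cpus_intersects(a, b);
}
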
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7dca63a88ea2..641727bab22f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -39,6 +39,8 @@
#include <linux/initrd.h>
#include <linux/times.h>
#include <linux/limits.h>
+#include <linux/dcache.h>
+
#include <asm/uaccess.h>
#ifdef CONFIG_ROOT_NFS
@@ -777,6 +779,16 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
+ {
+ .ctl_name = VM_VFS_CACHE_PRESSURE,
+ .procname = "vfs_cache_pressure",
+ .data = &sysctl_vfs_cache_pressure,
+ .maxlen = sizeof(sysctl_vfs_cache_pressure),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
{ .ctl_name = 0 }
};
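
With CONFIG_SYSCTL the new entry appears as /proc/sys/vm/vfs_cache_pressure; the sysctl_vfs_cache_pressure variable itself presumably lives on the dcache side, which is why linux/dcache.h is now included. A userspace sketch for reading the tunable (illustrative only):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/vfs_cache_pressure", "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		perror("vfs_cache_pressure");
		return 1;
	}
	fclose(f);
	printf("vm.vfs_cache_pressure = %d\n", val);
	return 0;
}
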