Diffstat (limited to 'kernel')
 kernel/ksyms.c   |   6
 kernel/sched.c   |   2
 kernel/softirq.c | 136
 3 files changed, 90 insertions(+), 54 deletions(-)
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index d1602d8d16f9..d60a225c6650 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -90,17 +90,13 @@ EXPORT_SYMBOL(exit_fs);
EXPORT_SYMBOL(exit_sighand);
/* internal kernel memory management */
+EXPORT_SYMBOL(_alloc_pages);
EXPORT_SYMBOL(__alloc_pages);
EXPORT_SYMBOL(alloc_pages_node);
EXPORT_SYMBOL(__get_free_pages);
EXPORT_SYMBOL(get_zeroed_page);
EXPORT_SYMBOL(__free_pages);
EXPORT_SYMBOL(free_pages);
-#ifndef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(contig_page_data);
-#else
-EXPORT_SYMBOL(alloc_pages);
-#endif
EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(kmem_find_general_cachep);
EXPORT_SYMBOL(kmem_cache_create);
diff --git a/kernel/sched.c b/kernel/sched.c
index e51735116958..ff097124bda3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -544,7 +544,7 @@ need_resched_back:
release_kernel_lock(prev, this_cpu);
/* Do "administrative" work here while we don't hold any locks */
- if (softirq_active(this_cpu) & softirq_mask(this_cpu))
+ if (softirq_pending(this_cpu))
goto handle_softirq;
handle_softirq_back:
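The scheduler now asks one question, "is any softirq pending?", instead of open-coding the active & mask test. A plausible definition of the new helper, assuming it merely folds the old two-word test into one macro (its definition is not part of this diff):

	#define softirq_pending(cpu) \
		(softirq_active(cpu) & softirq_mask(cpu))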
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fe066399dafa..8981f85281c5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -52,12 +52,12 @@ asmlinkage void do_softirq()
int cpu = smp_processor_id();
__u32 active, mask;
+ local_irq_disable();
if (in_interrupt())
- return;
+ goto out;
local_bh_disable();
- local_irq_disable();
mask = softirq_mask(cpu);
active = softirq_active(cpu) & mask;
@@ -71,7 +71,6 @@ restart:
local_irq_enable();
h = softirq_vec;
- mask &= ~active;
do {
if (active & 1)
@@ -82,12 +81,13 @@ restart:
local_irq_disable();
- active = softirq_active(cpu);
- if ((active &= mask) != 0)
+ active = softirq_active(cpu) & mask;
+ if (active)
goto retry;
}
- local_bh_enable();
+ __local_bh_enable();
+out:
/* Leave with locally disabled hard irqs. It is critical to close
* window for infinite recursion, while we help local bh count,
* it protected us. Now we are defenceless.
*/
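Two things change on this path: interrupts are now disabled before the in_interrupt() test, so the check and the queue snapshot happen atomically, and the exit uses __local_bh_enable() rather than local_bh_enable(). The double-underscore variant presumably only drops the per-CPU bh count without re-checking for pending softirqs, which is what closes the recursion window the comment above describes. A sketch under that assumption (the real macros live in the per-arch softirq.h, not in this diff):

	#define local_bh_disable() \
		do { local_bh_count(smp_processor_id())++; barrier(); } while (0)
	#define __local_bh_enable() \
		do { barrier(); local_bh_count(smp_processor_id())--; } while (0)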
@@ -121,6 +121,45 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
+void tasklet_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+ /*
+ * If nobody is running it then add it to this CPU's
+ * tasklet queue.
+ */
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
+void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
static void tasklet_action(struct softirq_action *a)
{
int cpu = smp_processor_id();
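The new schedule functions rely on two bits of t->state: TASKLET_STATE_SCHED marks "already queued" (the test_and_set_bit makes scheduling idempotent), while tasklet_trylock()/tasklet_unlock() arbitrate "currently running" between CPUs. A sketch of those helpers under the usual assumption that they are a bit-lock on TASKLET_STATE_RUN (on UP builds they can collapse to no-ops):

	static inline int tasklet_trylock(struct tasklet_struct *t)
	{
		return !test_and_set_bit(TASKLET_STATE_RUN, &t->state);
	}

	static inline void tasklet_unlock(struct tasklet_struct *t)
	{
		smp_mb__before_clear_bit();	/* order the work's stores before release */
		clear_bit(TASKLET_STATE_RUN, &t->state);
	}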
@@ -129,37 +168,37 @@ static void tasklet_action(struct softirq_action *a)
local_irq_disable();
list = tasklet_vec[cpu].list;
tasklet_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- /*
- * talklet_trylock() uses test_and_set_bit that imply
- * an mb when it returns zero, thus we need the explicit
- * mb only here: while closing the critical section.
- */
-#ifdef CONFIG_SMP
- smp_mb__before_clear_bit();
-#endif
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ /*
+ * A tasklet is only added to the queue while it's
+ * locked, so no other CPU can have this tasklet
+ * pending:
+ */
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ /*
+ * One more run if the tasklet got reactivated:
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_schedule(t);
}
+ local_irq_enable();
}
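Note that the rewritten loop clears TASKLET_STATE_SCHED up front and only calls t->func() while the disable count is zero; the repeat label gives a reactivated tasklet another pass without requeueing. A hypothetical sketch of the enable/disable side that this count test presupposes (names follow the usual 2.4 interface; the unlock-wait step is an assumption, not shown in this patch):

	static inline void tasklet_disable(struct tasklet_struct *t)
	{
		atomic_inc(&t->count);		/* action loops now skip t->func() */
		tasklet_unlock_wait(t);		/* let a concurrent run finish */
	}

	static inline void tasklet_enable(struct tasklet_struct *t)
	{
		atomic_dec(&t->count);
	}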
@@ -174,39 +213,40 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
list = tasklet_hi_vec[cpu].list;
tasklet_hi_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_hi_schedule(t);
}
+ local_irq_enable();
}
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
- t->func = func;
- t->data = data;
+ t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
+ t->func = func;
+ t->data = data;
}
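For completeness, a hypothetical driver-side use of the interface as it stands after this patch (my_tasklet and my_tasklet_func are made-up names; treat this as a sketch, not part of the change):

	static struct tasklet_struct my_tasklet;

	static void my_tasklet_func(unsigned long data)
	{
		/* runs in softirq context with hard irqs enabled */
	}

	/* driver init: */
	tasklet_init(&my_tasklet, my_tasklet_func, 0);

	/* typically from the interrupt handler: */
	tasklet_schedule(&my_tasklet);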
void tasklet_kill(struct tasklet_struct *t)