authorLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 18:35:05 -0800
committerLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 18:35:05 -0800
commitd2a0e8decde7ef3354d8cdaa0ddf5517e5759ca2 (patch)
tree7769ba8a9aa7c7fe6a510409dfcecbbfede212b9 /kernel/softirq.c
parent9c6f70be049f5a0439996107e58bf65e9a5d9a09 (diff)
v2.4.5 -> v2.4.5.1
- Andreas Dilger: make ext2fs react more gracefully to inode disk errors
- Andrea Arcangeli: fix up alpha compile issues
- Ingo Molnar: io-apic MP table parsing update and softirq latency update
- Johannes Erdfelt: USB updates
- Richard Henderson: alpha rawhide irq handling fixes
- Marcelo, Andrea, Rik: more VM issues
- Al Viro: fix various ext2 directory handling checks by biting the bullet and using the page cache.
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c | 136
1 file changed, 88 insertions(+), 48 deletions(-)
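For context, a minimal driver-side sketch (not part of this commit) of how the tasklet API reworked below is typically used: tasklet_init() registers the deferred handler, and tasklet_schedule() is called from the device's hard interrupt handler. The mydev_* names are hypothetical.

#include <linux/interrupt.h>

struct mydev {
	unsigned long pending;		/* hypothetical device state */
};

static struct tasklet_struct mydev_tasklet;

/* Runs later in softirq context via tasklet_action(), with hard irqs enabled. */
static void mydev_do_deferred_work(unsigned long data)
{
	struct mydev *dev = (struct mydev *) data;

	/* ... the bulk of the interrupt work goes here ... */
	dev->pending = 0;
}

static void mydev_setup(struct mydev *dev)
{
	tasklet_init(&mydev_tasklet, mydev_do_deferred_work,
		     (unsigned long) dev);
}

/* Called from the device's hard interrupt handler (irq plumbing omitted).
 * tasklet_schedule() sets TASKLET_STATE_SCHED and, if the tasklet was not
 * already pending, puts it on this CPU's tasklet_vec list and raises
 * TASKLET_SOFTIRQ, as in the new tasklet_schedule() added by this patch. */
static void mydev_interrupt(struct mydev *dev)
{
	tasklet_schedule(&mydev_tasklet);
}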
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fe066399dafa..8981f85281c5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -52,12 +52,12 @@ asmlinkage void do_softirq()
int cpu = smp_processor_id();
__u32 active, mask;
+ local_irq_disable();
if (in_interrupt())
- return;
+ goto out;
local_bh_disable();
- local_irq_disable();
mask = softirq_mask(cpu);
active = softirq_active(cpu) & mask;
@@ -71,7 +71,6 @@ restart:
local_irq_enable();
h = softirq_vec;
- mask &= ~active;
do {
if (active & 1)
@@ -82,12 +81,13 @@ restart:
local_irq_disable();
- active = softirq_active(cpu);
- if ((active &= mask) != 0)
+ active = softirq_active(cpu) & mask;
+ if (active)
goto retry;
}
- local_bh_enable();
+ __local_bh_enable();
+out:
/* Leave with locally disabled hard irqs. It is critical to close
* window for infinite recursion, while we help local bh count,
@@ -121,6 +121,45 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
+void tasklet_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+ /*
+ * If nobody is running it then add it to this CPU's
+ * tasklet queue.
+ */
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
+void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
static void tasklet_action(struct softirq_action *a)
{
int cpu = smp_processor_id();
@@ -129,37 +168,37 @@ static void tasklet_action(struct softirq_action *a)
local_irq_disable();
list = tasklet_vec[cpu].list;
tasklet_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- /*
- * talklet_trylock() uses test_and_set_bit that imply
- * an mb when it returns zero, thus we need the explicit
- * mb only here: while closing the critical section.
- */
-#ifdef CONFIG_SMP
- smp_mb__before_clear_bit();
-#endif
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ /*
+ * A tasklet is only added to the queue while it's
+ * locked, so no other CPU can have this tasklet
+ * pending:
+ */
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ /*
+ * One more run if the tasklet got reactivated:
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_schedule(t);
}
+ local_irq_enable();
}
@@ -174,39 +213,40 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
list = tasklet_hi_vec[cpu].list;
tasklet_hi_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_hi_schedule(t);
}
+ local_irq_enable();
}
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
- t->func = func;
- t->data = data;
+ t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
+ t->func = func;
+ t->data = data;
}
void tasklet_kill(struct tasklet_struct *t)
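A minimal sketch (again not from the patch) of the re-arming case the new tasklet_action() comments describe: a handler that calls tasklet_schedule() on itself. Because the action loop still holds the tasklet lock, tasklet_schedule() only sets TASKLET_STATE_SCHED, and the loop either runs the handler again immediately ("goto repeat") or re-queues the tasklet after unlocking it. The poll_* names and the work_pending flag are hypothetical.

#include <linux/interrupt.h>

static void poll_fn(unsigned long data);
static DECLARE_TASKLET(poll_tasklet, poll_fn, 0);

static int work_pending;		/* hypothetical "more to do?" flag */

static void poll_fn(unsigned long data)
{
	/* ... process one batch of work ... */
	if (work_pending)
		/* Re-arm while the tasklet is still locked: this only sets
		 * TASKLET_STATE_SCHED, so tasklet_action() notices it and
		 * repeats the run instead of queueing the tasklet elsewhere. */
		tasklet_schedule(&poll_tasklet);
}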