summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/sched.c9
-rw-r--r--kernel/softirq.c16
4 files changed, 14 insertions, 17 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index f727a3c511c8..71d017f2fd8f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -530,10 +530,10 @@ NORET_TYPE void do_exit(long code)
tsk->flags |= PF_EXITING;
del_timer_sync(&tsk->real_timer);
- if (unlikely(preempt_get_count()))
+ if (unlikely(preempt_count()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
- preempt_get_count());
+ preempt_count());
fake_volatile:
acct_process(code);
diff --git a/kernel/panic.c b/kernel/panic.c
index 5281e7cce9f8..9661ad669363 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -94,7 +94,7 @@ NORET_TYPE void panic(const char * fmt, ...)
#if defined(CONFIG_ARCH_S390)
disabled_wait(caller);
#endif
- sti();
+ __sti();
for(;;) {
CHECK_EMERGENCY_SYNC
}
diff --git a/kernel/sched.c b/kernel/sched.c
index c8a11b29794e..3d275a38109e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -727,7 +727,8 @@ void scheduler_tick(int user_tick, int system)
task_t *p = current;
if (p == rq->idle) {
- if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+ /* note: this timer irq context must be accounted for as well */
+ if (preempt_count() >= 2*IRQ_OFFSET)
kstat.per_cpu_system[cpu] += system;
#if CONFIG_SMP
idle_tick();
@@ -816,7 +817,7 @@ need_resched:
prev = current;
rq = this_rq();
- release_kernel_lock(prev, smp_processor_id());
+ release_kernel_lock(prev);
prepare_arch_schedule(prev);
prev->sleep_timestamp = jiffies;
spin_lock_irq(&rq->lock);
@@ -825,7 +826,7 @@ need_resched:
* if entering off of a kernel preemption go straight
* to picking the next task.
*/
- if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
+ if (unlikely(preempt_count() & PREEMPT_ACTIVE))
goto pick_next_task;
switch (prev->state) {
@@ -1694,7 +1695,9 @@ void __init init_idle(task_t *idle, int cpu)
__restore_flags(flags);
/* Set the preempt count _outside_ the spinlocks! */
+#if CONFIG_PREEMPT
idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#endif
}
extern void init_timervecs(void);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e0093420e169..a53dd2828cb5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -61,17 +61,17 @@ static inline void wakeup_softirqd(unsigned cpu)
asmlinkage void do_softirq()
{
- int cpu;
__u32 pending;
long flags;
__u32 mask;
+ int cpu;
if (in_interrupt())
return;
local_irq_save(flags);
-
cpu = smp_processor_id();
+
pending = softirq_pending(cpu);
if (pending) {
@@ -111,7 +111,7 @@ restart:
}
/*
- * This function must run with irq disabled!
+ * This function must run with irqs disabled!
*/
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
@@ -126,7 +126,7 @@ inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
* Otherwise we wake up ksoftirqd to make sure we
* schedule the softirq soon.
*/
- if (!(local_irq_count(cpu) | local_bh_count(cpu)))
+ if (!in_interrupt())
wakeup_softirqd(cpu);
}
@@ -290,22 +290,16 @@ spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
static void bh_action(unsigned long nr)
{
- int cpu = smp_processor_id();
-
if (!spin_trylock(&global_bh_lock))
goto resched;
- if (!hardirq_trylock(cpu))
- goto resched_unlock;
-
if (bh_base[nr])
bh_base[nr]();
- hardirq_endlock(cpu);
+ hardirq_endlock();
spin_unlock(&global_bh_lock);
return;
-resched_unlock:
spin_unlock(&global_bh_lock);
resched:
mark_bh(nr);