author     Linus Torvalds <torvalds@athlon.transmeta.com>   2002-02-04 20:33:54 -0800
committer  Linus Torvalds <torvalds@athlon.transmeta.com>   2002-02-04 20:33:54 -0800
commit     1040c54c3b98ac4f8d91bc313cdc9d6669481da3 (patch)
tree       d2c91b7b7e2aa5ffc88774ce1aa9aef08d4c709f /kernel
parent     5aa875d2cbee34727963bd81aa992b64480045ca (diff)
v2.4.14.8 -> v2.4.14.9

- David Brownell: usbnet update
- Greg KH: USB and PCI hotplug update
- Ingo/me: fix SCHED_FIFO for UP/SMP for good (flw).
- Add back direct_IO now that it works again.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c     4
-rw-r--r--  kernel/fork.c     2
-rw-r--r--  kernel/ksyms.c    1
-rw-r--r--  kernel/ptrace.c  36
-rw-r--r--  kernel/sched.c   97
-rw-r--r--  kernel/signal.c   2
6 files changed, 78 insertions, 64 deletions
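
Note: the hunks below replace the old p->has_cpu flag with task_has_cpu()/task_set_cpu()/task_release_cpu() helpers built on a cpus_runnable mask. Those helpers live outside the kernel/ directory this diffstat is limited to, so they do not appear here; the following is only a sketch of what they presumably look like in include/linux/sched.h, inferred from how cpus_runnable is used in the hunks (~0UL means "not running on any CPU", a single set bit names the CPU the task currently occupies):

/* Sketch only -- assumed include/linux/sched.h helpers, not part of this diff. */

/* Task is on a CPU iff its runnable mask has collapsed to a single bit. */
#define task_has_cpu(tsk)	((tsk)->cpus_runnable != ~0UL)

static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
{
	/* Claim this CPU: record it and restrict the mask to that one bit. */
	tsk->processor = cpu;
	tsk->cpus_runnable = 1UL << cpu;
}

static inline void task_release_cpu(struct task_struct *tsk)
{
	/* Give the CPU back: the task may again be picked up anywhere. */
	tsk->cpus_runnable = ~0UL;
}

With this encoding, can_schedule() in sched.c reduces to a single mask test, cpus_runnable & cpus_allowed & (1 << cpu), instead of the old two-part has_cpu check.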
diff --git a/kernel/exit.c b/kernel/exit.c
index 708cad8597f5..2650ac328e4a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -35,13 +35,13 @@ static void release_task(struct task_struct * p)
*/
for (;;) {
task_lock(p);
- if (!p->has_cpu)
+ if (!task_has_cpu(p))
break;
task_unlock(p);
do {
cpu_relax();
barrier();
- } while (p->has_cpu);
+ } while (task_has_cpu(p));
}
task_unlock(p);
#endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 91aeda9ef591..3c28fc2a9777 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -638,7 +638,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
#ifdef CONFIG_SMP
{
int i;
- p->has_cpu = 0;
+ p->cpus_runnable = ~0UL;
p->processor = current->processor;
/* ?? should we just memset this ?? */
for(i = 0; i < smp_num_cpus; i++)
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 6ecd30f3d323..bd2d762baf94 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -199,6 +199,7 @@ EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(unlock_buffer);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(___wait_on_page);
+EXPORT_SYMBOL(generic_direct_IO);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_prepare_write);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b9cf12d4c02d..da45fa2be7a3 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -16,6 +16,42 @@
#include <asm/pgtable.h>
#include <asm/uaccess.h>
+/*
+ * Check that we have indeed attached to the thing..
+ */
+int ptrace_check_attach(struct task_struct *child, int kill)
+{
+ if (!(child->ptrace & PT_PTRACED))
+ return -ESRCH;
+
+ if (child->p_pptr != current)
+ return -ESRCH;
+
+ if (!kill) {
+ if (child->state != TASK_STOPPED)
+ return -ESRCH;
+#ifdef CONFIG_SMP
+ /* Make sure the child gets off its CPU.. */
+ for (;;) {
+ task_lock(child);
+ if (!task_has_cpu(child))
+ break;
+ task_unlock(child);
+ do {
+ if (child->state != TASK_STOPPED)
+ return -ESRCH;
+ barrier();
+ cpu_relax();
+ } while (task_has_cpu(child));
+ }
+ task_unlock(child);
+#endif
+ }
+
+ /* All systems go.. */
+ return 0;
+}
+
int ptrace_attach(struct task_struct *task)
{
task_lock(task);
diff --git a/kernel/sched.c b/kernel/sched.c
index 11de5d6951a9..db3e42f74c42 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -28,6 +28,7 @@
#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/prefetch.h>
+#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -114,8 +115,8 @@ extern struct task_struct *child_reaper;
#ifdef CONFIG_SMP
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
-#define can_schedule(p,cpu) ((!(p)->has_cpu) && \
- ((p)->cpus_allowed & (1 << cpu)))
+#define can_schedule(p,cpu) \
+ ((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))
#else
@@ -455,11 +456,11 @@ static inline void __schedule_tail(struct task_struct *prev)
/*
* prev->policy can be written from here only before `prev'
- * can be scheduled (before setting prev->has_cpu to zero).
+ * can be scheduled (before setting prev->cpus_runnable to ~0UL).
* Of course it must also be read before allowing prev
* to be rescheduled, but since the write depends on the read
* to complete, wmb() is enough. (the spin_lock() acquired
- * before setting has_cpu is not enough because the spin_lock()
+ * before setting cpus_runnable is not enough because the spin_lock()
* common code semantics allows code outside the critical section
* to enter inside the critical section)
*/
@@ -468,12 +469,12 @@ static inline void __schedule_tail(struct task_struct *prev)
wmb();
/*
- * fast path falls through. We have to clear has_cpu before
- * checking prev->state to avoid a wakeup race - thus we
- * also have to protect against the task exiting early.
+ * fast path falls through. We have to clear cpus_runnable before
+ * checking prev->state to avoid a wakeup race. Protect against
+ * the task exiting early.
*/
task_lock(prev);
- prev->has_cpu = 0;
+ task_release_cpu(prev);
mb();
if (prev->state == TASK_RUNNING)
goto needs_resched;
@@ -505,7 +506,7 @@ needs_resched:
goto out_unlock;
spin_lock_irqsave(&runqueue_lock, flags);
- if ((prev->state == TASK_RUNNING) && !prev->has_cpu)
+ if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
reschedule_idle(prev);
spin_unlock_irqrestore(&runqueue_lock, flags);
goto out_unlock;
@@ -545,8 +546,10 @@ need_resched_back:
prev = current;
this_cpu = prev->processor;
- if (in_interrupt())
- goto scheduling_in_interrupt;
+ if (unlikely(in_interrupt())) {
+ printk("Scheduling in interrupt\n");
+ BUG();
+ }
release_kernel_lock(prev, this_cpu);
@@ -559,9 +562,11 @@ need_resched_back:
spin_lock_irq(&runqueue_lock);
/* move an exhausted RR process to be last.. */
- if (prev->policy == SCHED_RR)
- goto move_rr_last;
-move_rr_back:
+ if (unlikely(prev->policy == SCHED_RR))
+ if (!prev->counter) {
+ prev->counter = NICE_TO_TICKS(prev->nice);
+ move_last_runqueue(prev);
+ }
switch (prev->state) {
case TASK_INTERRUPTIBLE:
@@ -585,10 +590,6 @@ repeat_schedule:
*/
next = idle_task(this_cpu);
c = -1000;
- if (prev->state == TASK_RUNNING)
- goto still_running;
-
-still_running_back:
list_for_each(tmp, &runqueue_head) {
p = list_entry(tmp, struct task_struct, run_list);
if (can_schedule(p, this_cpu)) {
@@ -599,21 +600,28 @@ still_running_back:
}
/* Do we need to re-calculate counters? */
- if (!c)
- goto recalculate;
+ if (unlikely(!c)) {
+ struct task_struct *p;
+
+ spin_unlock_irq(&runqueue_lock);
+ read_lock(&tasklist_lock);
+ for_each_task(p)
+ p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
+ read_unlock(&tasklist_lock);
+ spin_lock_irq(&runqueue_lock);
+ goto repeat_schedule;
+ }
+
/*
* from this point on nothing can prevent us from
* switching to the next task, save this fact in
* sched_data.
*/
sched_data->curr = next;
-#ifdef CONFIG_SMP
- next->has_cpu = 1;
- next->processor = this_cpu;
-#endif
+ task_set_cpu(next, this_cpu);
spin_unlock_irq(&runqueue_lock);
- if (prev == next) {
+ if (unlikely(prev == next)) {
/* We won't go through the normal tail, so do this by hand */
prev->policy &= ~SCHED_YIELD;
goto same_process;
@@ -678,38 +686,6 @@ same_process:
reacquire_kernel_lock(current);
if (current->need_resched)
goto need_resched_back;
-
- return;
-
-recalculate:
- {
- struct task_struct *p;
- spin_unlock_irq(&runqueue_lock);
- read_lock(&tasklist_lock);
- for_each_task(p)
- p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
- read_unlock(&tasklist_lock);
- spin_lock_irq(&runqueue_lock);
- }
- goto repeat_schedule;
-
-still_running:
- if (!(prev->cpus_allowed & (1UL << this_cpu)))
- goto still_running_back;
- c = goodness(prev, this_cpu, prev->active_mm);
- next = prev;
- goto still_running_back;
-
-move_rr_last:
- if (!prev->counter) {
- prev->counter = NICE_TO_TICKS(prev->nice);
- move_last_runqueue(prev);
- }
- goto move_rr_back;
-
-scheduling_in_interrupt:
- printk("Scheduling in interrupt\n");
- BUG();
return;
}
@@ -1072,6 +1048,10 @@ asmlinkage long sys_sched_yield(void)
if (current->policy == SCHED_OTHER)
current->policy |= SCHED_YIELD;
current->need_resched = 1;
+
+ spin_lock_irq(&runqueue_lock);
+ move_last_runqueue(current);
+ spin_unlock_irq(&runqueue_lock);
}
return 0;
}
@@ -1176,13 +1156,10 @@ static void show_task(struct task_struct * p)
else
printk(" (NOTLB)\n");
-#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_ARM) || defined(CONFIG_ALPHA)
-/* This is very useful, but only works on ARM, x86 and sparc64 right now */
{
extern void show_trace_task(struct task_struct *tsk);
show_trace_task(p);
}
-#endif
}
char * render_sigset_t(sigset_t *set, char *buffer)
diff --git a/kernel/signal.c b/kernel/signal.c
index 7f4d2e71c751..44acecd851c7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -479,7 +479,7 @@ static inline void signal_wake_up(struct task_struct *t)
* other than doing an extra (lightweight) IPI interrupt.
*/
spin_lock(&runqueue_lock);
- if (t->has_cpu && t->processor != smp_processor_id())
+ if (task_has_cpu(t) && t->processor != smp_processor_id())
smp_send_reschedule(t->processor);
spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */