author     Andrew Morton <akpm@osdl.org>            2004-01-18 18:30:29 -0800
committer  Linus Torvalds <torvalds@home.osdl.org>  2004-01-18 18:30:29 -0800
commit     2df40901f668a2943de6c93b4f3221a4599c7597 (patch)
tree       0c8fbb1305907678e951c515eecb209c7a83fe14 /kernel
parent     ea9c300dbdc3bb6417969ac6e65459567cfda8ce (diff)
[PATCH] CPU scheduler cleanup
From: Ingo Molnar <mingo@elte.hu>

- move scheduling-state initialization from copy_process() to sched_fork() (Nick Piggin)
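In outline, the patch consolidates the scheduler-specific fields that copy_process() used to initialize inline into a single sched_fork() helper. A minimal sketch of the resulting shape, condensed from the diff below (the real functions take more arguments and do far more work):

    /* kernel/sched.c -- new helper, condensed from the diff below */
    void sched_fork(task_t *p)
    {
            p->state = TASK_RUNNING;        /* runnable, but not on a runqueue yet */
            INIT_LIST_HEAD(&p->run_list);
            p->array = NULL;
            spin_lock_init(&p->switch_lock);
            /* ... preempt_count and timeslice setup, see the full hunk below ... */
    }

    /* kernel/fork.c -- copy_process() now just calls the helper */
    sched_fork(p);

This keeps every scheduler-visible field of the new task in one place, so later scheduler changes only need to touch kernel/sched.c.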
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c   56
-rw-r--r--  kernel/sched.c  54
2 files changed, 59 insertions(+), 51 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5a8bf386fbc7..b9a13609b635 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -910,23 +910,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
-#ifdef CONFIG_PREEMPT
- /*
- * schedule_tail drops this_rq()->lock so we compensate with a count
- * of 1. Also, we want to start with kernel preemption disabled.
- */
- p->thread_info->preempt_count = 1;
-#endif
p->did_exec = 0;
-
- /*
- * We mark the process as running here, but have not actually
- * inserted it onto the runqueue yet. This guarantees that
- * nobody will actually run it, and a signal or other external
- * event cannot wake it up and insert it on the runqueue either.
- */
- p->state = TASK_RUNNING;
-
copy_flags(clone_flags, p);
if (clone_flags & CLONE_IDLETASK)
p->pid = 0;
@@ -942,15 +926,12 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->proc_dentry = NULL;
- INIT_LIST_HEAD(&p->run_list);
-
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
INIT_LIST_HEAD(&p->posix_timers);
init_waitqueue_head(&p->wait_chldexit);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
- spin_lock_init(&p->switch_lock);
spin_lock_init(&p->proc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
@@ -965,7 +946,6 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->tty_old_pgrp = 0;
p->utime = p->stime = 0;
p->cutime = p->cstime = 0;
- p->array = NULL;
p->lock_depth = -1; /* -1 = no lock */
p->start_time = get_jiffies_64();
p->security = NULL;
@@ -1014,38 +994,12 @@ struct task_struct *copy_process(unsigned long clone_flags,
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
+ /* Perform scheduler related setup */
+ sched_fork(p);
+
/*
- * Share the timeslice between parent and child, thus the
- * total amount of pending timeslices in the system doesn't change,
- * resulting in more scheduling fairness.
- */
- local_irq_disable();
- p->time_slice = (current->time_slice + 1) >> 1;
- /*
- * The remainder of the first timeslice might be recovered by
- * the parent if the child exits early enough.
- */
- p->first_time_slice = 1;
- current->time_slice >>= 1;
- p->timestamp = sched_clock();
- if (!current->time_slice) {
- /*
- * This case is rare, it happens when the parent has only
- * a single jiffy left from its timeslice. Taking the
- * runqueue lock is not a problem.
- */
- current->time_slice = 1;
- preempt_disable();
- scheduler_tick(0, 0);
- local_irq_enable();
- preempt_enable();
- } else
- local_irq_enable();
- /*
- * Ok, add it to the run-queues and make it
- * visible to the rest of the system.
- *
- * Let it rip!
+ * Ok, make it visible to the rest of the system.
+ * We don't wake it up yet.
*/
p->tgid = p->pid;
p->group_leader = p;
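The timeslice-sharing arithmetic removed above reappears verbatim in sched_fork() below: the child takes the rounded-up half of the parent's remaining timeslice and the parent keeps the rounded-down half, so the pair never holds more ticks than the parent started with. A hypothetical standalone demo of just that arithmetic (plain userspace C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            for (unsigned int slice = 1; slice <= 8; slice++) {
                    unsigned int child  = (slice + 1) >> 1; /* p->time_slice */
                    unsigned int parent = slice >> 1;       /* current->time_slice */
                    printf("slice=%u -> child=%u parent=%u total=%u\n",
                           slice, child, parent, child + parent);
            }
            return 0;
    }

Note the slice == 1 row: the child takes the whole tick and the parent is left with none, which is exactly the rare case sched_fork() handles by granting the parent one tick and immediately charging it via scheduler_tick().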
diff --git a/kernel/sched.c b/kernel/sched.c
index bf56c3cc980e..aabe4e13b7e3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -674,6 +674,60 @@ int wake_up_state(task_t *p, unsigned int state)
}
/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+void sched_fork(task_t *p)
+{
+ /*
+ * We mark the process as running here, but have not actually
+ * inserted it onto the runqueue yet. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
+ p->state = TASK_RUNNING;
+ INIT_LIST_HEAD(&p->run_list);
+ p->array = NULL;
+ spin_lock_init(&p->switch_lock);
+#ifdef CONFIG_PREEMPT
+ /*
+ * During context-switch we hold precisely one spinlock, which
+ * schedule_tail drops. (in the common case it's this_rq()->lock,
+ * but it also can be p->switch_lock.) So we compensate with a count
+ * of 1. Also, we want to start with kernel preemption disabled.
+ */
+ p->thread_info->preempt_count = 1;
+#endif
+ /*
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesn't change,
+ * resulting in more scheduling fairness.
+ */
+ local_irq_disable();
+ p->time_slice = (current->time_slice + 1) >> 1;
+ /*
+ * The remainder of the first timeslice might be recovered by
+ * the parent if the child exits early enough.
+ */
+ p->first_time_slice = 1;
+ current->time_slice >>= 1;
+ p->timestamp = sched_clock();
+ if (!current->time_slice) {
+ /*
+ * This case is rare, it happens when the parent has only
+ * a single jiffy left from its timeslice. Taking the
+ * runqueue lock is not a problem.
+ */
+ current->time_slice = 1;
+ preempt_disable();
+ scheduler_tick(0, 0);
+ local_irq_enable();
+ preempt_enable();
+ } else
+ local_irq_enable();
+}
+
+/*
* wake_up_forked_process - wake up a freshly forked process.
*
* This function will do some initial scheduler statistics housekeeping