diff options
| author | Andrew Morton <akpm@osdl.org> | 2004-05-09 23:29:19 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-09 23:29:19 -0700 |
| commit | 8c8cfc36d9ec9e9cd6a440fd7bf8b5404bd11635 (patch) | |
| tree | 4a0083c3db59626be538d56645069eaa6e01726d /include | |
| parent | a690c9b7ac0ddc28785c38526a69a7fe2e692500 (diff) | |
[PATCH] sched: balance-on-clone
From: Ingo Molnar <mingo@elte.hu>
Implement balancing during clone(). It does the following things:
- introduces SD_BALANCE_CLONE that can serve as a tool for an
architecture to limit the search-idlest-CPU scope on clone().
E.g. the 512-CPU systems should rather not enable this.
- uses the highest sd for the imbalance_pct, not this_rq (which didn't
make sense).
- unifies balance-on-exec and balance-on-clone via the find_idlest_cpu()
function. Gets rid of sched_best_cpu() which was still a bit
inconsistent IMO, it used 'min_load < load' as a condition for
balancing - while a more correct approach would be to use half of the
imbalance_pct, like passive balancing does.
- the patch also reintroduces the possibility to do SD_BALANCE_EXEC on
SMP systems, and activates it - to get testing.
- NOTE: there's one thing in this patch that is slightly unclean: I
introduced wake_up_forked_thread. I did this to make it easier to get
rid of this patch later (wake_up_forked_process() has lots of
dependencies in various architectures). If this capability remains in
the kernel then I'll clean it up and introduce one function for
wake_up_forked_process/thread.
- NOTE2: I added the SD_BALANCE_CLONE flag to the NUMA CPU template too.
Some NUMA architectures probably want to disable this.
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/sched.h | 23 |
1 files changed, 17 insertions, 6 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1ff71bf1f5ea..66faf991b373 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -547,10 +547,11 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) #define SD_BALANCE_NEWIDLE 1 /* Balance when about to become idle */ #define SD_BALANCE_EXEC 2 /* Balance on exec */ -#define SD_WAKE_IDLE 4 /* Wake to idle CPU on task wakeup */ -#define SD_WAKE_AFFINE 8 /* Wake task to waking CPU */ -#define SD_WAKE_BALANCE 16 /* Perform balancing at task wakeup */ -#define SD_SHARE_CPUPOWER 32 /* Domain members share cpu power */ +#define SD_BALANCE_CLONE 4 /* Balance on clone */ +#define SD_WAKE_IDLE 8 /* Wake to idle CPU on task wakeup */ +#define SD_WAKE_AFFINE 16 /* Wake task to waking CPU */ +#define SD_WAKE_BALANCE 32 /* Perform balancing at task wakeup */ +#define SD_SHARE_CPUPOWER 64 /* Domain members share cpu power */ struct sched_group { struct sched_group *next; /* Must be a circular list */ @@ -598,6 +599,8 @@ struct sched_domain { .cache_nice_tries = 0, \ .per_cpu_gain = 15, \ .flags = SD_BALANCE_NEWIDLE \ + | SD_BALANCE_EXEC \ + | SD_BALANCE_CLONE \ | SD_WAKE_AFFINE \ | SD_WAKE_IDLE \ | SD_SHARE_CPUPOWER, \ @@ -619,6 +622,8 @@ struct sched_domain { .cache_nice_tries = 1, \ .per_cpu_gain = 100, \ .flags = SD_BALANCE_NEWIDLE \ + | SD_BALANCE_EXEC \ + | SD_BALANCE_CLONE \ | SD_WAKE_AFFINE \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ @@ -640,6 +645,7 @@ struct sched_domain { .cache_nice_tries = 1, \ .per_cpu_gain = 100, \ .flags = SD_BALANCE_EXEC \ + | SD_BALANCE_CLONE \ | SD_WAKE_BALANCE, \ .last_balance = jiffies, \ .balance_interval = 1, \ @@ -659,7 +665,7 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifdef CONFIG_NUMA +#ifdef CONFIG_SMP extern void sched_balance_exec(void); #else #define sched_balance_exec() {} @@ -717,12 +723,17 @@ extern void do_timer(struct pt_regs *); extern int 
FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state)); extern int FASTCALL(wake_up_process(struct task_struct * tsk)); +extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk)); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); + extern void FASTCALL(wake_up_forked_thread(struct task_struct * tsk)); #else static inline void kick_process(struct task_struct *tsk) { } + static inline void wake_up_forked_thread(struct task_struct * tsk) + { + return wake_up_forked_process(tsk); + } #endif -extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk)); extern void FASTCALL(sched_fork(task_t * p)); extern void FASTCALL(sched_exit(task_t * p)); |
