From 2b75b53521e41a4b62ccafb7d2e2b978d3ba7eda Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 11 Jun 2002 04:30:21 +0200
Subject: - put the sync wakeup feature back in, based on Mike Kravetz's patch.

---
 include/linux/sched.h | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'include/linux')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 79988f7e6aa3..3b43d3bb1123 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -491,6 +491,7 @@ extern unsigned long prof_len;
 extern unsigned long prof_shift;
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, signed long timeout));
@@ -507,6 +508,11 @@ extern void FASTCALL(sched_exit(task_t * p));
 #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
 #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
 #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
+#ifdef CONFIG_SMP
+#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#else
+#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
+#endif
 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
 
 extern int in_group_p(gid_t);
-- cgit v1.2.3
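Where the sync variant pays off is a waker that is itself about to block: the _sync form tells the scheduler that the waker will give up the CPU shortly, so there is no need to preempt the waker (or kick another CPU) for the freshly woken task. Below is a minimal, hypothetical usage sketch; only wake_up_interruptible_sync() comes from the patch above, while the wait queue, flag and function names are invented and locking/memory barriers are omitted for brevity.

/*
 * Illustrative sketch only -- consumer_wq, data_ready and the two
 * functions are hypothetical; wake_up_interruptible_sync() is the
 * macro added by the patch above.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(consumer_wq);
static int data_ready;

static void producer_hand_off(void)
{
	data_ready = 1;
	/*
	 * The producer is about to sleep itself, so a full wakeup
	 * (which could preempt us immediately) would be wasted work:
	 * the sync variant just marks the consumer runnable.
	 */
	wake_up_interruptible_sync(&consumer_wq);

	/* ... producer now blocks on its own wait queue ... */
}

static int consumer_wait_for_data(void)
{
	/* Sleeps until the producer sets data_ready and wakes us. */
	return wait_event_interruptible(consumer_wq, data_ready);
}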
From 0ed4dd24ff471eb7542378359f2852e2bf12bb99 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 11 Jun 2002 06:02:35 +0200
Subject: - rq-lock optimization in the preemption case, from Robert Love, plus some more cleanups.

---
 include/linux/spinlock.h | 33 ++++++++++++++++++++-------------
 kernel/sched.c           | 42 ++++++++++++++++++++++++++++--------------
 2 files changed, 48 insertions(+), 27 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0e9f7247bc86..5e1cc9fae5df 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -157,6 +157,12 @@ do { \
 	preempt_enable(); \
 } while (0)
 
+#define spin_unlock_no_resched(lock) \
+do { \
+	_raw_spin_unlock(lock); \
+	preempt_enable_no_resched(); \
+} while (0)
+
 #define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
 #define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
 #define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
@@ -166,20 +172,21 @@ do { \
 
 #else
 
-#define preempt_get_count() (0)
-#define preempt_disable() do { } while (0)
+#define preempt_get_count() (0)
+#define preempt_disable() do { } while (0)
 #define preempt_enable_no_resched() do {} while(0)
-#define preempt_enable() do { } while (0)
-
-#define spin_lock(lock) _raw_spin_lock(lock)
-#define spin_trylock(lock) _raw_spin_trylock(lock)
-#define spin_unlock(lock) _raw_spin_unlock(lock)
-
-#define read_lock(lock) _raw_read_lock(lock)
-#define read_unlock(lock) _raw_read_unlock(lock)
-#define write_lock(lock) _raw_write_lock(lock)
-#define write_unlock(lock) _raw_write_unlock(lock)
-#define write_trylock(lock) _raw_write_trylock(lock)
+#define preempt_enable() do { } while (0)
+
+#define spin_lock(lock) _raw_spin_lock(lock)
+#define spin_trylock(lock) _raw_spin_trylock(lock)
+#define spin_unlock(lock) _raw_spin_unlock(lock)
+#define spin_unlock_no_resched(lock) _raw_spin_unlock(lock)
+
+#define read_lock(lock) _raw_read_lock(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_lock(lock) _raw_write_lock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define write_trylock(lock) _raw_write_trylock(lock)
 
 #endif
 
 /* "lock on reference count zero" */
diff --git a/kernel/sched.c b/kernel/sched.c
index c6fd5cb821b6..856ca71682d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -152,17 +152,21 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
 #define rt_task(p) ((p)->prio < MAX_RT_PRIO)
 
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
 	struct runqueue *rq;
 
 repeat_lock_task:
-	preempt_disable();
+	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock_irqsave(&rq->lock, *flags);
+	spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
 		spin_unlock_irqrestore(&rq->lock, *flags);
-		preempt_enable();
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -171,7 +175,23 @@ repeat_lock_task:
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
 	spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_enable();
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+	return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+	spin_unlock(&rq->lock);
+	local_irq_enable();
 }
 
 /*
@@ -364,9 +384,7 @@ void wake_up_forked_process(task_t * p)
 {
 	runqueue_t *rq;
 
-	preempt_disable();
-	rq = this_rq();
-	spin_lock_irq(&rq->lock);
+	rq = rq_lock(rq);
 
 	p->state = TASK_RUNNING;
 	if (!rt_task(p)) {
@@ -382,8 +400,7 @@ void wake_up_forked_process(task_t * p)
 	p->thread_info->cpu = smp_processor_id();
 	activate_task(p, rq);
 
-	spin_unlock_irq(&rq->lock);
-	preempt_enable();
+	rq_unlock(rq);
 }
 
 /*
@@ -1367,8 +1384,7 @@ asmlinkage long sys_sched_yield(void)
 	runqueue_t *rq;
 	prio_array_t *array;
 
-	preempt_disable();
-	rq = this_rq();
+	rq = rq_lock(rq);
 
 	/*
 	 * Decrease the yielding task's priority by one, to avoid
@@ -1378,7 +1394,6 @@ asmlinkage long sys_sched_yield(void)
 	 * If priority is already MAX_PRIO-1 then we still
 	 * roundrobin the task within the runlist.
 	 */
-	spin_lock_irq(&rq->lock);
 	array = current->array;
 	/*
 	 * If the task has reached maximum priority (or is a RT task)
@@ -1395,8 +1410,7 @@ asmlinkage long sys_sched_yield(void)
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
+	spin_unlock_no_resched(&rq->lock);
 
 	schedule();
-- cgit v1.2.3
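The point of spin_unlock_no_resched() is the pattern sys_sched_yield() now relies on: under CONFIG_PREEMPT a plain spin_unlock() ends in preempt_enable(), which may enter preempt_schedule() even though the caller is about to call schedule() explicitly anyway. The sketch below restates that pattern outside the diff; the function name is invented, while rq_lock() and spin_unlock_no_resched() are the primitives added above.

/*
 * Hypothetical restatement of the sys_sched_yield() locking pattern;
 * not part of the patch itself.
 */
static void requeue_self_and_yield(void)
{
	runqueue_t *rq;

	rq = rq_lock(rq);		/* interrupts off, rq->lock held */

	/* ... move current to the tail of its priority queue ... */

	/*
	 * A plain spin_unlock() would end in preempt_enable() and could
	 * reschedule right here; since schedule() is called on the next
	 * line anyway, release the lock and drop the preemption count
	 * without running that check.
	 */
	spin_unlock_no_resched(&rq->lock);
	schedule();
}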
From 019afe86fbee596319a952757ff23426319e5d18 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 11 Jun 2002 23:26:40 +0200
Subject: - squeeze a few more cycles out of the wakeup hotpath.

---
 include/linux/spinlock.h |  8 ++++++++
 kernel/sched.c           | 25 ++++++++++++++++++++-----
 2 files changed, 28 insertions(+), 5 deletions(-)
(limited to 'include/linux')

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5e1cc9fae5df..194541968c6a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -26,6 +26,7 @@
 #define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
 
 #define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
 #define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
 #define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
@@ -143,6 +144,12 @@ do { \
 	preempt_schedule(); \
 } while (0)
 
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
 #define spin_lock(lock) \
 do { \
 	preempt_disable(); \
@@ -176,6 +183,7 @@ do { \
 #define preempt_disable() do { } while (0)
 #define preempt_enable_no_resched() do {} while(0)
 #define preempt_enable() do { } while (0)
+#define preempt_check_resched() do { } while (0)
 
 #define spin_lock(lock) _raw_spin_lock(lock)
 #define spin_trylock(lock) _raw_spin_trylock(lock)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8366584b7887..d0a3d52502ab 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -156,6 +156,12 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
  * task_rq_lock - lock the runqueue a given task resides on and disable
  * interrupts. Note the ordering: we can safely lookup the task_rq without
  * explicitly disabling preemption.
+ *
+ * WARNING: to squeeze out a few more cycles we do not disable preemption
+ * explicitly (or implicitly), we just keep interrupts disabled. This means
+ * that within task_rq_lock/unlock sections you must be careful
+ * about locking/unlocking spinlocks, since they could cause an unexpected
+ * preemption.
  */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
@@ -164,9 +170,9 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 repeat_lock_task:
 	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock(&rq->lock);
+	_raw_spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
+		_raw_spin_unlock_irqrestore(&rq->lock, *flags);
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -174,7 +180,7 @@ repeat_lock_task:
 
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
+	_raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -282,8 +288,15 @@ static inline void resched_task(task_t *p)
 	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
 
 	if (!need_resched && !nrpolling && (p->thread_info->cpu != smp_processor_id()))
+		/*
+		 * NOTE: smp_send_reschedule() can be called from
+		 * spinlocked sections which do not have an elevated
+		 * preemption count. So the code either has to avoid
+		 * spinlocks, or has to put preempt_disable() and
+		 * preempt_enable_no_resched() around the code.
+		 */
 		smp_send_reschedule(p->thread_info->cpu);
-	preempt_enable();
+	preempt_enable_no_resched();
 #else
 	set_tsk_need_resched(p);
 #endif
@@ -334,8 +347,10 @@ repeat:
  */
 void kick_if_running(task_t * p)
 {
-	if (p == task_rq(p)->curr)
+	if (p == task_rq(p)->curr) {
 		resched_task(p);
+		preempt_check_resched();
+	}
 }
 #endif
-- cgit v1.2.3
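Taken together, the changes above establish a convention: code that can run inside a raw-locked, interrupts-off section (such as resched_task() called under task_rq_lock()) re-enables preemption with preempt_enable_no_resched(), and the outermost caller performs the deferred check with preempt_check_resched() once preemption is safe again, which is exactly what kick_if_running() now does. A minimal sketch of that convention follows; the helper and caller names are invented, and only the preempt_* primitives and set_tsk_need_resched() come from the patches above.

/*
 * Hypothetical helper/caller pair illustrating the convention.
 */
static void flag_for_reschedule(task_t *p)
{
	/*
	 * This may be reached from a section that took its lock with
	 * the _raw_ primitives, i.e. without an elevated preemption
	 * count, so re-enabling preemption here must not schedule.
	 */
	preempt_disable();
	set_tsk_need_resched(p);	/* p may be running on this CPU */
	preempt_enable_no_resched();	/* drop the count, skip the check */
}

void caller_outside_all_locks(task_t *p)
{
	flag_for_reschedule(p);
	/* No locks held any more: honour a self-targeted reschedule. */
	preempt_check_resched();
}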