diff options
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched.c | 37 |
| -rw-r--r-- | kernel/timer.c | 4 |
2 files changed, 22 insertions, 19 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 9e19d4c0d4a9..b42029abe679 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -225,6 +225,13 @@ static DEFINE_PER_CPU(struct runqueue, runqueues); #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) +extern unsigned long __scheduling_functions_start_here; +extern unsigned long __scheduling_functions_end_here; +const unsigned long scheduling_functions_start_here = + (unsigned long)&__scheduling_functions_start_here; +const unsigned long scheduling_functions_end_here = + (unsigned long)&__scheduling_functions_end_here; + /* * Default context-switch locking: */ @@ -1587,12 +1594,10 @@ out: rebalance_tick(rq, 0); } -void scheduling_functions_start_here(void) { } - /* * schedule() is the main scheduler function. */ -asmlinkage void schedule(void) +asmlinkage void __sched schedule(void) { long *switch_count; task_t *prev, *next; @@ -1731,7 +1736,7 @@ EXPORT_SYMBOL(schedule); * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. 
*/ -asmlinkage void preempt_schedule(void) +asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); @@ -1869,7 +1874,7 @@ void fastcall complete_all(struct completion *x) spin_unlock_irqrestore(&x->wait.lock, flags); } -void fastcall wait_for_completion(struct completion *x) +void fastcall __sched wait_for_completion(struct completion *x) { might_sleep(); spin_lock_irq(&x->wait.lock); @@ -1907,7 +1912,7 @@ EXPORT_SYMBOL(wait_for_completion); __remove_wait_queue(q, &wait); \ spin_unlock_irqrestore(&q->lock, flags); -void fastcall interruptible_sleep_on(wait_queue_head_t *q) +void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR @@ -1920,7 +1925,7 @@ void fastcall interruptible_sleep_on(wait_queue_head_t *q) EXPORT_SYMBOL(interruptible_sleep_on); -long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) +long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR @@ -1935,7 +1940,7 @@ long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) EXPORT_SYMBOL(interruptible_sleep_on_timeout); -void fastcall sleep_on(wait_queue_head_t *q) +void fastcall __sched sleep_on(wait_queue_head_t *q) { SLEEP_ON_VAR @@ -1948,7 +1953,7 @@ void fastcall sleep_on(wait_queue_head_t *q) EXPORT_SYMBOL(sleep_on); -long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout) +long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) { SLEEP_ON_VAR @@ -1963,8 +1968,6 @@ long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout) EXPORT_SYMBOL(sleep_on_timeout); -void scheduling_functions_end_here(void) { } - void set_user_nice(task_t *p, long nice) { unsigned long flags; @@ -2424,7 +2427,7 @@ asmlinkage long sys_sched_yield(void) return 0; } -void __cond_resched(void) +void __sched __cond_resched(void) { set_current_state(TASK_RUNNING); schedule(); @@ -2438,7 +2441,7 @@ EXPORT_SYMBOL(__cond_resched); * 
this is a shortcut for kernel-space yielding - it marks the * thread runnable and calls sys_sched_yield(). */ -void yield(void) +void __sched yield(void) { set_current_state(TASK_RUNNING); sys_sched_yield(); @@ -2453,7 +2456,7 @@ EXPORT_SYMBOL(yield); * But don't do that if it is a deliberate, throttling IO wait (this task * has set its backing_dev_info: the queue against which it should throttle) */ -void io_schedule(void) +void __sched io_schedule(void) { struct runqueue *rq = this_rq(); @@ -2464,7 +2467,7 @@ void io_schedule(void) EXPORT_SYMBOL(io_schedule); -long io_schedule_timeout(long timeout) +long __sched io_schedule_timeout(long timeout) { struct runqueue *rq = this_rq(); long ret; @@ -3010,7 +3013,7 @@ EXPORT_SYMBOL(__might_sleep); * * Called inside preempt_disable(). */ -void __preempt_spin_lock(spinlock_t *lock) +void __sched __preempt_spin_lock(spinlock_t *lock) { if (preempt_count() > 1) { _raw_spin_lock(lock); @@ -3026,7 +3029,7 @@ void __preempt_spin_lock(spinlock_t *lock) EXPORT_SYMBOL(__preempt_spin_lock); -void __preempt_write_lock(rwlock_t *lock) +void __sched __preempt_write_lock(rwlock_t *lock) { if (preempt_count() > 1) { _raw_write_lock(lock); diff --git a/kernel/timer.c b/kernel/timer.c index f53e0749b0d2..cbcb5522866d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -996,7 +996,7 @@ static void process_timeout(unsigned long __data) * * In all cases the return value is guaranteed to be non-negative. */ -fastcall signed long schedule_timeout(signed long timeout) +fastcall signed long __sched schedule_timeout(signed long timeout) { struct timer_list timer; unsigned long expire; @@ -1056,7 +1056,7 @@ asmlinkage long sys_gettid(void) return current->pid; } -static long nanosleep_restart(struct restart_block *restart) +static long __sched nanosleep_restart(struct restart_block *restart) { unsigned long expire = restart->arg0, now = jiffies; struct timespec __user *rmtp = (struct timespec __user *) restart->arg1; |
