From fe7de49f9d4e53f24ec9ef762a503f70b562341c Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Wed, 20 Oct 2010 16:01:12 -0700
Subject: sched: Make sched_param argument static in sched_setscheduler() callers

Andrew Morton pointed out that almost all sched_setscheduler() callers are
using fixed parameters and can be converted to static. This reduces runtime
memory use a little.

Signed-off-by: KOSAKI Motohiro
Reported-by: Andrew Morton
Acked-by: James Morris
Cc: Ingo Molnar
Cc: Steven Rostedt
Signed-off-by: Andrew Morton
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 kernel/kthread.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/kthread.c')

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2dc3786349d1..74cf6f5e7ade 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		struct sched_param param = { .sched_priority = 0 };
+		static struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
--
cgit v1.2.3

From 4f32e9b1f812fd6c00cc85a127583fefbdedaedc Mon Sep 17 00:00:00 2001
From: Yong Zhang
Date: Wed, 22 Dec 2010 10:27:53 +0100
Subject: kthread_work: make lockdep happy

The spinlock in kthread_worker and the wait_queue_head in kthread_work should
both be visible to lockdep, so change the interface to make it suitable for
CONFIG_LOCKDEP.

tj: comment update

Reported-by: Nicolas
Signed-off-by: Yong Zhang
Signed-off-by: Andy Walls
Tested-by: Andy Walls
Cc: Tejun Heo
Cc: Andrew Morton
Signed-off-by: Tejun Heo
---
 include/linux/kthread.h | 45 +++++++++++++++++++++++++++++++++++----------
 kernel/kthread.c        | 11 +++++++++++
 2 files changed, 46 insertions(+), 10 deletions(-)

(limited to 'kernel/kthread.c')

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 685ea65eb803..ce0775aa64c3 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -81,16 +81,41 @@ struct kthread_work {
 #define DEFINE_KTHREAD_WORK(work, fn)					\
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
-static inline void init_kthread_worker(struct kthread_worker *worker)
-{
-	*worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
-}
-
-static inline void init_kthread_work(struct kthread_work *work,
-				     kthread_work_func_t fn)
-{
-	*work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
-}
+/*
+ * kthread_worker.lock and kthread_work.done need their own lockdep class
+ * keys if they are defined on stack with lockdep enabled. Use the
+ * following macros when defining them on stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
+	({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
+	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+# define KTHREAD_WORK_INIT_ONSTACK(work, fn)				\
+	({ init_kthread_work((&work), fn); work; })
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)				\
+	struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+			const char *name, struct lock_class_key *key);
+
+#define init_kthread_worker(worker)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
+	} while (0)
+
+#define init_kthread_work(work, fn)					\
+	do {								\
+		memset((work), 0, sizeof(struct kthread_work));	\
+		INIT_LIST_HEAD(&(work)->node);				\
+		(work)->func = (fn);					\
+		init_waitqueue_head(&(work)->done);			\
+	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2dc3786349d1..ca61bbdd44b2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -265,6 +265,17 @@ int kthreadd(void *unused)
 	return 0;
 }
 
+void __init_kthread_worker(struct kthread_worker *worker,
+				const char *name,
+				struct lock_class_key *key)
+{
+	spin_lock_init(&worker->lock);
+	lockdep_set_class_and_name(&worker->lock, key, name);
+	INIT_LIST_HEAD(&worker->work_list);
+	worker->task = NULL;
+}
+EXPORT_SYMBOL_GPL(__init_kthread_worker);
+
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
  * @worker_ptr: pointer to initialized kthread_worker
--
cgit v1.2.3

From c9b5f501ef1580faa30c40c644b7691870462201 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 7 Jan 2011 13:41:40 +0100
Subject: sched: Constify function scope static struct sched_param usage

Function-scope statics are discouraged because they are easily overlooked
and can cause subtle bugs/races due to their global (non-SMP safe) nature.

Linus noticed that we did this for sched_param - at minimum make them const.
Suggested-by: Linus Torvalds
Signed-off-by: Peter Zijlstra
LKML-Reference: Message-ID:
Signed-off-by: Ingo Molnar
---
 kernel/irq/manage.c           | 2 +-
 kernel/kthread.c              | 2 +-
 kernel/softirq.c              | 2 +-
 kernel/trace/trace_selftest.c | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'kernel/kthread.c')

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 91a5fa25054e..0caa59f747dd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -577,7 +577,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  */
 static int irq_thread(void *data)
 {
-	static struct sched_param param = {
+	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
 	struct irqaction *action = data;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5355cfd44a3f..c55afba990a3 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		static struct sched_param param = { .sched_priority = 0 };
+		static const struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4d918a91881..c10150cb456b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -853,7 +853,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		static struct sched_param param = {
+		static const struct sched_param param = {
 			.sched_priority = MAX_RT_PRIO-1
 		};
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 562c56e048fd..659732eba07c 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a RT thread, doesn't need to be too high */
-	static struct sched_param param = { .sched_priority = 5 };
+	static const struct sched_param param = { .sched_priority = 5 };
 	struct completion *x = data;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
--
cgit v1.2.3
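
For readers wondering how the on-stack helpers added by commit 4f32e9b1f812 fit
together, the following is a minimal, hypothetical usage sketch rather than code
from any of the patches above. The names my_work_fn, my_run_once and "my_worker"
are made up for illustration; it only relies on kthread_worker calls that already
existed at that point (kthread_run(), kthread_worker_fn(), queue_kthread_work(),
flush_kthread_worker(), kthread_stop()).

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in the worker thread's context. */
	pr_info("kthread_work executed\n");
}

static int my_run_once(void)
{
	/* Each on-stack worker gets its own lockdep class via the macro. */
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
	struct kthread_work work;
	struct task_struct *task;

	/* init_kthread_work() also gives work.done a static lockdep key. */
	init_kthread_work(&work, my_work_fn);

	task = kthread_run(kthread_worker_fn, &worker, "my_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);

	queue_kthread_work(&worker, &work);
	flush_kthread_worker(&worker);	/* wait until the work has run */

	return kthread_stop(task);	/* kthread_worker_fn() then exits */
}

The point of the _ONSTACK variants is that every definition site expands through
init_kthread_worker()/init_kthread_work(), which register a static lock_class_key
with lockdep (as spin_lock_init() and init_waitqueue_head() do), which is what
lockdep requires for lock objects that live on the stack.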