| author | Tony Lindgren <tony@atomide.com> | 2017-11-28 07:06:34 -0800 |
|---|---|---|
| committer | Tony Lindgren <tony@atomide.com> | 2017-11-28 07:06:34 -0800 |
| commit | 2db57789e6612ce0cf2fcbb577a1c8307b708566 (patch) | |
| tree | dd9f9e3dffabbf5cb932fcf5055ab329ca940fa0 /kernel/irq_work.c | |
| parent | f0c96c6d40312b1a76cd36709dc3eb5948c1b97f (diff) | |
| parent | e9a9bb4e4779ca74cb52a6e2f8acbc0881d3bb18 (diff) | |
Merge branch 'soc-fixes' into omap-for-v4.15/fixes
Diffstat (limited to 'kernel/irq_work.c')
| -rw-r--r-- | kernel/irq_work.c | 21 |
1 file changed, 10 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..40e9d739c169 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,7 +56,6 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-#ifdef CONFIG_SMP
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
@@ -68,6 +67,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
+#ifdef CONFIG_SMP
+
 	/* Arch remote IPI send/receive backend aren't NMI safe */
 	WARN_ON_ONCE(in_nmi());
 
@@ -78,10 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
+#else /* #ifdef CONFIG_SMP */
+	irq_work_queue(work);
+#endif /* #else #ifdef CONFIG_SMP */
+
 	return true;
 }
-EXPORT_SYMBOL_GPL(irq_work_queue_on);
-#endif
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
@@ -128,9 +131,9 @@ bool irq_work_needs_cpu(void)
 
 static void irq_work_run_list(struct llist_head *list)
 {
-	unsigned long flags;
-	struct irq_work *work;
+	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
+	unsigned long flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -138,11 +141,7 @@ static void irq_work_run_list(struct llist_head *list)
 		return;
 
 	llnode = llist_del_all(list);
-	while (llnode != NULL) {
-		work = llist_entry(llnode, struct irq_work, llnode);
-
-		llnode = llist_next(llnode);
-
+	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
 		/*
 		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
@@ -188,7 +187,7 @@ void irq_work_tick(void)
  */
 void irq_work_sync(struct irq_work *work)
 {
-	WARN_ON_ONCE(irqs_disabled());
+	lockdep_assert_irqs_enabled();
 
 	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
```
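The core cleanup in this diff replaces the open-coded `llist_next()` walk in `irq_work_run_list()` with `llist_for_each_entry_safe()`. The `_safe` variant caches the next node before the loop body runs, so the body may clear the PENDING bit and let the work item be re-queued or freed without breaking the traversal. Below is a standalone userspace sketch of that pattern; the list type and the `for_each`-style macro are hypothetical stand-ins for the kernel's `<linux/llist.h>`, written with GNU C `typeof` as the kernel headers are:

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal singly linked list node, mimicking struct llist_node. */
struct node {
	struct node *next;
};

/* An embedder, playing the role of struct irq_work. */
struct work_item {
	int id;
	struct node llnode;
};

/* Recover the embedding struct from a node pointer (as llist_entry() does). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * Safe iteration: 'tmp' is computed from 'pos' *before* the loop body
 * runs, so the body may free or re-queue 'pos' without corrupting the
 * walk -- the same guarantee llist_for_each_entry_safe() gives the
 * kernel loop above.
 */
#define for_each_entry_safe(pos, tmp, head, member)                            \
	for (pos = (head) ? container_of((head), typeof(*pos), member) : NULL;\
	     pos && ((tmp = (pos)->member.next ?                               \
			container_of((pos)->member.next, typeof(*pos), member)\
			: NULL), 1);                                           \
	     pos = tmp)

int main(void)
{
	struct node *head = NULL;
	struct work_item *work, *tmp;

	/* Push three items, LIFO, the way llist_add() builds raised_list. */
	for (int i = 0; i < 3; i++) {
		struct work_item *w = malloc(sizeof(*w));
		w->id = i;
		w->llnode.next = head;
		head = &w->llnode;
	}

	/*
	 * The old open-coded loop had to call llist_next() before running
	 * the work, because the item may be reused as soon as PENDING is
	 * cleared; the _safe macro encodes exactly that ordering.
	 */
	for_each_entry_safe(work, tmp, head, llnode) {
		printf("running work %d\n", work->id);
		free(work);	/* safe: 'tmp' was captured already */
	}
	return 0;
}
```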
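The other two hunks concern the API surface: folding the `#ifdef CONFIG_SMP` into `irq_work_queue_on()` lets UP builds fall back to plain local queueing instead of lacking the function entirely, and `lockdep_assert_irqs_enabled()` turns `irq_work_sync()`'s precondition into a lockdep-tracked assertion rather than a raw `irqs_disabled()` check. For context, here is a hypothetical module-style sketch of a caller of this API; the `demo_` names are invented for illustration and are not part of this patch:

```c
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/smp.h>

static struct irq_work demo_work;

/* Callback runs in hard-IRQ context shortly after the work is raised. */
static void demo_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	init_irq_work(&demo_work, demo_irq_work_fn);
	/* Queue on the local CPU; on UP, irq_work_queue_on() now does this too. */
	irq_work_queue(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Wait for a pending callback to finish.  Must be called with IRQs
	 * enabled -- after this merge that is asserted via lockdep instead
	 * of WARN_ON_ONCE(irqs_disabled()).
	 */
	irq_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```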
