Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/chip.c        35
-rw-r--r--  kernel/irq/handle.c      10
-rw-r--r--  kernel/irq/irqdesc.c     24
-rw-r--r--  kernel/irq/irqdomain.c   32
-rw-r--r--  kernel/irq/manage.c     176
-rw-r--r--  kernel/irq/msi.c          2
-rw-r--r--  kernel/irq/proc.c         2
7 files changed, 165 insertions(+), 116 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3ffa0d80ddd1..678f094d261a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -897,8 +897,9 @@ void handle_percpu_irq(struct irq_desc *desc)
void handle_percpu_devid_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int cpu = smp_processor_id();
+ struct irqaction *action;
irqreturn_t res;
/*
@@ -910,12 +911,15 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
+ for (action = desc->action; action; action = action->next)
+ if (cpumask_test_cpu(cpu, action->affinity))
+ break;
+
if (likely(action)) {
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
} else {
- unsigned int cpu = smp_processor_id();
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
if (enabled)
@@ -929,31 +933,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-/**
- * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
- * dev ids
- * @desc: the interrupt description structure for this irq
- *
- * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
- * as a percpu pointer.
- */
-void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
- unsigned int irq = irq_desc_get_irq(desc);
- irqreturn_t res;
-
- __kstat_incr_irqs_this_cpu(desc);
-
- trace_irq_handler_entry(irq, action);
- res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
- trace_irq_handler_exit(irq, action, res);
-
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
-}
-
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
int is_chained, const char *name)
@@ -1030,7 +1009,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
- scoped_irqdesc_get_and_lock(irq, 0)
+ scoped_irqdesc_get_and_buslock(irq, 0)
__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
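A minimal sketch of the lookup the reworked handle_percpu_devid_irq() performs above: with shared percpu-devid actions, each irqaction carries its own ->affinity mask and the masks are kept disjoint, so at most one action can match the local CPU. The helper name is made up for illustration.

static struct irqaction *percpu_action_for_this_cpu(struct irq_desc *desc)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;

	/* Disjoint per-action affinities guarantee at most one match. */
	for (action = desc->action; action; action = action->next) {
		if (cpumask_test_cpu(cpu, action->affinity))
			return action;
	}
	return NULL;
}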
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e103451243a0..786f5570a640 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,7 +133,15 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
*/
atomic_inc(&desc->threads_active);
- wake_up_process(action->thread);
+ /*
+ * This might be a premature wakeup before the thread reached the
+ * thread function and set the IRQTF_READY bit. It's waiting in
+ * kthread code with state UNINTERRUPTIBLE. Once it reaches the
+ * thread function it waits with INTERRUPTIBLE. The wakeup is not
+ * lost in that case because the thread is guaranteed to observe
+ * the RUN flag before it goes to sleep in wait_for_interrupt().
+ */
+ wake_up_state(action->thread, TASK_INTERRUPTIBLE);
}
static DEFINE_STATIC_KEY_FALSE(irqhandler_duration_check_enabled);
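A simplified sketch of the wait loop the new comment relies on, assuming the usual set_current_state()/recheck/schedule() pattern of the irq thread (an approximation, not the actual irq_wait_for_interrupt()). wake_up_state(t, TASK_INTERRUPTIBLE) only kicks interruptible sleepers, which is exactly the state the thread is in once it has published IRQTF_READY:

static int wait_for_runthread(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Recheck after setting state: a wakeup issued here is not lost. */
		if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}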
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index db714d3014b5..6acf268f005b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -879,8 +879,7 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
chip_bus_sync_unlock(desc);
}
-int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity)
+int irq_set_percpu_devid(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -892,31 +891,10 @@ int irq_set_percpu_devid_partition(unsigned int irq,
if (!desc->percpu_enabled)
return -ENOMEM;
- desc->percpu_affinity = affinity ? : cpu_possible_mask;
-
irq_set_percpu_devid_flags(irq);
return 0;
}
-int irq_set_percpu_devid(unsigned int irq)
-{
- return irq_set_percpu_devid_partition(irq, NULL);
-}
-
-int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (!desc || !desc->percpu_enabled)
- return -EINVAL;
-
- if (affinity)
- cpumask_copy(affinity, desc->percpu_affinity);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
-
void kstat_incr_irq_this_cpu(unsigned int irq)
{
kstat_incr_irqs_this_cpu(irq_to_desc(irq));
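A hedged usage sketch: with the _partition variants removed, irq_set_percpu_devid() is the single entry point for marking a line percpu-devid before it is requested ("virq" stands for a previously allocated Linux irq number):

	int err = irq_set_percpu_devid(virq);

	if (err)	/* e.g. -ENOMEM if the percpu_enabled mask allocation fails */
		return err;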
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dc473faadcc8..2652c4cfd877 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -867,13 +867,9 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
}
EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec);
-unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+static struct irq_domain *fwspec_to_domain(struct irq_fwspec *fwspec)
{
struct irq_domain *domain;
- struct irq_data *irq_data;
- irq_hw_number_t hwirq;
- unsigned int type = IRQ_TYPE_NONE;
- int virq;
if (fwspec->fwnode) {
domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
@@ -883,6 +879,32 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
domain = irq_default_domain;
}
+ return domain;
+}
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ struct irq_domain *domain = fwspec_to_domain(fwspec);
+
+ memset(info, 0, sizeof(*info));
+
+ if (!domain || !domain->ops->get_fwspec_info)
+ return 0;
+
+ return domain->ops->get_fwspec_info(fwspec, info);
+}
+#endif
+
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+{
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_domain *domain;
+ struct irq_data *irq_data;
+ irq_hw_number_t hwirq;
+ int virq;
+
+ domain = fwspec_to_domain(fwspec);
if (!domain) {
pr_warn("no irq domain found for %s !\n",
of_node_full_name(to_of_node(fwspec->fwnode)));
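A caller-side sketch of how the refactoring composes: both the mapping path and the new info path share fwspec_to_domain(), and irq_populate_fwspec_info() is only available under CONFIG_IRQ_DOMAIN_HIERARCHY. The layout of struct irq_fwspec_info is not part of this hunk, so it stays opaque here:

	struct irq_fwspec_info info;
	unsigned int virq;
	int err;

	/* info is zero-filled when the domain lacks ->get_fwspec_info(). */
	err = irq_populate_fwspec_info(&fwspec, &info);
	if (err)
		return err;

	virq = irq_create_fwspec_mapping(&fwspec);
	if (!virq)
		return -EINVAL;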
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..0bb29316b436 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -547,7 +547,7 @@ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *noti
INIT_WORK(&notify->work, irq_affinity_notify);
}
- scoped_guard(raw_spinlock_irqsave, &desc->lock) {
+ scoped_guard(raw_spinlock_irq, &desc->lock) {
old_notify = desc->affinity_notify;
desc->affinity_notify = notify;
}
@@ -659,7 +659,7 @@ void __disable_irq(struct irq_desc *desc)
static int __disable_irq_nosync(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
__disable_irq(scoped_irqdesc);
return 0;
}
@@ -789,7 +789,7 @@ void __enable_irq(struct irq_desc *desc)
*/
void enable_irq(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
struct irq_desc *desc = scoped_irqdesc;
if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq))
@@ -1001,7 +1001,6 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
cpumask_var_t mask;
- bool valid = false;
if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
return;
@@ -1018,21 +1017,13 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *a
}
scoped_guard(raw_spinlock_irq, &desc->lock) {
- /*
- * This code is triggered unconditionally. Check the affinity
- * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
- */
- if (cpumask_available(desc->irq_common_data.affinity)) {
- const struct cpumask *m;
+ const struct cpumask *m;
- m = irq_data_get_effective_affinity_mask(&desc->irq_data);
- cpumask_copy(mask, m);
- valid = true;
- }
+ m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+ cpumask_copy(mask, m);
}
- if (valid)
- set_cpus_allowed_ptr(current, mask);
+ set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
}
#else
@@ -1239,7 +1230,10 @@ static int irq_thread(void *data)
irq_thread_set_ready(desc, action);
- sched_set_fifo(current);
+ if (action->handler == irq_forced_secondary_handler)
+ sched_set_fifo_secondary(current);
+ else
+ sched_set_fifo(current);
if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
&action->thread_flags))
@@ -1405,19 +1399,39 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
* references an already freed task_struct.
*/
new->thread = get_task_struct(t);
+
+ /*
+ * The affinity cannot be established yet, but it will be once the
+ * interrupt is enabled. Delay and defer the actual setting to the
+ * thread itself once it is ready to run. In the meantime, prevent
+ * it from ever being re-affined directly by cpuset or
+ * housekeeping. The proper way to do it is to re-affine the whole
+ * vector.
+ */
+ kthread_bind_mask(t, cpu_possible_mask);
+
/*
- * Tell the thread to set its affinity. This is
- * important for shared interrupt handlers as we do
- * not invoke setup_affinity() for the secondary
- * handlers as everything is already set up. Even for
- * interrupts marked with IRQF_NO_BALANCE this is
- * correct as we want the thread to move to the cpu(s)
- * on which the requesting code placed the interrupt.
+ * Ensure the thread adjusts the affinity once it reaches the
+ * thread function.
*/
- set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ new->thread_flags = BIT(IRQTF_AFFINITY);
+
return 0;
}
+static bool valid_percpu_irqaction(struct irqaction *old, struct irqaction *new)
+{
+ do {
+ if (cpumask_intersects(old->affinity, new->affinity) ||
+ old->percpu_dev_id == new->percpu_dev_id)
+ return false;
+
+ old = old->next;
+ } while (old);
+
+ return true;
+}
+
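The pairwise rule valid_percpu_irqaction() enforces, restated as a standalone sketch: two actions may share a percpu-devid line only if their CPU sets are disjoint and their percpu cookies differ, since the cookie is what __free_percpu_irq() later uses to find the action again.

static bool may_share_percpu_line(const struct irqaction *a,
				  const struct irqaction *b)
{
	return !cpumask_intersects(a->affinity, b->affinity) &&
	       a->percpu_dev_id != b->percpu_dev_id;
}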
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -1438,6 +1452,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
struct irqaction *old, **old_ptr;
unsigned long flags, thread_mask = 0;
int ret, nested, shared = 0;
+ bool per_cpu_devid;
if (!desc)
return -EINVAL;
@@ -1447,6 +1462,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!try_module_get(desc->owner))
return -ENODEV;
+ per_cpu_devid = irq_settings_is_per_cpu_devid(desc);
+
new->irq = irq;
/*
@@ -1554,13 +1571,20 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
unsigned int oldtype;
- if (irq_is_nmi(desc)) {
+ if (irq_is_nmi(desc) && !per_cpu_devid) {
pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
new->name, irq, desc->irq_data.chip->name);
ret = -EINVAL;
goto out_unlock;
}
+ if (per_cpu_devid && !valid_percpu_irqaction(old, new)) {
+ pr_err("Overlapping affinities for %s (irq %d) on irqchip %s.\n",
+ new->name, irq, desc->irq_data.chip->name);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* If nobody did set the configuration before, inherit
* the one provided by the requester.
@@ -1711,7 +1735,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!(new->flags & IRQF_NO_AUTOEN) &&
irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
- } else {
+ } else if (!per_cpu_devid) {
/*
* Shared interrupts do not go well with disabling
* auto enable. The sharing interrupt might request
@@ -2346,7 +2370,7 @@ void disable_percpu_nmi(unsigned int irq)
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction *action;
+ struct irqaction *action, **action_ptr;
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -2354,21 +2378,33 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
return NULL;
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
- action = desc->action;
- if (!action || action->percpu_dev_id != dev_id) {
- WARN(1, "Trying to free already-free IRQ %d\n", irq);
- return NULL;
+ action_ptr = &desc->action;
+ for (;;) {
+ action = *action_ptr;
+
+ if (!action) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ return NULL;
+ }
+
+ if (action->percpu_dev_id == dev_id)
+ break;
+
+ action_ptr = &action->next;
}
- if (!cpumask_empty(desc->percpu_enabled)) {
- WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
- irq, cpumask_first(desc->percpu_enabled));
+ if (cpumask_intersects(desc->percpu_enabled, action->affinity)) {
+ WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq,
+ cpumask_first_and(desc->percpu_enabled, action->affinity));
return NULL;
}
/* Found it - now remove it from the list of entries: */
- desc->action = NULL;
- desc->istate &= ~IRQS_NMI;
+ *action_ptr = action->next;
+
+ /* Demote from NMI if we killed the last action */
+ if (!desc->action)
+ desc->istate &= ~IRQS_NMI;
}
unregister_handler_proc(irq, action);
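The removal above uses the pointer-to-pointer idiom, which unlinks head and middle entries through the same code path; a generic sketch:

struct node { struct node *next; };

static void list_remove(struct node **head, struct node *victim)
{
	struct node **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;	/* head or middle alike */
			return;
		}
	}
}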
@@ -2442,17 +2478,49 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
return retval;
}
+static
+struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long flags,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *dev_id)
+{
+ struct irqaction *action;
+
+ if (!affinity)
+ affinity = cpu_possible_mask;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ action->handler = handler;
+ action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
+ action->name = devname;
+ action->percpu_dev_id = dev_id;
+ action->affinity = affinity;
+
+ /*
+ * We allow some form of sharing for non-overlapping affinity
+ * masks. Obviously, covering all CPUs prevents any sharing in
+ * the first place.
+ */
+ if (!cpumask_equal(affinity, cpu_possible_mask))
+ action->flags |= IRQF_SHARED;
+
+ return action;
+}
+
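A hedged usage sketch of the extended request interface; the handler, device structure and mask below are made up. Flags must still be 0 or IRQF_TIMER, and a NULL affinity keeps the historical cpu_possible_mask behaviour:

struct my_dev_data { unsigned long count; };
static DEFINE_PER_CPU(struct my_dev_data, my_dev_data);

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev_data *d = dev_id;	/* the interrupted CPU's instance */

	d->count++;
	return IRQ_HANDLED;
}

static int my_request(unsigned int virq, const struct cpumask *cluster_mask)
{
	/* Only CPUs in cluster_mask will ever run my_handler(). */
	return __request_percpu_irq(virq, my_handler, 0, "my-dev",
				    cluster_mask, &my_dev_data);
}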
/**
* __request_percpu_irq - allocate a percpu interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @flags: Interrupt type flags (IRQF_TIMER only)
* @devname: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
* @dev_id: A percpu cookie passed back to the handler function
*
- * This call allocates interrupt resources and enables the interrupt on the
- * local CPU. If the interrupt is supposed to be enabled on other CPUs, it
- * has to be done on each CPU using enable_percpu_irq().
+ * This call allocates interrupt resources, but doesn't enable the interrupt
+ * on any CPU, as all percpu-devid interrupts are flagged with IRQ_NOAUTOEN.
+ * Enabling has to be done on each CPU using enable_percpu_irq().
*
* @dev_id must be globally unique. It is a per-cpu variable, and
* the handler gets called with the interrupted CPU's instance of
@@ -2460,7 +2528,7 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
*/
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *dev_id)
+ const cpumask_t *affinity, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -2477,15 +2545,10 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
if (flags && flags != IRQF_TIMER)
return -EINVAL;
- action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ action = create_percpu_irqaction(handler, flags, devname, affinity, dev_id);
if (!action)
return -ENOMEM;
- action->handler = handler;
- action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
- action->name = devname;
- action->percpu_dev_id = dev_id;
-
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0) {
kfree(action);
@@ -2508,6 +2571,7 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @name: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
@@ -2524,8 +2588,8 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* If the interrupt line cannot be used to deliver NMIs, the function
* will fail, returning a negative value.
*/
-int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *name, void __percpu *dev_id)
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -2542,20 +2606,16 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
!irq_supports_nmi(desc))
return -EINVAL;
- /* The line cannot already be NMI */
- if (irq_is_nmi(desc))
+ /* The line cannot be NMI already if the new request covers all CPUs */
+ if (irq_is_nmi(desc) &&
+ (!affinity || cpumask_equal(affinity, cpu_possible_mask)))
return -EINVAL;
- action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ action = create_percpu_irqaction(handler, IRQF_NO_THREAD | IRQF_NOBALANCING,
+ name, affinity, dev_id);
if (!action)
return -ENOMEM;
- action->handler = handler;
- action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
- | IRQF_NOBALANCING;
- action->name = name;
- action->percpu_dev_id = dev_id;
-
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;
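What the relaxed NMI check buys, sketched with made-up handlers, masks and percpu cookies: two NMI requests may now target the same line as long as their CPU sets do not overlap, while a request covering all CPUs still fails on an already-NMI line:

/* Succeeds if cluster0_mask and cluster1_mask are disjoint. */
err = request_percpu_nmi(virq, pmu_a_handler, "pmu-a", cluster0_mask, &pmu_a_dev);
if (!err)
	err = request_percpu_nmi(virq, pmu_b_handler, "pmu-b", cluster1_mask, &pmu_b_dev);

/* Fails with -EINVAL: NULL affinity means cpu_possible_mask, which overlaps. */
err = request_percpu_nmi(virq, pmu_c_handler, "pmu-c", NULL, &pmu_c_dev);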
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index e7ad99254841..68886881fe10 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -706,7 +706,7 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
int i, ret;
- if (irq_find_mapping(domain, hwirq) > 0)
+ if (irq_resolve_mapping(domain, hwirq))
return -EEXIST;
if (domain->parent) {
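Why the switch: irq_find_mapping() translates a hwirq to a Linux irq number, while irq_resolve_mapping() returns the struct irq_data directly, so a pure existence check needs no number-to-data round trip. A sketch of the distinction:

/* Number-based: a second lookup is needed to get at the irq_data. */
unsigned int virq = irq_find_mapping(domain, hwirq);

/* Pointer-based: NULL means "not mapped", non-NULL is the data itself. */
struct irq_data *irqd = irq_resolve_mapping(domain, hwirq);
if (irqd)
	return -EEXIST;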
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 29c2404e743b..77258eafbf63 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -48,6 +48,8 @@ static int show_irq_affinity(int type, struct seq_file *m)
struct irq_desc *desc = irq_to_desc((long)m->private);
const struct cpumask *mask;
+ guard(raw_spinlock_irq)(&desc->lock);
+
switch (type) {
case AFFINITY:
case AFFINITY_LIST:
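The guard() added above comes from <linux/cleanup.h>: it acquires desc->lock for the rest of the enclosing scope and drops it automatically on every return path, which is why no explicit unlock appears in the later switch arms. A minimal sketch of the pattern:

static unsigned int read_depth_locked(struct irq_desc *desc)
{
	guard(raw_spinlock_irq)(&desc->lock);

	/* desc->lock held, interrupts disabled, until the function returns. */
	return desc->depth;
}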