From 530a8ba71b4c3b7fcee323dd997f4bab1be1a6ba Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 16 May 2025 14:35:35 -0700 Subject: KVM: Bound the number of dirty ring entries in a single reset at INT_MAX Cap the number of ring entries that are reset in a single ioctl to INT_MAX to ensure userspace isn't confused by a wrap into negative space, and so that, in a truly pathological scenario, KVM doesn't miss a TLB flush due to the count wrapping to zero. While the size of the ring is fixed at 0x10000 entries and KVM (currently) supports at most 4096, userspace is allowed to harvest entries from the ring while the reset is in-progress, i.e. it's possible for the ring to always have harvested entries. Opportunistically return an actual error code from the helper so that a future fix to handle pending signals can gracefully return -EINTR. Drop the function comment now that the return code is a standard 0/-errno (and because a future commit will add a proper lockdep assertion). Opportunistically drop a similarly stale comment for kvm_dirty_ring_push(). Cc: Peter Xu Cc: Yan Zhao Cc: Maxim Levitsky Cc: Binbin Wu Fixes: fb04a1eddb1a ("KVM: X86: Implement ring-based dirty memory tracking") Reviewed-by: James Houghton Reviewed-by: Binbin Wu Reviewed-by: Yan Zhao Reviewed-by: Peter Xu Link: https://lore.kernel.org/r/20250516213540.2546077-2-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/kvm_dirty_ring.h | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index da4d9b5f58f1..eb10d87adf7d 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -49,9 +49,10 @@ static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *r } static inline int kvm_dirty_ring_reset(struct kvm *kvm, - struct kvm_dirty_ring *ring) + struct kvm_dirty_ring *ring, + int *nr_entries_reset) { - return 0; + return -ENOENT; } static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, @@ -77,17 +78,8 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm); u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm); int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring, int index, u32 size); - -/* - * called with kvm->slots_lock held, returns the number of - * processed pages. - */ -int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring); - -/* - * returns =0: successfully pushed - * <0: unable to push, need to wait - */ +int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring, + int *nr_entries_reset); void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset); bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From cd4178d19420359554e3da6fd77ecfd0f58067ce Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 12 Jun 2025 16:51:47 -0700 Subject: KVM: arm64: WARN if unmapping a vLPI fails in any path When unmapping a vLPI, WARN if nullifying vCPU affinity fails, not just if failure occurs when freeing an ITE. If undoing vCPU affinity fails, then odds are very good that vLPI state tracking has gotten out of whack, i.e. that KVM and the GIC disagree on the state of an IRQ/vLPI. At best, inconsistent state means there is a lurking bug/flaw somewhere. At worst, the inconsistency could eventually be fatal to the host, e.g. if an ITS command fails because KVM's view of things doesn't match reality/hardware.
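To illustrate the direction of the fix with a minimal sketch (illustration only, not the patched GIC/ITS code; example_unmap_vlpi() is a made-up name): push the WARN into the unmap helper itself so that every unmap path is covered, rather than relying on each caller to check a return value it can do nothing useful with.

#include <linux/bug.h>
#include <linux/interrupt.h>

/* Sketch: the helper absorbs the error and screams once, for all callers. */
static void example_unmap_vlpi(unsigned int irq)
{
        /* Any failure here implies KVM's vLPI tracking disagrees with the ITS. */
        WARN_ON_ONCE(irq_set_vcpu_affinity(irq, NULL));
}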
Note, only the call from kvm_arch_irq_bypass_del_producer() by way of kvm_vgic_v4_unset_forwarding() doesn't already WARN. Common KVM's kvm_irq_routing_update() WARNs if kvm_arch_update_irqfd_routing() fails. For that path, if its_unmap_vlpi() fails in kvm_vgic_v4_unset_forwarding(), the only possible causes are that the GIC doesn't have a v4 ITS (from its_irq_set_vcpu_affinity()): /* Need a v4 ITS */ if (!is_v4(its_dev->its)) return -EINVAL; guard(raw_spinlock)(&its_dev->event_map.vlpi_lock); /* Unmap request? */ if (!info) return its_vlpi_unmap(d); or that KVM has gotten out of sync with the GIC/ITS (from its_vlpi_unmap()): if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) return -EINVAL; All of the above failure scenarios are warnable offences, as they should never occur absent a kernel/KVM bug. Acked-by: Oliver Upton Link: https://lore.kernel.org/all/aFWY2LTVIxz5rfhh@google.com Signed-off-by: Sean Christopherson --- arch/arm64/kvm/vgic/vgic-its.c | 2 +- arch/arm64/kvm/vgic/vgic-v4.c | 4 ++-- drivers/irqchip/irq-gic-v4.c | 4 ++-- include/linux/irqchip/arm-gic-v4.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 534049c7c94b..98630dae910d 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -758,7 +758,7 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite) if (irq) { scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) { if (irq->hw) - WARN_ON(its_unmap_vlpi(ite->irq->host_irq)); + its_unmap_vlpi(ite->irq->host_irq); irq->hw = false; } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 193946108192..911170d4a9c8 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -545,10 +545,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq) if (irq->hw) { atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count); irq->hw = false; - ret = its_unmap_vlpi(host_irq); + its_unmap_vlpi(host_irq); } raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(kvm, irq); - return ret; + return 0; } diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 58c28895f8c4..8455b4a5fbb0 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -342,10 +342,10 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) return irq_set_vcpu_affinity(irq, &info); } -int its_unmap_vlpi(int irq) +void its_unmap_vlpi(int irq) { irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY); - return irq_set_vcpu_affinity(irq, NULL); + WARN_ON_ONCE(irq_set_vcpu_affinity(irq, NULL)); } int its_prop_update_vlpi(int irq, u8 config, bool inv) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 7f1f11a5e4e4..0b0887099fd7 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -146,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe); int its_invall_vpe(struct its_vpe *vpe); int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); -int its_unmap_vlpi(int irq); +void its_unmap_vlpi(int irq); int its_prop_update_vlpi(int irq, u8 config, bool inv); int its_prop_update_vsgi(int irq, u8 priority, bool group); -- cgit v1.2.3 From 2b521d86ee80a436a92445b8206d38d75aeb39ea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 16 May 2025 16:07:29 -0700 Subject: irqbypass: Take ownership of producer/consumer token tracking 
Move ownership of IRQ bypass token tracking into irqbypass.ko, and explicitly require callers to pass an eventfd_ctx structure instead of a completely opaque token. Relying on producers and consumers to set the token appropriately is error prone, and hiding the fact that the token must be an eventfd_ctx pointer (for all intents and purposes) unnecessarily obfuscates the code and makes it more brittle. Reviewed-by: Kevin Tian Acked-by: Michael S. Tsirkin Reviewed-by: Alex Williamson Link: https://lore.kernel.org/r/20250516230734.2564775-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 4 ++-- drivers/vfio/pci/vfio_pci_intrs.c | 9 +++----- drivers/vhost/vdpa.c | 8 +++---- include/linux/irqbypass.h | 35 +++++++++++++++++-------------- virt/kvm/eventfd.c | 7 +++---- virt/lib/irqbypass.c | 44 +++++++++++++++++++++++++-------------- 6 files changed, 58 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b58a74c1722d..3dc93e2d4777 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13669,8 +13669,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, ret = kvm_x86_call(pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); if (ret) - printk(KERN_INFO "irq bypass consumer (token %p) unregistration" - " fails: %d\n", irqfd->consumer.token, ret); + printk(KERN_INFO "irq bypass consumer (eventfd %p) unregistration" + " fails: %d\n", irqfd->consumer.eventfd, ret); spin_unlock_irq(&kvm->irqfds.lock); diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 565966351dfa..d87fe116762a 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -505,15 +505,12 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, if (ret) goto out_put_eventfd_ctx; - ctx->producer.token = trigger; ctx->producer.irq = irq; - ret = irq_bypass_register_producer(&ctx->producer); + ret = irq_bypass_register_producer(&ctx->producer, trigger); if (unlikely(ret)) { dev_info(&pdev->dev, - "irq bypass producer (token %p) registration fails: %d\n", - ctx->producer.token, ret); - - ctx->producer.token = NULL; + "irq bypass producer (eventfd %p) registration fails: %d\n", + trigger, ret); } ctx->trigger = trigger; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 5a49b5a6d496..7b265ffda697 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -213,10 +213,10 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) return; vq->call_ctx.producer.irq = irq; - ret = irq_bypass_register_producer(&vq->call_ctx.producer); + ret = irq_bypass_register_producer(&vq->call_ctx.producer, vq->call_ctx.ctx); if (unlikely(ret)) - dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n", - qid, vq->call_ctx.producer.token, ret); + dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret = %d\n", + qid, vq->call_ctx.ctx, ret); } static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid) @@ -712,7 +712,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) vhost_vdpa_unsetup_vq_irq(v, idx); - vq->call_ctx.producer.token = NULL; } break; } @@ -753,7 +752,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, cb.callback = vhost_vdpa_virtqueue_cb; cb.private = vq; cb.trigger = vq->call_ctx.ctx; - vq->call_ctx.producer.token = vq->call_ctx.ctx; 
if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) vhost_vdpa_setup_vq_irq(v, idx); diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 9bdb2a781841..1b57d15ac4cf 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -10,6 +10,7 @@ #include +struct eventfd_ctx; struct irq_bypass_consumer; /* @@ -18,20 +19,20 @@ struct irq_bypass_consumer; * The IRQ bypass manager is a simple set of lists and callbacks that allows * IRQ producers (ex. physical interrupt sources) to be matched to IRQ * consumers (ex. virtualization hardware that allows IRQ bypass or offload) - * via a shared token (ex. eventfd_ctx). Producers and consumers register - * independently. When a token match is found, the optional @stop callback - * will be called for each participant. The pair will then be connected via - * the @add_* callbacks, and finally the optional @start callback will allow - * any final coordination. When either participant is unregistered, the - * process is repeated using the @del_* callbacks in place of the @add_* - * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings - * are not supported. + * via a shared eventfd_ctx. Producers and consumers register independently. + * When a producer and consumer are paired, i.e. an eventfd match is found, the + * optional @stop callback will be called for each participant. The pair will + * then be connected via the @add_* callbacks, and finally the optional @start + * callback will allow any final coordination. When either participant is + * unregistered, the process is repeated using the @del_* callbacks in place of + * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N + * pairings are not supported. */ /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer (non-NULL) + * @eventfd: eventfd context used to match producers and consumers * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -44,7 +45,7 @@ struct irq_bypass_consumer; */ struct irq_bypass_producer { struct list_head node; - void *token; + struct eventfd_ctx *eventfd; int irq; int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); @@ -57,7 +58,7 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management - * @token: opaque token to match between producer and consumer (non-NULL) + * @eventfd: eventfd context used to match producers and consumers * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) @@ -70,7 +71,7 @@ struct irq_bypass_producer { */ struct irq_bypass_consumer { struct list_head node; - void *token; + struct eventfd_ctx *eventfd; int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*del_producer)(struct irq_bypass_consumer *, @@ -79,9 +80,11 @@ struct irq_bypass_consumer { void (*start)(struct irq_bypass_consumer *); }; -int irq_bypass_register_producer(struct irq_bypass_producer *); -void irq_bypass_unregister_producer(struct irq_bypass_producer *); -int irq_bypass_register_consumer(struct irq_bypass_consumer *); -void 
irq_bypass_unregister_consumer(struct irq_bypass_consumer *); +int irq_bypass_register_producer(struct irq_bypass_producer *producer, + struct eventfd_ctx *eventfd); +void irq_bypass_unregister_producer(struct irq_bypass_producer *producer); +int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, + struct eventfd_ctx *eventfd); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer); #endif /* IRQBYPASS_H */ diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 11e5d1e3f12e..5bc6abe30748 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -426,15 +426,14 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) if (kvm_arch_has_irq_bypass()) { - irqfd->consumer.token = (void *)irqfd->eventfd; irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer; irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer; irqfd->consumer.stop = kvm_arch_irq_bypass_stop; irqfd->consumer.start = kvm_arch_irq_bypass_start; - ret = irq_bypass_register_consumer(&irqfd->consumer); + ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd); if (ret) - pr_info("irq bypass consumer (token %p) registration fails: %d\n", - irqfd->consumer.token, ret); + pr_info("irq bypass consumer (eventfd %p) registration fails: %d\n", + irqfd->eventfd, ret); } #endif diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c index 28a4d933569a..e8d7c420db52 100644 --- a/virt/lib/irqbypass.c +++ b/virt/lib/irqbypass.c @@ -77,30 +77,32 @@ static void __disconnect(struct irq_bypass_producer *prod, /** * irq_bypass_register_producer - register IRQ bypass producer * @producer: pointer to producer structure + * @eventfd: pointer to the eventfd context associated with the producer * * Add the provided IRQ producer to the list of producers and connect - * with any matching token found on the IRQ consumers list. + * with any matching eventfd found on the IRQ consumers list. 
*/ -int irq_bypass_register_producer(struct irq_bypass_producer *producer) +int irq_bypass_register_producer(struct irq_bypass_producer *producer, + struct eventfd_ctx *eventfd) { struct irq_bypass_producer *tmp; struct irq_bypass_consumer *consumer; int ret; - if (!producer->token) + if (WARN_ON_ONCE(producer->eventfd)) return -EINVAL; mutex_lock(&lock); list_for_each_entry(tmp, &producers, node) { - if (tmp->token == producer->token) { + if (tmp->eventfd == eventfd) { ret = -EBUSY; goto out_err; } } list_for_each_entry(consumer, &consumers, node) { - if (consumer->token == producer->token) { + if (consumer->eventfd == eventfd) { ret = __connect(producer, consumer); if (ret) goto out_err; @@ -108,6 +110,7 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer) } } + producer->eventfd = eventfd; list_add(&producer->node, &producers); mutex_unlock(&lock); @@ -131,26 +134,28 @@ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer) struct irq_bypass_producer *tmp; struct irq_bypass_consumer *consumer; - if (!producer->token) + if (!producer->eventfd) return; mutex_lock(&lock); list_for_each_entry(tmp, &producers, node) { - if (tmp->token != producer->token) + if (tmp->eventfd != producer->eventfd) continue; list_for_each_entry(consumer, &consumers, node) { - if (consumer->token == producer->token) { + if (consumer->eventfd == producer->eventfd) { __disconnect(producer, consumer); break; } } + producer->eventfd = NULL; list_del(&producer->node); break; } + WARN_ON_ONCE(producer->eventfd); mutex_unlock(&lock); } EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer); @@ -158,31 +163,35 @@ EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer); /** * irq_bypass_register_consumer - register IRQ bypass consumer * @consumer: pointer to consumer structure + * @eventfd: pointer to the eventfd context associated with the consumer * * Add the provided IRQ consumer to the list of consumers and connect - * with any matching token found on the IRQ producer list. + * with any matching eventfd found on the IRQ producer list. 
*/ -int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer) +int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, + struct eventfd_ctx *eventfd) { struct irq_bypass_consumer *tmp; struct irq_bypass_producer *producer; int ret; - if (!consumer->token || - !consumer->add_producer || !consumer->del_producer) + if (WARN_ON_ONCE(consumer->eventfd)) + return -EINVAL; + + if (!consumer->add_producer || !consumer->del_producer) return -EINVAL; mutex_lock(&lock); list_for_each_entry(tmp, &consumers, node) { - if (tmp->token == consumer->token || tmp == consumer) { + if (tmp->eventfd == eventfd) { ret = -EBUSY; goto out_err; } } list_for_each_entry(producer, &producers, node) { - if (producer->token == consumer->token) { + if (producer->eventfd == eventfd) { ret = __connect(producer, consumer); if (ret) goto out_err; @@ -190,6 +199,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer) } } + consumer->eventfd = eventfd; list_add(&consumer->node, &consumers); mutex_unlock(&lock); @@ -213,7 +223,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) struct irq_bypass_consumer *tmp; struct irq_bypass_producer *producer; - if (!consumer->token) + if (!consumer->eventfd) return; mutex_lock(&lock); @@ -223,16 +233,18 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) continue; list_for_each_entry(producer, &producers, node) { - if (producer->token == consumer->token) { + if (producer->eventfd == consumer->eventfd) { __disconnect(producer, consumer); break; } } + consumer->eventfd = NULL; list_del(&consumer->node); break; } + WARN_ON_ONCE(consumer->eventfd); mutex_unlock(&lock); } EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer); -- cgit v1.2.3 From add57f493e0893ac0fb4acbdc441918d3e800f10 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 16 May 2025 16:07:30 -0700 Subject: irqbypass: Explicitly track producer and consumer bindings Explicitly track IRQ bypass producer:consumer bindings. This will allow making removal an O(1) operation; searching through the list to find information that is trivially tracked (and useful for debug) is wasteful. Reviewed-by: Kevin Tian Acked-by: Michael S. Tsirkin Reviewed-by: Alex Williamson Link: https://lore.kernel.org/r/20250516230734.2564775-5-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/irqbypass.h | 7 +++++++ virt/lib/irqbypass.c | 9 +++++++++ 2 files changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index 1b57d15ac4cf..b28197c87483 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -29,10 +29,13 @@ struct irq_bypass_consumer; * pairings are not supported. 
*/ +struct irq_bypass_consumer; + /** * struct irq_bypass_producer - IRQ bypass producer definition * @node: IRQ bypass manager private list management * @eventfd: eventfd context used to match producers and consumers + * @consumer: The connected consumer (NULL if no connection) * @irq: Linux IRQ number for the producer device * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) @@ -46,6 +49,7 @@ struct irq_bypass_consumer; struct irq_bypass_producer { struct list_head node; struct eventfd_ctx *eventfd; + struct irq_bypass_consumer *consumer; int irq; int (*add_consumer)(struct irq_bypass_producer *, struct irq_bypass_consumer *); @@ -59,6 +63,7 @@ struct irq_bypass_producer { * struct irq_bypass_consumer - IRQ bypass consumer definition * @node: IRQ bypass manager private list management * @eventfd: eventfd context used to match producers and consumers + * @producer: The connected producer (NULL if no connection) * @add_producer: Connect the IRQ consumer to an IRQ producer * @del_producer: Disconnect the IRQ consumer from an IRQ producer * @stop: Perform any quiesce operations necessary prior to add/del (optional) @@ -72,6 +77,8 @@ struct irq_bypass_producer { struct irq_bypass_consumer { struct list_head node; struct eventfd_ctx *eventfd; + struct irq_bypass_producer *producer; + int (*add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); void (*del_producer)(struct irq_bypass_consumer *, diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c index e8d7c420db52..fdbf7ecc0c21 100644 --- a/virt/lib/irqbypass.c +++ b/virt/lib/irqbypass.c @@ -51,6 +51,10 @@ static int __connect(struct irq_bypass_producer *prod, if (prod->start) prod->start(prod); + if (!ret) { + prod->consumer = cons; + cons->producer = prod; + } return ret; } @@ -72,6 +76,9 @@ static void __disconnect(struct irq_bypass_producer *prod, cons->start(cons); if (prod->start) prod->start(prod); + + prod->consumer = NULL; + cons->producer = NULL; } /** @@ -145,6 +152,7 @@ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer) list_for_each_entry(consumer, &consumers, node) { if (consumer->eventfd == producer->eventfd) { + WARN_ON_ONCE(producer->consumer != consumer); __disconnect(producer, consumer); break; } @@ -234,6 +242,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) list_for_each_entry(producer, &producers, node) { if (producer->eventfd == consumer->eventfd) { + WARN_ON_ONCE(consumer->producer != producer); __disconnect(producer, consumer); break; } -- cgit v1.2.3 From 8394b32faecd9c63b3c436e78e62519e9548e530 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 16 May 2025 16:07:33 -0700 Subject: irqbypass: Use xarray to track producers and consumers Track IRQ bypass producers and consumers using an xarray to avoid the O(2n) insertion time associated with walking a list to check for duplicate entries, and to search for a partner. At low (tens or few hundreds) total producer/consumer counts, using a list is faster due to the need to allocate backing storage for xarray. But as count creeps into the thousands, xarray wins easily, and can provide several orders of magnitude better latency at high counts. E.g. hundreds of nanoseconds vs. hundreds of milliseconds.
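As a rough illustration of the lookup scheme (a simplified sketch, not the code added below; the example_* names are placeholders): the eventfd_ctx pointer value itself serves as the xarray index, so detecting a duplicate registration and finding the partner are each a single xarray operation instead of an O(n) list walk.

#include <linux/eventfd.h>
#include <linux/gfp.h>
#include <linux/irqbypass.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_producers);
static DEFINE_XARRAY(example_consumers);

/* Register a producer keyed by its eventfd, and return the consumer waiting
 * on the same eventfd, if one exists (NULL otherwise, or on a duplicate).
 */
static struct irq_bypass_consumer *
example_add_producer(struct irq_bypass_producer *producer,
                     struct eventfd_ctx *eventfd)
{
        unsigned long index = (unsigned long)eventfd;

        /* xa_insert() returns -EBUSY if an entry already exists at @index. */
        if (xa_insert(&example_producers, index, producer, GFP_KERNEL))
                return NULL;

        return xa_load(&example_consumers, index);
}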
Cc: Oliver Upton Cc: David Matlack Cc: Like Xu Cc: Binbin Wu Reported-by: Yong He Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217379 Link: https://lore.kernel.org/all/20230801115646.33990-1-likexu@tencent.com Reviewed-by: Kevin Tian Acked-by: Michael S. Tsirkin Reviewed-by: Alex Williamson Link: https://lore.kernel.org/r/20250516230734.2564775-8-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/irqbypass.h | 4 --- virt/lib/irqbypass.c | 74 ++++++++++++++++++++++++----------------------- 2 files changed, 38 insertions(+), 40 deletions(-) (limited to 'include/linux') diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index b28197c87483..cd64fcaa88fe 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -33,7 +33,6 @@ struct irq_bypass_consumer; /** * struct irq_bypass_producer - IRQ bypass producer definition - * @node: IRQ bypass manager private list management * @eventfd: eventfd context used to match producers and consumers * @consumer: The connected consumer (NULL if no connection) * @irq: Linux IRQ number for the producer device @@ -47,7 +46,6 @@ struct irq_bypass_consumer; * for a physical device assigned to a VM. */ struct irq_bypass_producer { - struct list_head node; struct eventfd_ctx *eventfd; struct irq_bypass_consumer *consumer; int irq; @@ -61,7 +59,6 @@ struct irq_bypass_producer { /** * struct irq_bypass_consumer - IRQ bypass consumer definition - * @node: IRQ bypass manager private list management * @eventfd: eventfd context used to match producers and consumers * @producer: The connected producer (NULL if no connection) * @add_producer: Connect the IRQ consumer to an IRQ producer @@ -75,7 +72,6 @@ struct irq_bypass_producer { * portions of the interrupt handling to the VM. */ struct irq_bypass_consumer { - struct list_head node; struct eventfd_ctx *eventfd; struct irq_bypass_producer *producer; diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c index 828556c081f5..ea888b9203d2 100644 --- a/virt/lib/irqbypass.c +++ b/virt/lib/irqbypass.c @@ -22,8 +22,8 @@ MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IRQ bypass manager utility module"); -static LIST_HEAD(producers); -static LIST_HEAD(consumers); +static DEFINE_XARRAY(producers); +static DEFINE_XARRAY(consumers); static DEFINE_MUTEX(lock); /* @lock must be held when calling connect */ @@ -86,13 +86,13 @@ static void __disconnect(struct irq_bypass_producer *prod, * @producer: pointer to producer structure * @eventfd: pointer to the eventfd context associated with the producer * - * Add the provided IRQ producer to the list of producers and connect - * with any matching eventfd found on the IRQ consumers list. + * Add the provided IRQ producer to the set of producers and connect with the + * consumer with a matching eventfd, if one exists. 
*/ int irq_bypass_register_producer(struct irq_bypass_producer *producer, struct eventfd_ctx *eventfd) { - struct irq_bypass_producer *tmp; + unsigned long index = (unsigned long)eventfd; struct irq_bypass_consumer *consumer; int ret; @@ -101,22 +101,20 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer, guard(mutex)(&lock); - list_for_each_entry(tmp, &producers, node) { - if (tmp->eventfd == eventfd) - return -EBUSY; - } + ret = xa_insert(&producers, index, producer, GFP_KERNEL); + if (ret) + return ret; - list_for_each_entry(consumer, &consumers, node) { - if (consumer->eventfd == eventfd) { - ret = __connect(producer, consumer); - if (ret) - return ret; - break; + consumer = xa_load(&consumers, index); + if (consumer) { + ret = __connect(producer, consumer); + if (ret) { + WARN_ON_ONCE(xa_erase(&producers, index) != producer); + return ret; } } producer->eventfd = eventfd; - list_add(&producer->node, &producers); return 0; } EXPORT_SYMBOL_GPL(irq_bypass_register_producer); @@ -125,11 +123,14 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_producer); * irq_bypass_unregister_producer - unregister IRQ bypass producer * @producer: pointer to producer structure * - * Remove a previously registered IRQ producer from the list of producers - * and disconnect it from any connected IRQ consumer. + * Remove a previously registered IRQ producer (note, it's safe to call this + * even if registration was unsuccessful). Disconnect from the associated + * consumer, if one exists. */ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer) { + unsigned long index = (unsigned long)producer->eventfd; + if (!producer->eventfd) return; @@ -138,8 +139,8 @@ void irq_bypass_unregister_producer(struct irq_bypass_producer *producer) if (producer->consumer) __disconnect(producer, producer->consumer); + WARN_ON_ONCE(xa_erase(&producers, index) != producer); producer->eventfd = NULL; - list_del(&producer->node); } EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer); @@ -148,13 +149,13 @@ EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer); * @consumer: pointer to consumer structure * @eventfd: pointer to the eventfd context associated with the consumer * - * Add the provided IRQ consumer to the list of consumers and connect - * with any matching eventfd found on the IRQ producer list. + * Add the provided IRQ consumer to the set of consumers and connect with the + * producer with a matching eventfd, if one exists. 
*/ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, struct eventfd_ctx *eventfd) { - struct irq_bypass_consumer *tmp; + unsigned long index = (unsigned long)eventfd; struct irq_bypass_producer *producer; int ret; @@ -166,22 +167,20 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, guard(mutex)(&lock); - list_for_each_entry(tmp, &consumers, node) { - if (tmp->eventfd == eventfd) - return -EBUSY; - } + ret = xa_insert(&consumers, index, consumer, GFP_KERNEL); + if (ret) + return ret; - list_for_each_entry(producer, &producers, node) { - if (producer->eventfd == eventfd) { - ret = __connect(producer, consumer); - if (ret) - return ret; - break; + producer = xa_load(&producers, index); + if (producer) { + ret = __connect(producer, consumer); + if (ret) { + WARN_ON_ONCE(xa_erase(&consumers, index) != consumer); + return ret; } } consumer->eventfd = eventfd; - list_add(&consumer->node, &consumers); return 0; } EXPORT_SYMBOL_GPL(irq_bypass_register_consumer); @@ -190,11 +189,14 @@ EXPORT_SYMBOL_GPL(irq_bypass_register_consumer); * irq_bypass_unregister_consumer - unregister IRQ bypass consumer * @consumer: pointer to consumer structure * - * Remove a previously registered IRQ consumer from the list of consumers - * and disconnect it from any connected IRQ producer. + * Remove a previously registered IRQ consumer (note, it's safe to call this + * even if registration was unsuccessful). Disconnect from the associated + * producer, if one exists. */ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) { + unsigned long index = (unsigned long)consumer->eventfd; + if (!consumer->eventfd) return; @@ -203,7 +205,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer) if (consumer->producer) __disconnect(consumer->producer, consumer); + WARN_ON_ONCE(xa_erase(&consumers, index) != consumer); consumer->eventfd = NULL; - list_del(&consumer->node); } EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer); -- cgit v1.2.3 From 23b54381cee2928e8b5622e654ca4516f30d2f1a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 16 May 2025 16:07:34 -0700 Subject: irqbypass: Require producers to pass in Linux IRQ number during registration Pass in the Linux IRQ associated with an IRQ bypass producer instead of relying on the caller to set the field prior to registration, as there's no benefit to relying on callers to do the right thing. Take care to set producer->irq before __connect(), as KVM expects the IRQ to be valid as soon as a connection is possible. Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Acked-by: Michael S. 
Tsirkin Link: https://lore.kernel.org/r/20250516230734.2564775-9-seanjc@google.com Signed-off-by: Sean Christopherson --- drivers/vfio/pci/vfio_pci_intrs.c | 3 +-- drivers/vhost/vdpa.c | 4 ++-- include/linux/irqbypass.h | 2 +- virt/lib/irqbypass.c | 5 ++++- 4 files changed, 8 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index d87fe116762a..123298a4dc8f 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -505,8 +505,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev, if (ret) goto out_put_eventfd_ctx; - ctx->producer.irq = irq; - ret = irq_bypass_register_producer(&ctx->producer, trigger); + ret = irq_bypass_register_producer(&ctx->producer, trigger, irq); if (unlikely(ret)) { dev_info(&pdev->dev, "irq bypass producer (eventfd %p) registration fails: %d\n", diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 7b265ffda697..af1e1fdfd9ed 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -212,8 +212,8 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) if (!vq->call_ctx.ctx) return; - vq->call_ctx.producer.irq = irq; - ret = irq_bypass_register_producer(&vq->call_ctx.producer, vq->call_ctx.ctx); + ret = irq_bypass_register_producer(&vq->call_ctx.producer, + vq->call_ctx.ctx, irq); if (unlikely(ret)) dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret = %d\n", qid, vq->call_ctx.ctx, ret); diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h index cd64fcaa88fe..ede1fa938152 100644 --- a/include/linux/irqbypass.h +++ b/include/linux/irqbypass.h @@ -84,7 +84,7 @@ struct irq_bypass_consumer { }; int irq_bypass_register_producer(struct irq_bypass_producer *producer, - struct eventfd_ctx *eventfd); + struct eventfd_ctx *eventfd, int irq); void irq_bypass_unregister_producer(struct irq_bypass_producer *producer); int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer, struct eventfd_ctx *eventfd); diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c index ea888b9203d2..62c160200be9 100644 --- a/virt/lib/irqbypass.c +++ b/virt/lib/irqbypass.c @@ -85,12 +85,13 @@ static void __disconnect(struct irq_bypass_producer *prod, * irq_bypass_register_producer - register IRQ bypass producer * @producer: pointer to producer structure * @eventfd: pointer to the eventfd context associated with the producer + * @irq: Linux IRQ number of the underlying producer device * * Add the provided IRQ producer to the set of producers and connect with the * consumer with a matching eventfd, if one exists. */ int irq_bypass_register_producer(struct irq_bypass_producer *producer, - struct eventfd_ctx *eventfd) + struct eventfd_ctx *eventfd, int irq) { unsigned long index = (unsigned long)eventfd; struct irq_bypass_consumer *consumer; @@ -99,6 +100,8 @@ int irq_bypass_register_producer(struct irq_bypass_producer *producer, if (WARN_ON_ONCE(producer->eventfd)) return -EINVAL; + producer->irq = irq; + guard(mutex)(&lock); ret = xa_insert(&producers, index, producer, GFP_KERNEL); -- cgit v1.2.3 From e295d2e7fbe69ddec772c951c466dfbfc1c96818 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 14:35:40 -0700 Subject: KVM: x86: Trigger I/O APIC route rescan in kvm_arch_irq_routing_update() Trigger the I/O APIC route rescan that's performed for a split IRQ chip after userspace updates IRQ routes in kvm_arch_irq_routing_update(), i.e. 
before dropping kvm->irq_lock. Calling kvm_make_all_cpus_request() under a mutex is perfectly safe, and the smp_wmb()+smp_mb__after_atomic() pair in __kvm_make_request()+kvm_check_request() ensures the new routing is visible to vCPUs prior to the request being visible to vCPUs. In all likelihood, commit b053b2aef25d ("KVM: x86: Add EOI exit bitmap inference") somewhat arbitrarily made the request outside of irq_lock to avoid holding irq_lock any longer than is strictly necessary. And then commit abdb080f7ac8 ("kvm/irqchip: kvm_arch_irq_routing_update renaming split") took the easy route of adding another arch hook instead of risking a functional change. Note, the call to synchronize_srcu_expedited() does NOT provide ordering guarantees with respect to vCPUs scanning the new routing; as above, the request infrastructure provides the necessary ordering. I.e. there's no need to wait for kvm_scan_ioapic_routes() to complete if it's actively running, because regardless of whether it grabs the old or new table, the vCPU will have another KVM_REQ_SCAN_IOAPIC pending, i.e. will rescan again and see the new mappings. Acked-by: Kai Huang Link: https://lore.kernel.org/r/20250611213557.294358-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/irq_comm.c | 10 +++------- include/linux/kvm_host.h | 4 ---- virt/kvm/irqchip.c | 2 -- 3 files changed, 3 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index d6d792b5d1bd..e2ae62ff9cc2 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -395,13 +395,6 @@ int kvm_setup_default_irq_routing(struct kvm *kvm) ARRAY_SIZE(default_routing), 0); } -void kvm_arch_post_irq_routing_update(struct kvm *kvm) -{ - if (!irqchip_split(kvm)) - return; - kvm_make_scan_ioapic_request(kvm); -} - void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode, u8 vector, unsigned long *ioapic_handled_vectors) { @@ -466,4 +459,7 @@ void kvm_arch_irq_routing_update(struct kvm *kvm) #ifdef CONFIG_KVM_HYPERV kvm_hv_irq_routing_update(kvm); #endif + + if (irqchip_split(kvm)) + kvm_make_scan_ioapic_request(kvm); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3bde4fb5c6aa..9461517b4e62 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1024,14 +1024,10 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); -void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) { } -static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) -{ -} #endif #ifdef CONFIG_HAVE_KVM_IRQCHIP diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index 162d8ed889f2..6ccabfd32287 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c @@ -222,8 +222,6 @@ int kvm_set_irq_routing(struct kvm *kvm, kvm_arch_irq_routing_update(kvm); mutex_unlock(&kvm->irq_lock); - kvm_arch_post_irq_routing_update(kvm); - synchronize_srcu_expedited(&kvm->irq_srcu); new = old; -- cgit v1.2.3 From 77a74b8ff41ae620f5a5d727d596b670b7b9994e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 14:35:48 -0700 Subject: KVM: x86: Move kvm_{request,free}_irq_source_id() to i8254.c (PIT) Move kvm_{request,free}_irq_source_id() to i8254.c, i.e. 
the dedicated PIT emulation file, in anticipation of removing them entirely in favor of hardcoding the PIT's "requested" source ID (the source ID can only ever be '2', and the request can never fail). No functional change intended. Acked-by: Kai Huang Link: https://lore.kernel.org/r/20250611213557.294358-10-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/i8254.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/irq_comm.c | 44 -------------------------------------------- include/linux/kvm_host.h | 2 -- 3 files changed, 44 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 59f956f35f4c..2bb223bf0dac 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -641,6 +641,50 @@ static void kvm_pit_reset(struct kvm_pit *pit) kvm_pit_reset_reinject(pit); } +static int kvm_request_irq_source_id(struct kvm *kvm) +{ + unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; + int irq_source_id; + + mutex_lock(&kvm->irq_lock); + irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); + + if (irq_source_id >= BITS_PER_LONG) { + pr_warn("exhausted allocatable IRQ sources!\n"); + irq_source_id = -EFAULT; + goto unlock; + } + + ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); + ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); + set_bit(irq_source_id, bitmap); +unlock: + mutex_unlock(&kvm->irq_lock); + + return irq_source_id; +} + +static void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) +{ + ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); + ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); + + mutex_lock(&kvm->irq_lock); + if (irq_source_id < 0 || + irq_source_id >= BITS_PER_LONG) { + pr_err("IRQ source ID out of range!\n"); + goto unlock; + } + clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); + if (!irqchip_full(kvm)) + goto unlock; + + kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); + kvm_pic_clear_all(kvm->arch.vpic, irq_source_id); +unlock: + mutex_unlock(&kvm->irq_lock); +} + static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) { struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 99c521bd9db5..138c675dc24b 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -165,50 +165,6 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, return -EWOULDBLOCK; } -int kvm_request_irq_source_id(struct kvm *kvm) -{ - unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; - int irq_source_id; - - mutex_lock(&kvm->irq_lock); - irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); - - if (irq_source_id >= BITS_PER_LONG) { - pr_warn("exhausted allocatable IRQ sources!\n"); - irq_source_id = -EFAULT; - goto unlock; - } - - ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); - ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); - set_bit(irq_source_id, bitmap); -unlock: - mutex_unlock(&kvm->irq_lock); - - return irq_source_id; -} - -void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) -{ - ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); - ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); - - mutex_lock(&kvm->irq_lock); - if (irq_source_id < 0 || - irq_source_id >= BITS_PER_LONG) { - pr_err("IRQ source ID out of range!\n"); - goto unlock; - } - clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); - if (!irqchip_full(kvm)) - goto unlock; - - 
kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); - kvm_pic_clear_all(kvm->arch.vpic, irq_source_id); -unlock: - mutex_unlock(&kvm->irq_lock); -} - void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9461517b4e62..cba8fc4529e8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1784,8 +1784,6 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); -int kvm_request_irq_source_id(struct kvm *kvm); -void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* -- cgit v1.2.3 From 61423c413a746fd5fe5b0d865ea722e11b01105e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 14:35:49 -0700 Subject: KVM: x86: Hardcode the PIT IRQ source ID to '2' Hardcode the PIT's source IRQ ID to '2' instead of "finding" that bit 2 is always the first available bit in irq_sources_bitmap. Bits 0 and 1 are set/reserved by kvm_arch_init_vm(), i.e. long before kvm_create_pit() can be invoked, and KVM allows at most one in-kernel PIT instance, i.e. it's impossible for the PIT to find a different free bit (there are no other users of kvm_request_irq_source_id()). Delete the now-defunct irq_sources_bitmap and all its associated code. Acked-by: Kai Huang Link: https://lore.kernel.org/r/20250611213557.294358-11-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/i8254.c | 55 ++++++----------------------------------- arch/x86/kvm/i8254.h | 1 - arch/x86/kvm/x86.c | 6 ----- include/linux/kvm_host.h | 1 + 5 files changed, 8 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bf4459d637cc..4d68680f7051 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1397,7 +1397,6 @@ struct kvm_arch { bool pause_in_guest; bool cstate_in_guest; - unsigned long irq_sources_bitmap; s64 kvmclock_offset; /* diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 2bb223bf0dac..fa8187608cfc 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -248,8 +248,8 @@ static void pit_do_work(struct kthread_work *work) if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0)) return; - kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false); - kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false); + kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 1, false); + kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 0, false); /* * Provides NMI watchdog support via Virtual Wire mode.
@@ -641,47 +641,11 @@ static void kvm_pit_reset(struct kvm_pit *pit) kvm_pit_reset_reinject(pit); } -static int kvm_request_irq_source_id(struct kvm *kvm) +static void kvm_pit_clear_all(struct kvm *kvm) { - unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; - int irq_source_id; - - mutex_lock(&kvm->irq_lock); - irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); - - if (irq_source_id >= BITS_PER_LONG) { - pr_warn("exhausted allocatable IRQ sources!\n"); - irq_source_id = -EFAULT; - goto unlock; - } - - ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); - ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); - set_bit(irq_source_id, bitmap); -unlock: - mutex_unlock(&kvm->irq_lock); - - return irq_source_id; -} - -static void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) -{ - ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); - ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); - mutex_lock(&kvm->irq_lock); - if (irq_source_id < 0 || - irq_source_id >= BITS_PER_LONG) { - pr_err("IRQ source ID out of range!\n"); - goto unlock; - } - clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); - if (!irqchip_full(kvm)) - goto unlock; - - kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); - kvm_pic_clear_all(kvm->arch.vpic, irq_source_id); -unlock: + kvm_ioapic_clear_all(kvm->arch.vioapic, KVM_PIT_IRQ_SOURCE_ID); + kvm_pic_clear_all(kvm->arch.vpic, KVM_PIT_IRQ_SOURCE_ID); mutex_unlock(&kvm->irq_lock); } @@ -788,10 +752,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) if (!pit) return NULL; - pit->irq_source_id = kvm_request_irq_source_id(kvm); - if (pit->irq_source_id < 0) - goto fail_request; - mutex_init(&pit->pit_state.lock); pid = get_pid(task_tgid(current)); @@ -843,8 +803,7 @@ fail_register_pit: kvm_pit_set_reinject(pit, false); kthread_destroy_worker(pit->worker); fail_kthread: - kvm_free_irq_source_id(kvm, pit->irq_source_id); -fail_request: + kvm_pit_clear_all(kvm); kfree(pit); return NULL; } @@ -861,7 +820,7 @@ void kvm_free_pit(struct kvm *kvm) kvm_pit_set_reinject(pit, false); hrtimer_cancel(&pit->pit_state.timer); kthread_destroy_worker(pit->worker); - kvm_free_irq_source_id(kvm, pit->irq_source_id); + kvm_pit_clear_all(kvm); kfree(pit); } } diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index 338095829ec8..b9c1feb379a7 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -44,7 +44,6 @@ struct kvm_pit { struct kvm_io_device speaker_dev; struct kvm *kvm; struct kvm_kpit_state pit_state; - int irq_source_id; struct kvm_irq_mask_notifier mask_notifier; struct kthread_worker *worker; struct kthread_work expired; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 53f2ce2d40de..1d744730985e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12669,12 +12669,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); atomic_set(&kvm->arch.noncoherent_dma_count, 0); - /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ - set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); - /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ - set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, - &kvm->arch.irq_sources_bitmap); - raw_spin_lock_init(&kvm->arch.tsc_write_lock); mutex_init(&kvm->arch.apic_map_lock); seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cba8fc4529e8..4ff5ea29e343 100644 --- a/include/linux/kvm_host.h +++ 
b/include/linux/kvm_host.h @@ -190,6 +190,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 +#define KVM_PIT_IRQ_SOURCE_ID 2 extern struct mutex kvm_lock; extern struct list_head vm_list; -- cgit v1.2.3 From 628a27731e3f36de7ddce226f7e09ee70e40ed66 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 14:35:53 -0700 Subject: KVM: x86: Add CONFIG_KVM_IOAPIC to allow disabling in-kernel I/O APIC Add a Kconfig to allow building KVM without support for emulating an I/O APIC, PIC, and PIT, which is desirable for deployments that effectively don't support a fully in-kernel IRQ chip, i.e. never expect any VMM to create an in-kernel I/O APIC. E.g. compiling out support eliminates a few thousand lines of guest-facing code and gives security folks warm fuzzies. As a bonus, wrapping relevant paths with CONFIG_KVM_IOAPIC #ifdefs makes it much easier for readers to understand which bits and pieces exist specifically for fully in-kernel IRQ chips. Opportunistically convert all two in-kernel uses of __KVM_HAVE_IOAPIC to CONFIG_KVM_IOAPIC, e.g. rather than add a second #ifdef to generate a stub for kvm_arch_post_irq_routing_update(). Acked-by: Kai Huang Link: https://lore.kernel.org/r/20250611213557.294358-15-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/Kconfig | 10 ++++++++++ arch/x86/kvm/Makefile | 5 +++-- arch/x86/kvm/i8254.h | 2 ++ arch/x86/kvm/irq.c | 8 ++++++++ arch/x86/kvm/irq.h | 9 +++++++++ arch/x86/kvm/irq_comm.c | 2 ++ arch/x86/kvm/lapic.c | 7 ++++++- arch/x86/kvm/trace.h | 2 ++ arch/x86/kvm/x86.c | 22 ++++++++++++++++++---- include/linux/kvm_host.h | 2 +- include/trace/events/kvm.h | 4 ++-- 12 files changed, 65 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a4649a234f05..8d511eb03933 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1375,9 +1375,11 @@ struct kvm_arch { atomic_t noncoherent_dma_count; #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE atomic_t assigned_device_count; +#ifdef CONFIG_KVM_IOAPIC struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; +#endif atomic_t vapics_in_nmi_mode; struct mutex apic_map_lock; struct kvm_apic_map __rcu *apic_map; diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 2eeffcec5382..2c86673155c9 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -166,6 +166,16 @@ config KVM_AMD_SEV Encrypted State (SEV-ES), and Secure Encrypted Virtualization with Secure Nested Paging (SEV-SNP) technologies on AMD processors. +config KVM_IOAPIC + bool "I/O APIC, PIC, and PIT emulation" + default y + depends on KVM + help + Provides support for KVM to emulate an I/O APIC, PIC, and PIT, i.e. + for full in-kernel APIC emulation. + + If unsure, say Y.
+ config KVM_SMM bool "System Management Mode emulation" default y diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a5d362c7b504..92c737257789 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -5,12 +5,13 @@ ccflags-$(CONFIG_KVM_WERROR) += -Werror include $(srctree)/virt/kvm/Makefile.kvm -kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ - i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ +kvm-y += x86.o emulate.o irq.o lapic.o \ + irq_comm.o cpuid.o pmu.o mtrr.o \ debugfs.o mmu/mmu.o mmu/page_track.o \ mmu/spte.o kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o +kvm-$(CONFIG_KVM_IOAPIC) += i8259.o i8254.o ioapic.o kvm-$(CONFIG_KVM_HYPERV) += hyperv.o kvm-$(CONFIG_KVM_XEN) += xen.o kvm-$(CONFIG_KVM_SMM) += smm.o diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h index b9c1feb379a7..e8bd59ad8a7c 100644 --- a/arch/x86/kvm/i8254.h +++ b/arch/x86/kvm/i8254.h @@ -8,6 +8,7 @@ #include +#ifdef CONFIG_KVM_IOAPIC struct kvm_kpit_channel_state { u32 count; /* can be 65536 */ u16 latched_count; @@ -64,5 +65,6 @@ int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags); void kvm_free_pit(struct kvm *kvm); +#endif /* CONFIG_KVM_IOAPIC */ #endif diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index fb3bad0f4965..4c219e9f52b0 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -76,8 +76,10 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v) if (!kvm_apic_accept_pic_intr(v)) return 0; +#ifdef CONFIG_KVM_IOAPIC if (pic_in_kernel(v->kvm)) return v->kvm->arch.vpic->output; +#endif WARN_ON_ONCE(!irqchip_split(v->kvm)); return pending_userspace_extint(v); @@ -136,8 +138,10 @@ int kvm_cpu_get_extint(struct kvm_vcpu *v) return v->kvm->arch.xen.upcall_vector; #endif +#ifdef CONFIG_KVM_IOAPIC if (pic_in_kernel(v->kvm)) return kvm_pic_read_irq(v->kvm); /* PIC */ +#endif WARN_ON_ONCE(!irqchip_split(v->kvm)); return get_userspace_extint(v); @@ -171,7 +175,9 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); +#ifdef CONFIG_KVM_IOAPIC __kvm_migrate_pit_timer(vcpu); +#endif kvm_x86_call(migrate_timers)(vcpu); } @@ -187,6 +193,7 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) return irqchip_in_kernel(kvm); } +#ifdef CONFIG_KVM_IOAPIC #define IOAPIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } } @@ -273,3 +280,4 @@ int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) kvm_pic_update_irq(pic); return r; } +#endif diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 7b8b54462f95..5e62c1f79ce6 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -18,6 +18,8 @@ #include #include "lapic.h" +#ifdef CONFIG_KVM_IOAPIC + #define PIC_NUM_PINS 16 #define SELECT_PIC(irq) \ ((irq) < 8 ? 
KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE) @@ -79,12 +81,19 @@ static inline int irqchip_full(struct kvm *kvm) smp_rmb(); return mode == KVM_IRQCHIP_KERNEL; } +#else /* CONFIG_KVM_IOAPIC */ +static __always_inline int irqchip_full(struct kvm *kvm) +{ + return false; +} +#endif static inline int pic_in_kernel(struct kvm *kvm) { return irqchip_full(kvm); } + static inline int irqchip_split(struct kvm *kvm) { int mode = kvm->arch.irqchip_mode; diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 13d84c25e503..14fc8db0206c 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -208,6 +208,7 @@ int kvm_set_routing_entry(struct kvm *kvm, * check kvm_arch_can_set_irq_routing() before calling this function. */ switch (ue->type) { +#ifdef CONFIG_KVM_IOAPIC case KVM_IRQ_ROUTING_IRQCHIP: if (irqchip_split(kvm)) return -EINVAL; @@ -231,6 +232,7 @@ int kvm_set_routing_entry(struct kvm *kvm, } e->irqchip.irqchip = ue->u.irqchip.irqchip; break; +#endif case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; e->msi.address_lo = ue->u.msi.address_lo; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 73418dc0ebb2..4cf8c1f753d3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1455,7 +1455,7 @@ static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector) static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) { - int trigger_mode; + int __maybe_unused trigger_mode; /* Eoi the ioapic only if the ioapic doesn't own the vector. */ if (!kvm_ioapic_handles_vector(apic, vector)) @@ -1476,12 +1476,14 @@ static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) return; } +#ifdef CONFIG_KVM_IOAPIC if (apic_test_vector(vector, apic->regs + APIC_TMR)) trigger_mode = IOAPIC_LEVEL_TRIG; else trigger_mode = IOAPIC_EDGE_TRIG; kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); +#endif } static int apic_set_eoi(struct kvm_lapic *apic) @@ -3146,8 +3148,11 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); } kvm_make_request(KVM_REQ_EVENT, vcpu); + +#ifdef CONFIG_KVM_IOAPIC if (ioapic_in_kernel(vcpu->kvm)) kvm_rtc_eoi_tracking_restore_one(vcpu); +#endif vcpu->arch.apic_arb_prio = 0; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 4ef17990574d..ababdba2c186 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -270,6 +270,7 @@ TRACE_EVENT(kvm_cpuid, {0x6, "SIPI"}, \ {0x7, "ExtINT"} +#ifdef CONFIG_KVM_IOAPIC TRACE_EVENT(kvm_ioapic_set_irq, TP_PROTO(__u64 e, int pin, bool coalesced), TP_ARGS(e, pin, coalesced), @@ -314,6 +315,7 @@ TRACE_EVENT(kvm_ioapic_delayed_eoi_inj, (__entry->e & (1<<15)) ? "level" : "edge", (__entry->e & (1<<16)) ? 
"|masked" : "") ); +#endif TRACE_EVENT(kvm_msi_set_irq, TP_PROTO(__u64 address, __u64 data), diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1d744730985e..78dfa6c1cb01 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4632,17 +4632,20 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_EXT_CPUID: case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: +#ifdef CONFIG_KVM_IOAPIC case KVM_CAP_PIT: + case KVM_CAP_PIT2: + case KVM_CAP_PIT_STATE2: + case KVM_CAP_REINJECT_CONTROL: +#endif case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: - case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: - case KVM_CAP_PIT2: - case KVM_CAP_PIT_STATE2: + case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_VCPU_EVENTS: #ifdef CONFIG_KVM_HYPERV @@ -6937,9 +6940,11 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; + +#ifdef CONFIG_KVM_IOAPIC /* * This union makes it completely explicit to gcc-3.x - * that these two variables' stack usage should be + * that these three variables' stack usage should be * combined, not added together. */ union { @@ -6947,6 +6952,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; +#endif switch (ioctl) { case KVM_SET_TSS_ADDR: @@ -6970,6 +6976,7 @@ set_identity_unlock: case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; +#ifdef CONFIG_KVM_IOAPIC case KVM_CREATE_IRQCHIP: { mutex_lock(&kvm->lock); @@ -7136,6 +7143,7 @@ set_pit2_out: r = kvm_vm_ioctl_reinject(kvm, &control); break; } +#endif case KVM_SET_BOOT_CPU_ID: r = 0; mutex_lock(&kvm->lock); @@ -10595,8 +10603,10 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) if (irqchip_split(vcpu->kvm)) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); +#ifdef CONFIG_KVM_IOAPIC else if (ioapic_in_kernel(vcpu->kvm)) kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); +#endif if (is_guest_mode(vcpu)) vcpu->arch.load_eoi_exitmap_pending = true; @@ -12799,7 +12809,9 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm) cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); +#ifdef CONFIG_KVM_IOAPIC kvm_free_pit(kvm); +#endif kvm_mmu_pre_destroy_vm(kvm); static_call_cond(kvm_x86_vm_pre_destroy)(kvm); @@ -12823,8 +12835,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm) } kvm_destroy_vcpus(kvm); kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); +#ifdef CONFIG_KVM_IOAPIC kvm_pic_destroy(kvm); kvm_ioapic_destroy(kvm); +#endif kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); kvm_mmu_uninit_vm(kvm); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4ff5ea29e343..3b5575d0b574 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1023,7 +1023,7 @@ void kvm_unlock_all_vcpus(struct kvm *kvm); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); -#ifdef __KVM_HAVE_IOAPIC +#ifdef CONFIG_KVM_IOAPIC void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm); #else static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) diff --git a/include/trace/events/kvm.h 
b/include/trace/events/kvm.h index 96e581900c8e..1065a81ca57f 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -84,14 +84,14 @@ TRACE_EVENT(kvm_set_irq, ); #endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */ -#if defined(__KVM_HAVE_IOAPIC) +#ifdef CONFIG_KVM_IOAPIC #define kvm_irqchips \ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \ {KVM_IRQCHIP_IOAPIC, "IOAPIC"} -#endif /* defined(__KVM_HAVE_IOAPIC) */ +#endif /* CONFIG_KVM_IOAPIC */ #if defined(CONFIG_HAVE_KVM_IRQCHIP) -- cgit v1.2.3 From cb210737675ef4c1ad88721e84558eeb2f199312 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:06 -0700 Subject: KVM: Pass new routing entries and irqfd when updating IRTEs When updating IRTEs in response to a GSI routing or IRQ bypass change, pass the new/current routing information along with the associated irqfd. This will allow KVM x86 to harden, simplify, and deduplicate its code. Since adding/removing a bypass producer is now conveniently protected with irqfds.lock, i.e. can't run concurrently with kvm_irq_routing_update(), use the routing information cached in the irqfd instead of looking up the information in the current GSI routing tables. Opportunistically convert an existing printk() to pr_info() and put its string onto a single line (old code that strictly adhered to 80 chars). Link: https://lore.kernel.org/r/20250611224604.313496-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/arm64/kvm/arm.c | 7 ++++--- arch/x86/include/asm/kvm_host.h | 6 ++++-- arch/x86/kvm/svm/avic.c | 18 +++++++----------- arch/x86/kvm/svm/svm.h | 5 +++-- arch/x86/kvm/vmx/posted_intr.c | 19 ++++++++----------- arch/x86/kvm/vmx/posted_intr.h | 8 ++++++-- arch/x86/kvm/x86.c | 36 ++++++++++++++++++++---------------- include/linux/kvm_host.h | 7 +++++-- virt/kvm/eventfd.c | 11 +++++------ 9 files changed, 62 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 38a91bb5d4c7..a9a39e0375f7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2771,8 +2771,9 @@ bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, return memcmp(&old->msi, &new->msi, sizeof(new->msi)); } -int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) +int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { /* * Remapping the vLPI requires taking the its_lock mutex to resolve @@ -2781,7 +2782,7 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, * * Unmap the vLPI and fall back to software LPI injection. 
*/ - return kvm_vgic_v4_unset_forwarding(kvm, host_irq); + return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq); } void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 44fd9ccc0624..2a14564dd08a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -297,6 +297,7 @@ enum x86_intercept_stage; */ #define KVM_APIC_PV_EOI_PENDING 1 +struct kvm_kernel_irqfd; struct kvm_kernel_irq_routing_entry; /* @@ -1845,8 +1846,9 @@ struct kvm_x86_ops { void (*vcpu_blocking)(struct kvm_vcpu *vcpu); void (*vcpu_unblocking)(struct kvm_vcpu *vcpu); - int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set); + int (*pi_update_irte)(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, + unsigned int host_irq, uint32_t guest_irq, + struct kvm_kernel_irq_routing_entry *new); void (*pi_start_assignment)(struct kvm *kvm); void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu); void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 067f8e3f5a0d..49b73907de92 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -886,21 +887,14 @@ get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, return 0; } -/* - * avic_pi_update_irte - set IRTE for Posted-Interrupts - * - * @kvm: kvm - * @host_irq: host irq of the interrupt - * @guest_irq: gsi of the interrupt - * @set: set or unset PI - * returns 0 on success, < 0 on failure - */ -int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) +int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, + unsigned int host_irq, uint32_t guest_irq, + struct kvm_kernel_irq_routing_entry *new) { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; bool enable_remapped_mode = true; + bool set = !!new; int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !kvm_arch_has_irq_bypass()) @@ -926,6 +920,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, if (e->type != KVM_IRQ_ROUTING_MSI) continue; + WARN_ON_ONCE(new && memcmp(e, new, sizeof(*new))); + /** * Here, we setup with legacy mode in the following cases: * 1. When cannot target interrupt to a specific vcpu. 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index e6f3c6a153a0..b35fce30d923 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -736,8 +736,9 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void avic_vcpu_put(struct kvm_vcpu *vcpu); void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu); void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu); -int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set); +int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, + unsigned int host_irq, uint32_t guest_irq, + struct kvm_kernel_irq_routing_entry *new); void avic_vcpu_blocking(struct kvm_vcpu *vcpu); void avic_vcpu_unblocking(struct kvm_vcpu *vcpu); void avic_ring_doorbell(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 5c615e5845bf..110fb19848ab 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -2,6 +2,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include @@ -294,17 +295,9 @@ void vmx_pi_start_assignment(struct kvm *kvm) kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK); } -/* - * vmx_pi_update_irte - set IRTE for Posted-Interrupts - * - * @kvm: kvm - * @host_irq: host irq of the interrupt - * @guest_irq: gsi of the interrupt - * @set: set or unset PI - * returns 0 on success, < 0 on failure - */ -int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) +int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, + unsigned int host_irq, uint32_t guest_irq, + struct kvm_kernel_irq_routing_entry *new) { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; @@ -312,6 +305,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; + bool set = !!new; int idx, ret = 0; if (!vmx_can_use_vtd_pi(kvm)) @@ -329,6 +323,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) continue; + + WARN_ON_ONCE(new && memcmp(e, new, sizeof(*new))); + /* * VT-d PI cannot support posting multicast/broadcast * interrupts to a vCPU, we still use interrupt remapping diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h index 80499ea0e674..a94afcb55f7f 100644 --- a/arch/x86/kvm/vmx/posted_intr.h +++ b/arch/x86/kvm/vmx/posted_intr.h @@ -3,6 +3,9 @@ #define __KVM_X86_VMX_POSTED_INTR_H #include +#include +#include + #include void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu); @@ -11,8 +14,9 @@ void pi_wakeup_handler(void); void __init pi_init_cpu(int cpu); void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu); bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu); -int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set); +int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, + unsigned int host_irq, uint32_t guest_irq, + struct kvm_kernel_irq_routing_entry *new); void vmx_pi_start_assignment(struct kvm *kvm); static inline int pi_find_highest_vector(struct pi_desc *pi_desc) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02237dbb7f32..e1304b002062 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13507,31 +13507,31 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, struct kvm_kernel_irqfd *irqfd = container_of(cons, 
struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; - int ret; + int ret = 0; kvm_arch_start_assignment(irqfd->kvm); spin_lock_irq(&kvm->irqfds.lock); irqfd->producer = prod; - ret = kvm_x86_call(pi_update_irte)(irqfd->kvm, - prod->irq, irqfd->gsi, 1); - if (ret) - kvm_arch_end_assignment(irqfd->kvm); - + if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { + ret = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, prod->irq, + irqfd->gsi, &irqfd->irq_entry); + if (ret) + kvm_arch_end_assignment(irqfd->kvm); + } spin_unlock_irq(&kvm->irqfds.lock); - return ret; } void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { - int ret; struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; + int ret; WARN_ON(irqfd->producer != prod); @@ -13544,11 +13544,13 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, spin_lock_irq(&kvm->irqfds.lock); irqfd->producer = NULL; - ret = kvm_x86_call(pi_update_irte)(irqfd->kvm, - prod->irq, irqfd->gsi, 0); - if (ret) - printk(KERN_INFO "irq bypass consumer (eventfd %p) unregistration" - " fails: %d\n", irqfd->consumer.eventfd, ret); + if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { + ret = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, prod->irq, + irqfd->gsi, NULL); + if (ret) + pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n", + irqfd->consumer.eventfd, ret); + } spin_unlock_irq(&kvm->irqfds.lock); @@ -13556,10 +13558,12 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, kvm_arch_end_assignment(irqfd->kvm); } -int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) +int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { - return kvm_x86_call(pi_update_irte)(kvm, host_irq, guest_irq, set); + return kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, irqfd->producer->irq, + irqfd->gsi, new); } bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 3b5575d0b574..a4160c1c0c6b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2401,6 +2401,8 @@ struct kvm_vcpu *kvm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) +struct kvm_kernel_irqfd; + bool kvm_arch_has_irq_bypass(void); int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); @@ -2408,8 +2410,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); -int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set); +int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new); bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, struct kvm_kernel_irq_routing_entry *); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 5bc6abe30748..bd1766da6895 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -285,9 +285,9 @@ void __attribute__((weak)) kvm_arch_irq_bypass_start( { } -int 
__attribute__((weak)) kvm_arch_update_irqfd_routing( - struct kvm *kvm, unsigned int host_irq, - uint32_t guest_irq, bool set) +int __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { return 0; } @@ -618,9 +618,8 @@ void kvm_irq_routing_update(struct kvm *kvm) #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) if (irqfd->producer && kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) { - int ret = kvm_arch_update_irqfd_routing( - irqfd->kvm, irqfd->producer->irq, - irqfd->gsi, 1); + int ret = kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); + WARN_ON(ret); } #endif -- cgit v1.2.3 From 05c5e23657e1d61c271c2f4a3a21d4d630b18a9b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:07 -0700 Subject: KVM: SVM: Track per-vCPU IRTEs using kvm_kernel_irqfd structure Track the IRTEs that are posting to an SVM vCPU via the associated irqfd structure and GSI routing instead of dynamically allocating a separate data structure. In addition to eliminating an atomic allocation, this will allow hoisting much of the IRTE update logic to common x86. Cc: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 71 ++++++++++++++++++----------------------------- arch/x86/kvm/svm/svm.h | 10 ++++--- include/linux/kvm_irqfd.h | 3 ++ 3 files changed, 36 insertions(+), 48 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 49b73907de92..accc36958a75 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -76,14 +76,6 @@ static bool next_vm_id_wrapped = 0; static DEFINE_SPINLOCK(svm_vm_data_hash_lock); bool x2avic_enabled; -/* - * This is a wrapper of struct amd_iommu_ir_data. 
- */ -struct amd_svm_iommu_ir { - struct list_head node; /* Used by SVM for per-vcpu ir_list */ - void *data; /* Storing pointer to struct amd_ir_data */ -}; - static void avic_activate_vmcb(struct vcpu_svm *svm) { struct vmcb *vmcb = svm->vmcb01.ptr; @@ -747,8 +739,8 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) { int ret = 0; unsigned long flags; - struct amd_svm_iommu_ir *ir; struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_kernel_irqfd *irqfd; if (!kvm_arch_has_assigned_device(vcpu->kvm)) return 0; @@ -762,11 +754,11 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) if (list_empty(&svm->ir_list)) goto out; - list_for_each_entry(ir, &svm->ir_list, node) { + list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) { if (activate) - ret = amd_iommu_activate_guest_mode(ir->data); + ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data); else - ret = amd_iommu_deactivate_guest_mode(ir->data); + ret = amd_iommu_deactivate_guest_mode(irqfd->irq_bypass_data); if (ret) break; } @@ -775,27 +767,30 @@ out: return ret; } -static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) +static void svm_ir_list_del(struct vcpu_svm *svm, + struct kvm_kernel_irqfd *irqfd, + struct amd_iommu_pi_data *pi) { unsigned long flags; - struct amd_svm_iommu_ir *cur; + struct kvm_kernel_irqfd *cur; spin_lock_irqsave(&svm->ir_list_lock, flags); - list_for_each_entry(cur, &svm->ir_list, node) { - if (cur->data != pi->ir_data) + list_for_each_entry(cur, &svm->ir_list, vcpu_list) { + if (cur->irq_bypass_data != pi->ir_data) + continue; + if (WARN_ON_ONCE(cur != irqfd)) continue; - list_del(&cur->node); - kfree(cur); + list_del(&irqfd->vcpu_list); break; } spin_unlock_irqrestore(&svm->ir_list_lock, flags); } -static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) +static int svm_ir_list_add(struct vcpu_svm *svm, + struct kvm_kernel_irqfd *irqfd, + struct amd_iommu_pi_data *pi) { - int ret = 0; unsigned long flags; - struct amd_svm_iommu_ir *ir; u64 entry; if (WARN_ON_ONCE(!pi->ir_data)) @@ -812,25 +807,14 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); struct vcpu_svm *prev_svm; - if (!prev_vcpu) { - ret = -EINVAL; - goto out; - } + if (!prev_vcpu) + return -EINVAL; prev_svm = to_svm(prev_vcpu); - svm_ir_list_del(prev_svm, pi); + svm_ir_list_del(prev_svm, irqfd, pi); } - /** - * Allocating new amd_iommu_pi_data, which will get - * add to the per-vcpu ir_list. - */ - ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT); - if (!ir) { - ret = -ENOMEM; - goto out; - } - ir->data = pi->ir_data; + irqfd->irq_bypass_data = pi->ir_data; spin_lock_irqsave(&svm->ir_list_lock, flags); @@ -845,10 +829,9 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK, true, pi->ir_data); - list_add(&ir->node, &svm->ir_list); + list_add(&irqfd->vcpu_list, &svm->ir_list); spin_unlock_irqrestore(&svm->ir_list_lock, flags); -out: - return ret; + return 0; } /* @@ -952,7 +935,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, * scheduling information in IOMMU irte. 
*/ if (!ret && pi.is_guest_mode) - svm_ir_list_add(svm, &pi); + svm_ir_list_add(svm, irqfd, &pi); } if (!ret && svm) { @@ -993,7 +976,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, vcpu = kvm_get_vcpu_by_id(kvm, id); if (vcpu) - svm_ir_list_del(to_svm(vcpu), &pi); + svm_ir_list_del(to_svm(vcpu), irqfd, &pi); } } out: @@ -1005,8 +988,8 @@ static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) { int ret = 0; - struct amd_svm_iommu_ir *ir; struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_kernel_irqfd *irqfd; lockdep_assert_held(&svm->ir_list_lock); @@ -1020,8 +1003,8 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) if (list_empty(&svm->ir_list)) return 0; - list_for_each_entry(ir, &svm->ir_list, node) { - ret = amd_iommu_update_ga(cpu, r, ir->data); + list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) { + ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data); if (ret) return ret; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index b35fce30d923..cc27877d69ae 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -310,10 +310,12 @@ struct vcpu_svm { u64 *avic_physical_id_cache; /* - * Per-vcpu list of struct amd_svm_iommu_ir: - * This is used mainly to store interrupt remapping information used - * when update the vcpu affinity. This avoids the need to scan for - * IRTE and try to match ga_tag in the IOMMU driver. + * Per-vCPU list of irqfds that are eligible to post IRQs directly to + * the vCPU (a.k.a. device posted IRQs, a.k.a. IRQ bypass). The list + * is used to reconfigure IRTEs when the vCPU is loaded/put (to set the + * target pCPU), when AVIC is toggled on/off (to (de)activate bypass), + * and if the irqfd becomes ineligible for posting (to put the IRTE + * back into remapped mode). */ struct list_head ir_list; spinlock_t ir_list_lock; diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 8ad43692e3bb..6510a48e62aa 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -59,6 +59,9 @@ struct kvm_kernel_irqfd { struct work_struct shutdown; struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; + + struct list_head vcpu_list; + void *irq_bypass_data; }; #endif /* __LINUX_KVM_IRQFD_H */ -- cgit v1.2.3 From 0a917e9d4b7070fafcbf7a8ec32d2aa444b4e757 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:08 -0700 Subject: KVM: SVM: Delete IRTE link from previous vCPU before setting new IRTE Delete the previous per-vCPU IRTE link prior to modifying the IRTE. If forcing the IRTE back to remapped mode fails, the IRQ is already broken; keeping stale metadata won't change that, and the IOMMU should be sufficiently paranoid to sanitize the IRTE when the IRQ is freed and reallocated. This will allow hoisting the vCPU tracking to common x86, which in turn will allow most of the IRTE update code to be deduplicated. 
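For reference, a minimal sketch of the unlink helper as it looks after this change, abridged from the avic.c hunk below (not a standalone build unit; it relies on the kernel list/locking primitives and on the irq_bypass_vcpu back-pointer added to kvm_irqfd.h):

        static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd)
        {
                /* Back-pointer set by svm_ir_list_add(); NULL if not linked. */
                struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu;
                unsigned long flags;

                if (!vcpu)
                        return;

                /* O(1) unlink under the owning vCPU's ir_list_lock. */
                spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags);
                list_del(&irqfd->vcpu_list);
                spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags);

                irqfd->irq_bypass_vcpu = NULL;
        }

Because the irqfd itself records which vCPU it is linked to, decoding prev_ga_tag to locate the previous vCPU is no longer necessary.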
Tested-by: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 60 ++++++++++------------------------------------- include/linux/kvm_irqfd.h | 1 + 2 files changed, 14 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index accc36958a75..7f7af8ff627d 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -767,23 +767,19 @@ out: return ret; } -static void svm_ir_list_del(struct vcpu_svm *svm, - struct kvm_kernel_irqfd *irqfd, - struct amd_iommu_pi_data *pi) +static void svm_ir_list_del(struct kvm_kernel_irqfd *irqfd) { + struct kvm_vcpu *vcpu = irqfd->irq_bypass_vcpu; unsigned long flags; - struct kvm_kernel_irqfd *cur; - spin_lock_irqsave(&svm->ir_list_lock, flags); - list_for_each_entry(cur, &svm->ir_list, vcpu_list) { - if (cur->irq_bypass_data != pi->ir_data) - continue; - if (WARN_ON_ONCE(cur != irqfd)) - continue; - list_del(&irqfd->vcpu_list); - break; - } - spin_unlock_irqrestore(&svm->ir_list_lock, flags); + if (!vcpu) + return; + + spin_lock_irqsave(&to_svm(vcpu)->ir_list_lock, flags); + list_del(&irqfd->vcpu_list); + spin_unlock_irqrestore(&to_svm(vcpu)->ir_list_lock, flags); + + irqfd->irq_bypass_vcpu = NULL; } static int svm_ir_list_add(struct vcpu_svm *svm, @@ -796,24 +792,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, if (WARN_ON_ONCE(!pi->ir_data)) return -EINVAL; - /** - * In some cases, the existing irte is updated and re-set, - * so we need to check here if it's already been * added - * to the ir_list. - */ - if (pi->prev_ga_tag) { - struct kvm *kvm = svm->vcpu.kvm; - u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag); - struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); - struct vcpu_svm *prev_svm; - - if (!prev_vcpu) - return -EINVAL; - - prev_svm = to_svm(prev_vcpu); - svm_ir_list_del(prev_svm, irqfd, pi); - } - + irqfd->irq_bypass_vcpu = &svm->vcpu; irqfd->irq_bypass_data = pi->ir_data; spin_lock_irqsave(&svm->ir_list_lock, flags); @@ -905,6 +884,8 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, WARN_ON_ONCE(new && memcmp(e, new, sizeof(*new))); + svm_ir_list_del(irqfd); + /** * Here, we setup with legacy mode in the following cases: * 1. When cannot target interrupt to a specific vcpu. @@ -963,21 +944,6 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, pi.prev_ga_tag = 0; pi.is_guest_mode = false; ret = irq_set_vcpu_affinity(host_irq, &pi); - - /** - * Check if the posted interrupt was previously - * setup with the guest_mode by checking if the ga_tag - * was cached. If so, we need to clean up the per-vcpu - * ir_list. 
- */ - if (!ret && pi.prev_ga_tag) { - int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag); - struct kvm_vcpu *vcpu; - - vcpu = kvm_get_vcpu_by_id(kvm, id); - if (vcpu) - svm_ir_list_del(to_svm(vcpu), irqfd, &pi); - } } out: srcu_read_unlock(&kvm->irq_srcu, idx); diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 6510a48e62aa..361c07f4466d 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -60,6 +60,7 @@ struct kvm_kernel_irqfd { struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; + struct kvm_vcpu *irq_bypass_vcpu; struct list_head vcpu_list; void *irq_bypass_data; }; -- cgit v1.2.3 From 1da19c5ce0533796179d9e1b55e64bf78478c4c1 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:09 -0700 Subject: iommu/amd: KVM: SVM: Delete now-unused cached/previous GA tag fields Delete the amd_ir_data.prev_ga_tag field now that all usage is superfluous. Reviewed-by: Vasant Hegde Tested-by: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-8-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 2 -- drivers/iommu/amd/amd_iommu_types.h | 1 - drivers/iommu/amd/iommu.c | 10 ---------- include/linux/amd-iommu.h | 2 +- 4 files changed, 1 insertion(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 7f7af8ff627d..38cdfb052a3a 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -939,9 +939,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, /** * Here, pi is used to: * - Tell IOMMU to use legacy mode for this interrupt. - * - Retrieve ga_tag of prior interrupt remapping data. */ - pi.prev_ga_tag = 0; pi.is_guest_mode = false; ret = irq_set_vcpu_affinity(host_irq, &pi); } diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index ccbab3a4811a..053a0f05768c 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -1054,7 +1054,6 @@ struct irq_2_irte { }; struct amd_ir_data { - u32 cached_ga_tag; struct amd_iommu *iommu; struct irq_2_irte irq_2_irte; struct msi_msg msi_entry; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 3117d99cf83d..ed96050d4933 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3887,23 +3887,13 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) ir_data->cfg = irqd_cfg(data); pi_data->ir_data = ir_data; - pi_data->prev_ga_tag = ir_data->cached_ga_tag; if (pi_data->is_guest_mode) { ir_data->ga_root_ptr = (pi_data->base >> 12); ir_data->ga_vector = vcpu_pi_info->vector; ir_data->ga_tag = pi_data->ga_tag; ret = amd_iommu_activate_guest_mode(ir_data); - if (!ret) - ir_data->cached_ga_tag = pi_data->ga_tag; } else { ret = amd_iommu_deactivate_guest_mode(ir_data); - - /* - * This communicates the ga_tag back to the caller - * so that it can do all the necessary clean up. 
- */ - if (!ret) - ir_data->cached_ga_tag = 0; } return ret; diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 062fbd4c9b77..1f9b13d803c5 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -19,8 +19,8 @@ struct amd_iommu; */ struct amd_iommu_pi_data { u32 ga_tag; - u32 prev_ga_tag; u64 base; + bool is_guest_mode; struct vcpu_data *vcpu_data; void *ir_data; -- cgit v1.2.3 From c4cdbaf9d81c8387da8b9b91567d4b0eb1b8a549 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:24 -0700 Subject: iommu/amd: KVM: SVM: Use pi_desc_addr to derive ga_root_ptr Use vcpu_data.pi_desc_addr instead of amd_iommu_pi_data.base to get the GA root pointer. KVM is the only source of amd_iommu_pi_data.base, and KVM's one and only path for writing amd_iommu_pi_data.base computes the exact same value for vcpu_data.pi_desc_addr and amd_iommu_pi_data.base, and fills amd_iommu_pi_data.base if and only if vcpu_data.pi_desc_addr is valid, i.e. amd_iommu_pi_data.base is fully redundant. Cc: Maxim Levitsky Reviewed-by: Joao Martins Reviewed-by: Vasant Hegde Tested-by: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-23-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 7 +++++-- drivers/iommu/amd/iommu.c | 2 +- include/linux/amd-iommu.h | 2 -- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 375a29022000..d95742faae11 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -894,8 +894,11 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, enable_remapped_mode = false; - /* Try to enable guest_mode in IRTE */ - pi.base = avic_get_backing_page_address(svm); + /* + * Try to enable guest_mode in IRTE. Note, the address + * of the vCPU's AVIC backing page is passed to the + * IOMMU via vcpu_info->pi_desc_addr. + */ pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, svm->vcpu.vcpu_id); pi.is_guest_mode = true; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index ed96050d4933..bdfb57af7111 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3888,7 +3888,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) pi_data->ir_data = ir_data; if (pi_data->is_guest_mode) { - ir_data->ga_root_ptr = (pi_data->base >> 12); + ir_data->ga_root_ptr = (vcpu_pi_info->pi_desc_addr >> 12); ir_data->ga_vector = vcpu_pi_info->vector; ir_data->ga_tag = pi_data->ga_tag; ret = amd_iommu_activate_guest_mode(ir_data); diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 1f9b13d803c5..deeefc92a5cf 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -19,8 +19,6 @@ struct amd_iommu; */ struct amd_iommu_pi_data { u32 ga_tag; - u64 base; - bool is_guest_mode; struct vcpu_data *vcpu_data; void *ir_data; -- cgit v1.2.3 From 53527ea1b70224d16d29edbd5c850456469f00ec Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:34 -0700 Subject: iommu: KVM: Split "struct vcpu_data" into separate AMD vs. Intel structs Split the vcpu_data structure that serves as a handoff from KVM to IOMMU drivers into vendor specific structures. 
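Concretely, the handoff becomes two vendor-specific structures, abridged from the irq_remapping.h hunk below (field comments as in the patch; shown here only for orientation):

        struct amd_iommu_pi_data {
                u64 vapic_addr;         /* Physical address of the vCPU's vAPIC. */
                u32 ga_tag;
                u32 vector;             /* Guest vector of the interrupt */
                bool is_guest_mode;
                void *ir_data;
        };

        struct intel_iommu_pi_data {
                u64 pi_desc_addr;       /* Physical address of PI Descriptor */
                u32 vector;             /* Guest vector of the interrupt */
        };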
Overloading a single structure makes the code hard to read and maintain, is *very* misleading as it suggests that mixing vendors is actually supported, and bastardizing Intel's posted interrupt descriptor address when AMD's IOMMU already has its own structure is quite unnecessary. Tested-by: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-33-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/irq_remapping.h | 15 ++++++++++++++- arch/x86/kvm/svm/avic.c | 21 ++++++++------------- arch/x86/kvm/vmx/posted_intr.c | 4 ++-- drivers/iommu/amd/iommu.c | 12 ++++-------- drivers/iommu/intel/irq_remapping.c | 10 +++++----- include/linux/amd-iommu.h | 12 ------------ 6 files changed, 33 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 5036f13ab69f..2dbc9cb61c2f 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -26,7 +26,20 @@ enum { IRQ_REMAP_X2APIC_MODE, }; -struct vcpu_data { +/* + * This is mainly used to communicate information back-and-forth + * between SVM and IOMMU for setting up and tearing down posted + * interrupt + */ +struct amd_iommu_pi_data { + u64 vapic_addr; /* Physical address of the vCPU's vAPIC. */ + u32 ga_tag; + u32 vector; /* Guest vector of the interrupt */ + bool is_guest_mode; + void *ir_data; +}; + +struct intel_iommu_pi_data { u64 pi_desc_addr; /* Physical address of PI Descriptor */ u32 vector; /* Guest vector of the interrupt */ }; diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index eed5c58ac07f..dc1526fef18d 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -823,23 +823,18 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, */ if (vcpu && kvm_vcpu_apicv_active(vcpu)) { /* - * Try to enable guest_mode in IRTE. Note, the address - * of the vCPU's AVIC backing page is passed to the - * IOMMU via vcpu_info->pi_desc_addr. + * Try to enable guest_mode in IRTE. */ - struct vcpu_data vcpu_info = { - .pi_desc_addr = avic_get_backing_page_address(to_svm(vcpu)), - .vector = vector, - }; - - struct amd_iommu_pi_data pi = { - .ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, vcpu->vcpu_id), + struct amd_iommu_pi_data pi_data = { + .ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id, + vcpu->vcpu_id), .is_guest_mode = true, - .vcpu_data = &vcpu_info, + .vapic_addr = avic_get_backing_page_address(to_svm(vcpu)), + .vector = vector, }; int ret; - ret = irq_set_vcpu_affinity(host_irq, &pi); + ret = irq_set_vcpu_affinity(host_irq, &pi_data); if (ret) return ret; @@ -850,7 +845,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, * we can reference to them directly when we update vcpu * scheduling information in IOMMU irte. 
*/ - return svm_ir_list_add(to_svm(vcpu), irqfd, &pi); + return svm_ir_list_add(to_svm(vcpu), irqfd, &pi_data); } return irq_set_vcpu_affinity(host_irq, NULL); } diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 687ffde3b61c..3a23c30f73cb 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -303,12 +303,12 @@ int vmx_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, struct kvm_vcpu *vcpu, u32 vector) { if (vcpu) { - struct vcpu_data vcpu_info = { + struct intel_iommu_pi_data pi_data = { .pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)), .vector = vector, }; - return irq_set_vcpu_affinity(host_irq, &vcpu_info); + return irq_set_vcpu_affinity(host_irq, &pi_data); } else { return irq_set_vcpu_affinity(host_irq, NULL); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index e8e259c07265..8366d32252cd 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3860,10 +3860,10 @@ int amd_iommu_deactivate_guest_mode(void *data) } EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); -static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) +static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info) { int ret; - struct amd_iommu_pi_data *pi_data = vcpu_info; + struct amd_iommu_pi_data *pi_data = info; struct amd_ir_data *ir_data = data->chip_data; struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct iommu_dev_data *dev_data; @@ -3886,14 +3886,10 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) ir_data->cfg = irqd_cfg(data); if (pi_data) { - struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; - pi_data->ir_data = ir_data; - WARN_ON_ONCE(!pi_data->is_guest_mode); - - ir_data->ga_root_ptr = (vcpu_pi_info->pi_desc_addr >> 12); - ir_data->ga_vector = vcpu_pi_info->vector; + ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12); + ir_data->ga_vector = pi_data->vector; ir_data->ga_tag = pi_data->ga_tag; ret = amd_iommu_activate_guest_mode(ir_data); } else { diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index cf7b6882ec75..2fc451253dc3 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1244,10 +1244,10 @@ static void intel_ir_compose_msi_msg(struct irq_data *irq_data, static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) { struct intel_ir_data *ir_data = data->chip_data; - struct vcpu_data *vcpu_pi_info = info; + struct intel_iommu_pi_data *pi_data = info; /* stop posting interrupts, back to the default mode */ - if (!vcpu_pi_info) { + if (!pi_data) { __intel_ir_reconfigure_irte(data, true); } else { struct irte irte_pi; @@ -1265,10 +1265,10 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) /* Update the posted mode fields */ irte_pi.p_pst = 1; irte_pi.p_urgent = 0; - irte_pi.p_vector = vcpu_pi_info->vector; - irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >> + irte_pi.p_vector = pi_data->vector; + irte_pi.pda_l = (pi_data->pi_desc_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT); - irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) & + irte_pi.pda_h = (pi_data->pi_desc_addr >> 32) & ~(-1UL << PDA_HIGH_BIT); ir_data->irq_2_iommu.posted_vcpu = true; diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index deeefc92a5cf..99b4fa9a0296 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -12,18 +12,6 @@ struct amd_iommu; -/* - * This is mainly used to communicate information 
back-and-forth - * between SVM and IOMMU for setting up and tearing down posted - * interrupt - */ -struct amd_iommu_pi_data { - u32 ga_tag; - bool is_guest_mode; - struct vcpu_data *vcpu_data; - void *ir_data; -}; - #ifdef CONFIG_AMD_IOMMU struct task_struct; -- cgit v1.2.3 From b33252b9d17238a4a9fa5a29af6e6a2922a6c2b0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:35 -0700 Subject: KVM: Don't WARN if updating IRQ bypass route fails Don't bother WARNing if updating an IRTE route fails now that vendor code provides much more precise WARNs. The generic WARN doesn't provide enough information to actually debug the problem, and has obviously done nothing to surface the myriad bugs in KVM x86's implementation. Drop all of the associated return code plumbing that existed just so that common KVM could WARN. Link: https://lore.kernel.org/r/20250611224604.313496-34-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/arm64/kvm/arm.c | 6 +++--- arch/arm64/kvm/vgic/vgic-v4.c | 8 +++----- arch/x86/kvm/irq.c | 8 ++++---- include/kvm/arm_vgic.h | 2 +- include/linux/kvm_host.h | 6 +++--- virt/kvm/eventfd.c | 15 ++++++--------- 6 files changed, 20 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a9a39e0375f7..94fb2f096a20 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2771,9 +2771,9 @@ bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, return memcmp(&old->msi, &new->msi, sizeof(new->msi)); } -int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, - struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) +void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { /* * Remapping the vLPI requires taking the its_lock mutex to resolve diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 911170d4a9c8..ef3481963122 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -527,18 +527,17 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq) return NULL; } -int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq) +void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq) { struct vgic_irq *irq; unsigned long flags; - int ret = 0; if (!vgic_supports_direct_msis(kvm)) - return 0; + return; irq = __vgic_host_irq_get_vlpi(kvm, host_irq); if (!irq) - return 0; + return; raw_spin_lock_irqsave(&irq->irq_lock, flags); WARN_ON(irq->hw && irq->host_irq != host_irq); @@ -550,5 +549,4 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq) raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(kvm, irq); - return 0; } diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index cd9b56c6a5c3..b7b0fbc218b8 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -606,11 +606,11 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, kvm_arch_end_assignment(irqfd->kvm); } -int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, - struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) +void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { - return kvm_pi_update_irte(irqfd, new); + kvm_pi_update_irte(irqfd, new); } bool kvm_arch_irqfd_route_changed(struct 
kvm_kernel_irq_routing_entry *old, diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4a34f7f0a864..b2a04481de1a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -434,7 +434,7 @@ struct kvm_kernel_irq_routing_entry; int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq, struct kvm_kernel_irq_routing_entry *irq_entry); -int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq); +void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq); int vgic_v4_load(struct kvm_vcpu *vcpu); void vgic_v4_commit(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a4160c1c0c6b..8e74ac0f90b1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2410,9 +2410,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, struct irq_bypass_producer *); void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); -int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, - struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new); +void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new); bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, struct kvm_kernel_irq_routing_entry *); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index bd1766da6895..719b242fc935 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -285,11 +285,11 @@ void __attribute__((weak)) kvm_arch_irq_bypass_start( { } -int __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, - struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) +void __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, + struct kvm_kernel_irq_routing_entry *old, + struct kvm_kernel_irq_routing_entry *new) { - return 0; + } bool __attribute__((weak)) kvm_arch_irqfd_route_changed( @@ -617,11 +617,8 @@ void kvm_irq_routing_update(struct kvm *kvm) #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) if (irqfd->producer && - kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) { - int ret = kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); - - WARN_ON(ret); - } + kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) + kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); #endif } -- cgit v1.2.3 From 77bb184ab880171a1cedfbed9ab05977e6ae2258 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:36 -0700 Subject: KVM: Fold kvm_arch_irqfd_route_changed() into kvm_arch_update_irqfd_routing() Fold kvm_arch_irqfd_route_changed() into kvm_arch_update_irqfd_routing(). Calling arch code to know whether or not to call arch code is absurd. 
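Illustrative sketch of the folded helper on x86, abridged from the irq.c hunk below (the arm64 variant performs the same no-change check and then unmaps the vLPI instead of updating an IRTE):

        void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
                                           struct kvm_kernel_irq_routing_entry *old,
                                           struct kvm_kernel_irq_routing_entry *new)
        {
                /* Nothing to do if the MSI routing did not actually change. */
                if (old->type == KVM_IRQ_ROUTING_MSI &&
                    new->type == KVM_IRQ_ROUTING_MSI &&
                    !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
                        return;

                kvm_pi_update_irte(irqfd, new);
        }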
Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20250611224604.313496-35-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/arm64/kvm/arm.c | 15 +++++---------- arch/x86/kvm/irq.c | 15 +++++---------- include/linux/kvm_host.h | 2 -- virt/kvm/eventfd.c | 10 +--------- 4 files changed, 11 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 94fb2f096a20..04f2116927b1 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2761,20 +2761,15 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq); } -bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) -{ - if (old->type != KVM_IRQ_ROUTING_MSI || - new->type != KVM_IRQ_ROUTING_MSI) - return true; - - return memcmp(&old->msi, &new->msi, sizeof(new->msi)); -} - void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, struct kvm_kernel_irq_routing_entry *old, struct kvm_kernel_irq_routing_entry *new) { + if (old->type == KVM_IRQ_ROUTING_MSI && + new->type == KVM_IRQ_ROUTING_MSI && + !memcmp(&old->msi, &new->msi, sizeof(new->msi))) + return; + /* * Remapping the vLPI requires taking the its_lock mutex to resolve * the new translation. We're in spinlock land at this point, so no diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index b7b0fbc218b8..23e0acc07cb6 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -610,17 +610,12 @@ void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, struct kvm_kernel_irq_routing_entry *old, struct kvm_kernel_irq_routing_entry *new) { - kvm_pi_update_irte(irqfd, new); -} - -bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) -{ - if (old->type != KVM_IRQ_ROUTING_MSI || - new->type != KVM_IRQ_ROUTING_MSI) - return true; + if (old->type == KVM_IRQ_ROUTING_MSI && + new->type == KVM_IRQ_ROUTING_MSI && + !memcmp(&old->msi, &new->msi, sizeof(new->msi))) + return; - return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); + kvm_pi_update_irte(irqfd, new); } #ifdef CONFIG_KVM_IOAPIC diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8e74ac0f90b1..fb9ec06aa807 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2413,8 +2413,6 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, struct kvm_kernel_irq_routing_entry *old, struct kvm_kernel_irq_routing_entry *new); -bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, - struct kvm_kernel_irq_routing_entry *); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 719b242fc935..59b1e64697f1 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -291,13 +291,6 @@ void __weak kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, { } - -bool __attribute__((weak)) kvm_arch_irqfd_route_changed( - struct kvm_kernel_irq_routing_entry *old, - struct kvm_kernel_irq_routing_entry *new) -{ - return true; -} #endif static int @@ -616,8 +609,7 @@ void kvm_irq_routing_update(struct kvm *kvm) irqfd_update(kvm, irqfd); #if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS) - if (irqfd->producer && - kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) + if (irqfd->producer) kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); 
#endif } -- cgit v1.2.3 From 08d9ccdd1a5c75d7aca7ac3af56f723d780dd6ac Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:43 -0700 Subject: iommu/amd: KVM: SVM: Infer IsRun from validity of pCPU destination Infer whether or not a vCPU should be marked running from the validity of the pCPU on which it is running. amd_iommu_update_ga() already skips the IRTE update if the pCPU is invalid, i.e. passing %true for is_run with an invalid pCPU would be a blatant and egregrious KVM bug. Tested-by: Sairaj Kodilkar Link: https://lore.kernel.org/r/20250611224604.313496-42-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/avic.c | 11 +++++------ drivers/iommu/amd/iommu.c | 14 +++++++++----- include/linux/amd-iommu.h | 6 ++---- 3 files changed, 16 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 42fd1868c32f..1960bb06c4b9 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -833,7 +833,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, entry = svm->avic_physical_id_entry; if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK, - true, pi_data.ir_data); + pi_data.ir_data); irqfd->irq_bypass_data = pi_data.ir_data; list_add(&irqfd->vcpu_list, &svm->ir_list); @@ -842,8 +842,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, return irq_set_vcpu_affinity(host_irq, NULL); } -static inline int -avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) +static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu) { int ret = 0; struct vcpu_svm *svm = to_svm(vcpu); @@ -862,7 +861,7 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) return 0; list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) { - ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data); + ret = amd_iommu_update_ga(cpu, irqfd->irq_bypass_data); if (ret) return ret; } @@ -924,7 +923,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry); - avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true); + avic_update_iommu_vcpu_affinity(vcpu, h_physical_id); spin_unlock_irqrestore(&svm->ir_list_lock, flags); } @@ -964,7 +963,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu) */ spin_lock_irqsave(&svm->ir_list_lock, flags); - avic_update_iommu_vcpu_affinity(vcpu, -1, 0); + avic_update_iommu_vcpu_affinity(vcpu, -1); entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; svm->avic_physical_id_entry = entry; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 0a0d73483195..3c4c81eb201b 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3990,15 +3990,17 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) * Update the pCPU information for an IRTE that is configured to post IRQs to * a vCPU, without issuing an IOMMU invalidation for the IRTE. * - * This API is intended to be used when a vCPU is scheduled in/out (or stops - * running for any reason), to do a fast update of IsRun and (conditionally) - * Destination. + * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination + * with the pCPU's APIC ID and set IsRun, else clear IsRun. I.e. treat vCPUs + * that are associated with a pCPU as running. 
This API is intended to be used + * when a vCPU is scheduled in/out (or stops running for any reason), to do a + * fast update of IsRun and (conditionally) Destination. * * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached * and thus don't require an invalidation to ensure the IOMMU consumes fresh * information. */ -int amd_iommu_update_ga(int cpu, bool is_run, void *data) +int amd_iommu_update_ga(int cpu, void *data) { struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; @@ -4015,8 +4017,10 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data) APICID_TO_IRTE_DEST_LO(cpu); entry->hi.fields.destination = APICID_TO_IRTE_DEST_HI(cpu); + entry->lo.fields_vapic.is_run = true; + } else { + entry->lo.fields_vapic.is_run = false; } - entry->lo.fields_vapic.is_run = is_run; return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, ir_data->irq_2_irte.index, entry); diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 99b4fa9a0296..fe0e16ffe0e5 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -30,8 +30,7 @@ static inline void amd_iommu_detect(void) { } /* IOMMU AVIC Function */ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); -extern int -amd_iommu_update_ga(int cpu, bool is_run, void *data); +extern int amd_iommu_update_ga(int cpu, void *data); extern int amd_iommu_activate_guest_mode(void *data); extern int amd_iommu_deactivate_guest_mode(void *data); @@ -44,8 +43,7 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) return 0; } -static inline int -amd_iommu_update_ga(int cpu, bool is_run, void *data) +static inline int amd_iommu_update_ga(int cpu, void *data) { return 0; } -- cgit v1.2.3 From f965255dc5033387ac7858787c6c792fd789ac34 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:45:45 -0700 Subject: iommu/amd: KVM: SVM: Set pCPU info in IRTE when setting vCPU affinity Now that setting vCPU affinity is guarded with ir_list_lock, i.e. now that avic_physical_id_entry can be safely accessed, set the pCPU info straight-away when setting vCPU affinity. Putting the IRTE into posted mode, and then immediately updating the IRTE a second time if the target vCPU is running is wasteful and confusing. This also fixes a flaw where a posted IRQ that arrives between putting the IRTE into guest_mode and setting the correct destination could cause the IOMMU to ring the doorbell on the wrong pCPU. Link: https://lore.kernel.org/r/20250611224604.313496-44-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/irq_remapping.h | 1 + arch/x86/kvm/svm/avic.c | 26 ++++++++++++++------------ drivers/iommu/amd/iommu.c | 6 ++++-- include/linux/amd-iommu.h | 4 ++-- 4 files changed, 21 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 2dbc9cb61c2f..4c75a17632f6 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -35,6 +35,7 @@ struct amd_iommu_pi_data { u64 vapic_addr; /* Physical address of the vCPU's vAPIC. 
*/ u32 ga_tag; u32 vector; /* Guest vector of the interrupt */ + int cpu; bool is_guest_mode; void *ir_data; }; diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 1960bb06c4b9..35c5fb831c28 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -727,6 +727,7 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu) static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) { + int apic_id = kvm_cpu_get_apicid(vcpu->cpu); int ret = 0; unsigned long flags; struct vcpu_svm *svm = to_svm(vcpu); @@ -746,7 +747,7 @@ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate) list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) { if (activate) - ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data); + ret = amd_iommu_activate_guest_mode(irqfd->irq_bypass_data, apic_id); else ret = amd_iommu_deactivate_guest_mode(irqfd->irq_bypass_data); if (ret) @@ -810,6 +811,18 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, */ guard(spinlock_irqsave)(&svm->ir_list_lock); + /* + * Update the target pCPU for IOMMU doorbells if the vCPU is + * running. If the vCPU is NOT running, i.e. is blocking or + * scheduled out, KVM will update the pCPU info when the vCPU + * is awakened and/or scheduled in. See also avic_vcpu_load(). + */ + entry = svm->avic_physical_id_entry; + if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) + pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK; + else + pi_data.cpu = -1; + ret = irq_set_vcpu_affinity(host_irq, &pi_data); if (ret) return ret; @@ -824,17 +837,6 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, return -EIO; } - /* - * Update the target pCPU for IOMMU doorbells if the vCPU is - * running. If the vCPU is NOT running, i.e. is blocking or - * scheduled out, KVM will update the pCPU info when the vCPU - * is awakened and/or scheduled in. See also avic_vcpu_load(). 
- */ - entry = svm->avic_physical_id_entry; - if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) - amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK, - pi_data.ir_data); - irqfd->irq_bypass_data = pi_data.ir_data; list_add(&irqfd->vcpu_list, &svm->ir_list); return 0; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 2eaba64be68a..6352930ad011 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3850,7 +3850,7 @@ int amd_iommu_update_ga(int cpu, void *data) } EXPORT_SYMBOL(amd_iommu_update_ga); -int amd_iommu_activate_guest_mode(void *data) +int amd_iommu_activate_guest_mode(void *data, int cpu) { struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; @@ -3871,6 +3871,8 @@ int amd_iommu_activate_guest_mode(void *data) entry->hi.fields.vector = ir_data->ga_vector; entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; + __amd_iommu_update_ga(entry, cpu); + return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, ir_data->irq_2_irte.index, entry); } @@ -3937,7 +3939,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info) ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12); ir_data->ga_vector = pi_data->vector; ir_data->ga_tag = pi_data->ga_tag; - ret = amd_iommu_activate_guest_mode(ir_data); + ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu); } else { ret = amd_iommu_deactivate_guest_mode(ir_data); } diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index fe0e16ffe0e5..c9f2df0c4596 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -32,7 +32,7 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, void *data); -extern int amd_iommu_activate_guest_mode(void *data); +extern int amd_iommu_activate_guest_mode(void *data, int cpu); extern int amd_iommu_deactivate_guest_mode(void *data); #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ @@ -48,7 +48,7 @@ static inline int amd_iommu_update_ga(int cpu, void *data) return 0; } -static inline int amd_iommu_activate_guest_mode(void *data) +static inline int amd_iommu_activate_guest_mode(void *data, int cpu) { return 0; } -- cgit v1.2.3 From b9e53f9ff4a88f01b22524878c9a381a6c5f65ff Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 11 Jun 2025 15:46:03 -0700 Subject: iommu/amd: KVM: SVM: Allow KVM to control need for GA log interrupts Add plumbing to the AMD IOMMU driver to allow KVM to control whether or not an IRTE is configured to generate GA log interrupts. KVM only needs a notification if the target vCPU is blocking, so the vCPU can be awakened. If a vCPU is preempted or exits to userspace, KVM clears is_run, but will set the vCPU back to running when userspace does KVM_RUN and/or the vCPU task is scheduled back in, i.e. KVM doesn't need a notification. Unconditionally pass "true" in all KVM paths to isolate the IOMMU changes from the KVM changes insofar as possible. Opportunistically swap the ordering of parameters for amd_iommu_update_ga() so that they match amd_iommu_activate_guest_mode(). Note, as of this writing, the AMD IOMMU manual doesn't list GALogIntr as a non-cached field, but per AMD hardware architects, it's not cached and can be safely updated without an invalidation.
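A minimal sketch of the IsRun/GALogIntr contract described above (not part of the patch; the structure and helper names are illustrative stand-ins for the real IRTE layout). A caller passes cpu >= 0 when the vCPU is running on that pCPU, and ga_log_intr == true only when a blocking vCPU needs a GA log wake event, mirroring what __amd_iommu_update_ga() does in the diff below:

    #include <stdbool.h>

    /* Simplified stand-in for the vAPIC fields of an IRTE (illustrative only). */
    struct irte_vapic_sketch {
            unsigned int destination;
            bool is_run;
            bool ga_log_intr;
    };

    void update_ga_sketch(struct irte_vapic_sketch *e, int cpu, bool ga_log_intr)
    {
            if (cpu >= 0) {
                    /* vCPU is running: route doorbells to its pCPU, no GA log IRQ needed. */
                    e->destination = (unsigned int)cpu;
                    e->is_run = true;
                    e->ga_log_intr = false;
            } else {
                    /* vCPU not running: request a wake event only if the caller asks for one. */
                    e->is_run = false;
                    e->ga_log_intr = ga_log_intr;
            }
    }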
Link: https://lore.kernel.org/all/b29b8c22-2fd4-4b5e-b755-9198874157c7@amd.com Cc: Vasant Hegde Cc: Joao Martins Link: https://lore.kernel.org/r/20250611224604.313496-62-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/irq_remapping.h | 1 + arch/x86/kvm/svm/avic.c | 10 ++++++---- drivers/iommu/amd/iommu.c | 28 +++++++++++++++++----------- include/linux/amd-iommu.h | 9 ++++----- 4 files changed, 28 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 4c75a17632f6..5a0d42464d44 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -36,6 +36,7 @@ struct amd_iommu_pi_data { u32 ga_tag; u32 vector; /* Guest vector of the interrupt */ int cpu; + bool ga_log_intr; bool is_guest_mode; void *ir_data; }; diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 5803f778999f..02f266901bfe 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -785,10 +785,12 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm, * is awakened and/or scheduled in. See also avic_vcpu_load(). */ entry = svm->avic_physical_id_entry; - if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) + if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK) { pi_data.cpu = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK; - else + } else { pi_data.cpu = -1; + pi_data.ga_log_intr = true; + } ret = irq_set_vcpu_affinity(host_irq, &pi_data); if (ret) @@ -850,9 +852,9 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, void *data = irqfd->irq_bypass_data; if (!(action & AVIC_TOGGLE_ON_OFF)) - WARN_ON_ONCE(amd_iommu_update_ga(cpu, data)); + WARN_ON_ONCE(amd_iommu_update_ga(data, cpu, true)); else if (cpu >= 0) - WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu)); + WARN_ON_ONCE(amd_iommu_activate_guest_mode(data, cpu, true)); else WARN_ON_ONCE(amd_iommu_deactivate_guest_mode(data)); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4b0cc249771f..c50d4a8a51be 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3804,7 +3804,8 @@ static const struct irq_domain_ops amd_ir_domain_ops = { .deactivate = irq_remapping_deactivate, }; -static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu) +static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu, + bool ga_log_intr) { if (cpu >= 0) { entry->lo.fields_vapic.destination = @@ -3812,8 +3813,10 @@ static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu) entry->hi.fields.destination = APICID_TO_IRTE_DEST_HI(cpu); entry->lo.fields_vapic.is_run = true; + entry->lo.fields_vapic.ga_log_intr = false; } else { entry->lo.fields_vapic.is_run = false; + entry->lo.fields_vapic.ga_log_intr = ga_log_intr; } } @@ -3822,16 +3825,19 @@ static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu) * a vCPU, without issuing an IOMMU invalidation for the IRTE. * * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination - * with the pCPU's APIC ID and set IsRun, else clear IsRun. I.e. treat vCPUs - * that are associated with a pCPU as running. This API is intended to be used - * when a vCPU is scheduled in/out (or stops running for any reason), to do a - * fast update of IsRun and (conditionally) Destination. + * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. 
If the vCPU isn't + * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based + * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is + * blocking and requires a notification wake event). I.e. treat vCPUs that are + * associated with a pCPU as running. This API is intended to be used when a + * vCPU is scheduled in/out (or stops running for any reason), to do a fast + * update of IsRun, GALogIntr, and (conditionally) Destination. * * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached * and thus don't require an invalidation to ensure the IOMMU consumes fresh * information. */ -int amd_iommu_update_ga(int cpu, void *data) +int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr) { struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; @@ -3845,14 +3851,14 @@ int amd_iommu_update_ga(int cpu, void *data) if (!ir_data->iommu) return -ENODEV; - __amd_iommu_update_ga(entry, cpu); + __amd_iommu_update_ga(entry, cpu, ga_log_intr); return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, ir_data->irq_2_irte.index, entry); } EXPORT_SYMBOL(amd_iommu_update_ga); -int amd_iommu_activate_guest_mode(void *data, int cpu) +int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr) { struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; @@ -3871,12 +3877,11 @@ int amd_iommu_activate_guest_mode(void *data, int cpu) entry->lo.fields_vapic.valid = valid; entry->lo.fields_vapic.guest_mode = 1; - entry->lo.fields_vapic.ga_log_intr = 1; entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr; entry->hi.fields.vector = ir_data->ga_vector; entry->lo.fields_vapic.ga_tag = ir_data->ga_tag; - __amd_iommu_update_ga(entry, cpu); + __amd_iommu_update_ga(entry, cpu, ga_log_intr); return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, ir_data->irq_2_irte.index, entry); @@ -3947,7 +3952,8 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info) ir_data->ga_vector = pi_data->vector; ir_data->ga_tag = pi_data->ga_tag; if (pi_data->is_guest_mode) - ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu); + ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu, + pi_data->ga_log_intr); else ret = amd_iommu_deactivate_guest_mode(ir_data); } else { diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index c9f2df0c4596..8cced632ecd0 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -30,9 +30,8 @@ static inline void amd_iommu_detect(void) { } /* IOMMU AVIC Function */ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); -extern int amd_iommu_update_ga(int cpu, void *data); - -extern int amd_iommu_activate_guest_mode(void *data, int cpu); +extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr); +extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr); extern int amd_iommu_deactivate_guest_mode(void *data); #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ @@ -43,12 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) return 0; } -static inline int amd_iommu_update_ga(int cpu, void *data) +static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr) { return 0; } -static inline int amd_iommu_activate_guest_mode(void *data, int cpu) +static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr) { return 0; } -- cgit 
v1.2.3 From 283ed5001d6852f85c09ed2522331b2b197ba937 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 May 2025 16:52:11 -0700 Subject: KVM: Use a local struct to do the initial vfs_poll() on an irqfd Use a function-local struct for the poll_table passed to vfs_poll(), as nothing in the vfs_poll() callchain grabs a long-term reference to the structure, i.e. its lifetime doesn't need to be tied to the irqfd. Using a local structure will also allow propagating failures out of the polling callback without further polluting kvm_kernel_irqfd. Opportunistically rename irqfd_ptable_queue_proc() to kvm_irqfd_register() to capture what it actually does. Tested-by: K Prateek Nayak Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250522235223.3178519-2-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/kvm_irqfd.h | 1 - virt/kvm/eventfd.c | 26 +++++++++++++++++--------- 2 files changed, 17 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index 361c07f4466d..ef8c134ded8a 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -55,7 +55,6 @@ struct kvm_kernel_irqfd { /* Used for setup/shutdown */ struct eventfd_ctx *eventfd; struct list_head list; - poll_table pt; struct work_struct shutdown; struct irq_bypass_consumer consumer; struct irq_bypass_producer *producer; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 59b1e64697f1..0b655376734e 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -245,12 +245,17 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) return ret; } -static void -irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, - poll_table *pt) +struct kvm_irqfd_pt { + struct kvm_kernel_irqfd *irqfd; + poll_table pt; +}; + +static void kvm_irqfd_register(struct file *file, wait_queue_head_t *wqh, + poll_table *pt) { - struct kvm_kernel_irqfd *irqfd = - container_of(pt, struct kvm_kernel_irqfd, pt); + struct kvm_irqfd_pt *p = container_of(pt, struct kvm_irqfd_pt, pt); + struct kvm_kernel_irqfd *irqfd = p->irqfd; + add_wait_queue_priority(wqh, &irqfd->wait); } @@ -298,6 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) { struct kvm_kernel_irqfd *irqfd, *tmp; struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL; + struct kvm_irqfd_pt irqfd_pt; int ret; __poll_t events; int idx; @@ -387,7 +393,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) * a callback whenever someone signals the underlying eventfd */ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); - init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); spin_lock_irq(&kvm->irqfds.lock); @@ -409,11 +414,14 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) spin_unlock_irq(&kvm->irqfds.lock); /* - * Check if there was an event already pending on the eventfd - * before we registered, and trigger it as if we didn't miss it. + * Register the irqfd with the eventfd by polling on the eventfd. If + * there was an event pending on the eventfd prior to registering, + * manually trigger IRQ injection.
*/ - events = vfs_poll(fd_file(f), &irqfd->pt); + irqfd_pt.irqfd = irqfd; + init_poll_funcptr(&irqfd_pt.pt, kvm_irqfd_register); + events = vfs_poll(fd_file(f), &irqfd_pt.pt); if (events & EPOLLIN) schedule_work(&irqfd->inject); -- cgit v1.2.3 From 0d09582b3a607436fd91d6ce813048a048ecbf10 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 May 2025 16:52:18 -0700 Subject: sched/wait: Add a waitqueue helper for fully exclusive priority waiters Add a waitqueue helper to add a priority waiter that requires exclusive wakeups, i.e. that requires that it be the _only_ priority waiter. The API will be used by KVM to ensure that at most one of KVM's irqfds is bound to a single eventfd (across the entire kernel). Open code the helper instead of using __add_wait_queue() so that the common path doesn't need to "handle" impossible failures. Cc: K Prateek Nayak Reviewed-by: K Prateek Nayak Tested-by: K Prateek Nayak Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20250522235223.3178519-9-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/wait.h | 2 ++ kernel/sched/wait.c | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/include/linux/wait.h b/include/linux/wait.h index 965a19809c7e..09855d819418 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -164,6 +164,8 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); +extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head, + struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 4ab3ab195277..15632c89c4f2 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -47,6 +47,24 @@ void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_ } EXPORT_SYMBOL_GPL(add_wait_queue_priority); +int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head, + struct wait_queue_entry *wq_entry) +{ + struct list_head *head = &wq_head->head; + + wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY; + + guard(spinlock_irqsave)(&wq_head->lock); + + if (!list_empty(head) && + (list_first_entry(head, typeof(*wq_entry), entry)->flags & WQ_FLAG_PRIORITY)) + return -EBUSY; + + list_add(&wq_entry->entry, head); + return 0; +} +EXPORT_SYMBOL_GPL(add_wait_queue_priority_exclusive); + void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { unsigned long flags; -- cgit v1.2.3 From bbc13ae593e0ea47357ff6e4740c533c16c2ae1e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 22 May 2025 18:17:56 -0700 Subject: VFIO: KVM: x86: Drop kvm_arch_{start,end}_assignment() Drop kvm_arch_{start,end}_assignment() and all associated code now that KVM x86 no longer consumes assigned_device_count. Tracking whether or not a VFIO-assigned device is formally associated with a VM is fundamentally flawed, as such an association is optional for general usage, i.e. is prone to false negatives. E.g. 
prior to commit 2edd9cb79fb3 ("kvm: detect assigned device via irqbypass manager"), device passthrough via VFIO would fail to enable IRQ bypass if userspace omitted the formal VFIO<=>KVM binding. And device drivers that *need* the VFIO<=>KVM connection, e.g. KVM-GT, shouldn't be relying on generic x86 tracking infrastructure. Cc: Jim Mattson Link: https://lore.kernel.org/r/20250523011756.3243624-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 -- arch/x86/kvm/x86.c | 18 ------------------ include/linux/kvm_host.h | 18 ------------------ virt/kvm/vfio.c | 3 --- 4 files changed, 41 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7262bb11032e..c8ffb23d8441 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1381,8 +1381,6 @@ struct kvm_arch { #define __KVM_HAVE_ARCH_NONCOHERENT_DMA atomic_t noncoherent_dma_count; -#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE - atomic_t assigned_device_count; unsigned long nr_possible_bypass_irqs; #ifdef CONFIG_KVM_IOAPIC diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1508b77622b9..d898b8082e15 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13438,24 +13438,6 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); } -void kvm_arch_start_assignment(struct kvm *kvm) -{ - atomic_inc(&kvm->arch.assigned_device_count); -} -EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); - -void kvm_arch_end_assignment(struct kvm *kvm) -{ - atomic_dec(&kvm->arch.assigned_device_count); -} -EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); - -bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) -{ - return raw_atomic_read(&kvm->arch.assigned_device_count); -} -EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); - static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm) { /* diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb9ec06aa807..15656b7fba6c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1690,24 +1690,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) return false; } #endif -#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE -void kvm_arch_start_assignment(struct kvm *kvm); -void kvm_arch_end_assignment(struct kvm *kvm); -bool kvm_arch_has_assigned_device(struct kvm *kvm); -#else -static inline void kvm_arch_start_assignment(struct kvm *kvm) -{ -} - -static inline void kvm_arch_end_assignment(struct kvm *kvm) -{ -} - -static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) -{ - return false; -} -#endif static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) { diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c index 196a102e34fb..be50514bbd11 100644 --- a/virt/kvm/vfio.c +++ b/virt/kvm/vfio.c @@ -175,7 +175,6 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd) kvf->file = get_file(filp); list_add_tail(&kvf->node, &kv->file_list); - kvm_arch_start_assignment(dev->kvm); kvm_vfio_file_set_kvm(kvf->file, dev->kvm); kvm_vfio_update_coherency(dev); @@ -205,7 +204,6 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd) continue; list_del(&kvf->node); - kvm_arch_end_assignment(dev->kvm); #ifdef CONFIG_SPAPR_TCE_IOMMU kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); #endif @@ -336,7 +334,6 @@ static void kvm_vfio_release(struct kvm_device *dev) fput(kvf->file); list_del(&kvf->node); 
kfree(kvf); - kvm_arch_end_assignment(dev->kvm); } kvm_vfio_update_coherency(dev); -- cgit v1.2.3 From 7ec80fb3f025825e860b433685fb801d6de34bf3 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:10 +0200 Subject: irqchip/gic-v5: Add GICv5 PPI support The GICv5 CPU interface implements support for PE-Private Peripheral Interrupts (PPI), that are handled (enabled/prioritized/delivered) entirely within the CPU interface hardware. To enable PPI interrupts, implement the baseline GICv5 host kernel driver infrastructure required to handle interrupts on a GICv5 system. Add the exception handling code path and definitions for GICv5 instructions. Add GICv5 PPI handling code as a specific IRQ domain to: - Set-up PPI priority - Manage PPI configuration and state - Manage IRQ flow handler - IRQs allocation/free - Hook-up a PPI specific IRQchip to provide the relevant methods PPI IRQ priority is chosen as the minimum allowed priority by the system design (after probing the number of priority bits implemented by the CPU interface). Co-developed-by: Sascha Bischoff Signed-off-by: Sascha Bischoff Co-developed-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Thomas Gleixner Cc: Catalin Marinas Cc: Marc Zyngier Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-20-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- MAINTAINERS | 2 + arch/arm64/include/asm/sysreg.h | 19 ++ drivers/irqchip/Kconfig | 5 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-gic-v5.c | 464 +++++++++++++++++++++++++++++++++++++ include/linux/irqchip/arm-gic-v5.h | 19 ++ 6 files changed, 510 insertions(+) create mode 100644 drivers/irqchip/irq-gic-v5.c create mode 100644 include/linux/irqchip/arm-gic-v5.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index c5452096c593..b1202987eef5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1970,6 +1970,8 @@ M: Marc Zyngier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5*.yaml +F: drivers/irqchip/irq-gic-v5*.[ch] +F: include/linux/irqchip/arm-gic-v5.h ARM HDLCD DRM DRIVER M: Liviu Dudau diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 9b5fc6389715..36b82d74db37 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1082,6 +1082,25 @@ #define GCS_CAP(x) ((((unsigned long)x) & GCS_CAP_ADDR_MASK) | \ GCS_CAP_VALID_TOKEN) +/* + * Definitions for GICv5 instructions + */ +#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0) +#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7) +#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0) + +/* Definitions for GIC CDDI */ +#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0) + +/* Definitions for GICR CDIA */ +#define GICV5_GIC_CDIA_VALID_MASK BIT_ULL(32) +#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GIC_CDIA_VALID_MASK, r) +#define GICV5_GIC_CDIA_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDIA_ID_MASK GENMASK_ULL(23, 0) + +#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn) +#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn) #define ARM64_FEATURE_FIELD_BITS 4 diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 0d196e447142..3e4fb08b7a4d 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -54,6 +54,11 @@ config 
ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_V5 + bool + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 23ca4959e6ce..3d75659d99eb 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o +obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c new file mode 100644 index 000000000000..0bb940212e20 --- /dev/null +++ b/drivers/irqchip/irq-gic-v5.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. + */ + +#define pr_fmt(fmt) "GICv5: " fmt + +#include +#include + +#include +#include + +#include +#include + +static u8 pri_bits __ro_after_init = 5; + +#define GICV5_IRQ_PRI_MASK 0x1f +#define GICV5_IRQ_PRI_MI (GICV5_IRQ_PRI_MASK & GENMASK(4, 5 - pri_bits)) + +#define PPI_NR 128 + +static bool gicv5_cpuif_has_gcie(void) +{ + return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF); +} + +struct gicv5_chip_data { + struct fwnode_handle *fwnode; + struct irq_domain *ppi_domain; +}; + +static struct gicv5_chip_data gicv5_global_data __read_mostly; + +static void gicv5_ppi_priority_init(void) +{ + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR1_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR2_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR3_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR4_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR5_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR6_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR7_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR8_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR9_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR10_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR11_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR12_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR13_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR14_EL1); + write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR15_EL1); + + /* + * Context synchronization required to make sure system register write + * effects are synchronised. + */ + isb(); +} + +static void gicv5_ppi_irq_mask(struct irq_data *d) +{ + u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); + + if (d->hwirq < 64) + sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, hwirq_id_bit, 0); + else + sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, hwirq_id_bit, 0); + + /* + * We must ensure that the disable takes effect immediately to + * guarantee that the lazy-disabled IRQ mechanism works.
+ * A context synchronization event is required to guarantee it. + * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1. + */ + isb(); +} + +static void gicv5_ppi_irq_unmask(struct irq_data *d) +{ + u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); + + if (d->hwirq < 64) + sysreg_clear_set_s(SYS_ICC_PPI_ENABLER0_EL1, 0, hwirq_id_bit); + else + sysreg_clear_set_s(SYS_ICC_PPI_ENABLER1_EL1, 0, hwirq_id_bit); + /* + * We must ensure that the enable takes effect in finite time - a + * context synchronization event is required to guarantee it, we + * can not take for granted that would happen (eg a core going straight + * into idle after enabling a PPI). + * Reference: I_ZLTKB/R_YRGMH GICv5 specification - section 2.9.1. + */ + isb(); +} + +static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type) +{ + u64 cddi; + + cddi = FIELD_PREP(GICV5_GIC_CDDI_ID_MASK, hwirq_id) | + FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, hwirq_type); + + gic_insn(cddi, CDDI); + + gic_insn(0, CDEOI); +} + +static void gicv5_ppi_irq_eoi(struct irq_data *d) +{ + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI); +} + +enum ppi_reg { + PPI_PENDING, + PPI_ACTIVE, + PPI_HM +}; + +static __always_inline u64 read_ppi_sysreg_s(unsigned int irq, + const enum ppi_reg which) +{ + switch (which) { + case PPI_PENDING: + return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SPENDR0_EL1) : + read_sysreg_s(SYS_ICC_PPI_SPENDR1_EL1); + case PPI_ACTIVE: + return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_SACTIVER0_EL1) : + read_sysreg_s(SYS_ICC_PPI_SACTIVER1_EL1); + case PPI_HM: + return irq < 64 ? read_sysreg_s(SYS_ICC_PPI_HMR0_EL1) : + read_sysreg_s(SYS_ICC_PPI_HMR1_EL1); + default: + BUILD_BUG_ON(1); + } +} + +static __always_inline void write_ppi_sysreg_s(unsigned int irq, bool set, + const enum ppi_reg which) +{ + u64 bit = BIT_ULL(irq % 64); + + switch (which) { + case PPI_PENDING: + if (set) { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_SPENDR0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_SPENDR1_EL1); + } else { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_CPENDR0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_CPENDR1_EL1); + } + return; + case PPI_ACTIVE: + if (set) { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_SACTIVER1_EL1); + } else { + if (irq < 64) + write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER0_EL1); + else + write_sysreg_s(bit, SYS_ICC_PPI_CACTIVER1_EL1); + } + return; + default: + BUILD_BUG_ON(1); + } +} + +static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); + + switch (which) { + case IRQCHIP_STATE_PENDING: + *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_PENDING) & hwirq_id_bit); + return 0; + case IRQCHIP_STATE_ACTIVE: + *state = !!(read_ppi_sysreg_s(d->hwirq, PPI_ACTIVE) & hwirq_id_bit); + return 0; + default: + pr_debug("Unexpected PPI irqchip state\n"); + return -EINVAL; + } +} + +static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + write_ppi_sysreg_s(d->hwirq, state, PPI_PENDING); + return 0; + case IRQCHIP_STATE_ACTIVE: + write_ppi_sysreg_s(d->hwirq, state, PPI_ACTIVE); + return 0; + default: + pr_debug("Unexpected PPI irqchip state\n"); + return -EINVAL; + } +} + +static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq) +{ + u64 bit = BIT_ULL(hwirq % 64); + + return !!(read_ppi_sysreg_s(hwirq, PPI_HM) & bit); +} + +static const struct irq_chip 
gicv5_ppi_irq_chip = { + .name = "GICv5-PPI", + .irq_mask = gicv5_ppi_irq_mask, + .irq_unmask = gicv5_ppi_irq_unmask, + .irq_eoi = gicv5_ppi_irq_eoi, + .irq_get_irqchip_state = gicv5_ppi_irq_get_irqchip_state, + .irq_set_irqchip_state = gicv5_ppi_irq_set_irqchip_state, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int gicv5_irq_ppi_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + if (!is_of_node(fwspec->fwnode)) + return -EINVAL; + + if (fwspec->param_count < 3) + return -EINVAL; + + if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI) + return -EINVAL; + + *hwirq = fwspec->param[1]; + + /* + * Handling mode is hardcoded for PPIs, set the type using + * HW reported value. + */ + *type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_EDGE_RISING; + + return 0; +} + +static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + ret = gicv5_irq_ppi_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_status_flags(virq, IRQ_LEVEL); + + irq_set_percpu_devid(virq); + irq_domain_set_info(domain, virq, hwirq, &gicv5_ppi_irq_chip, NULL, + handle_percpu_devid_irq, NULL, NULL); + + return 0; +} + +static void gicv5_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return; + + d = irq_domain_get_irq_data(domain, virq); + + irq_set_handler(virq, NULL); + irq_domain_reset_irq_data(d); +} + +static int gicv5_irq_ppi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + if (fwspec->fwnode != d->fwnode) + return 0; + + if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI) + return 0; + + return (d == gicv5_global_data.ppi_domain); +} + +static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = { + .translate = gicv5_irq_ppi_domain_translate, + .alloc = gicv5_irq_ppi_domain_alloc, + .free = gicv5_irq_domain_free, + .select = gicv5_irq_ppi_domain_select +}; + +static void handle_irq_per_domain(u32 hwirq) +{ + u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq); + u32 hwirq_id = FIELD_GET(GICV5_HWIRQ_ID, hwirq); + struct irq_domain *domain; + + switch (hwirq_type) { + case GICV5_HWIRQ_TYPE_PPI: + domain = gicv5_global_data.ppi_domain; + break; + default: + pr_err_once("Unknown IRQ type, bail out\n"); + return; + } + + if (generic_handle_domain_irq(domain, hwirq_id)) { + pr_err_once("Could not handle, hwirq = 0x%x", hwirq_id); + gicv5_hwirq_eoi(hwirq_id, hwirq_type); + } +} + +static void __exception_irq_entry gicv5_handle_irq(struct pt_regs *regs) +{ + bool valid; + u32 hwirq; + u64 ia; + + ia = gicr_insn(CDIA); + valid = GICV5_GICR_CDIA_VALID(ia); + + if (!valid) + return; + + /* + * Ensure that the CDIA instruction effects (ie IRQ activation) are + * completed before handling the interrupt. + */ + gsb_ack(); + + /* + * Ensure instruction ordering between an acknowledgment and subsequent + * instructions in the IRQ handler using an ISB. 
+ */ + isb(); + + hwirq = FIELD_GET(GICV5_HWIRQ_INTID, ia); + + handle_irq_per_domain(hwirq); +} + +static void gicv5_cpu_disable_interrupts(void) +{ + u64 cr0; + + cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 0); + write_sysreg_s(cr0, SYS_ICC_CR0_EL1); +} + +static void gicv5_cpu_enable_interrupts(void) +{ + u64 cr0, pcr; + + write_sysreg_s(0, SYS_ICC_PPI_ENABLER0_EL1); + write_sysreg_s(0, SYS_ICC_PPI_ENABLER1_EL1); + + gicv5_ppi_priority_init(); + + pcr = FIELD_PREP(ICC_PCR_EL1_PRIORITY, GICV5_IRQ_PRI_MI); + write_sysreg_s(pcr, SYS_ICC_PCR_EL1); + + cr0 = FIELD_PREP(ICC_CR0_EL1_EN, 1); + write_sysreg_s(cr0, SYS_ICC_CR0_EL1); +} + +static int gicv5_starting_cpu(unsigned int cpu) +{ + if (WARN(!gicv5_cpuif_has_gcie(), + "GICv5 system components present but CPU does not have FEAT_GCIE")) + return -ENODEV; + + gicv5_cpu_enable_interrupts(); + + return 0; +} + +static void __init gicv5_free_domains(void) +{ + if (gicv5_global_data.ppi_domain) + irq_domain_remove(gicv5_global_data.ppi_domain); + + gicv5_global_data.ppi_domain = NULL; +} + +static int __init gicv5_init_domains(struct fwnode_handle *handle) +{ + struct irq_domain *d; + + d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL); + if (!d) + return -ENOMEM; + + irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); + gicv5_global_data.ppi_domain = d; + gicv5_global_data.fwnode = handle; + + return 0; +} + +static void gicv5_set_cpuif_pribits(void) +{ + u64 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1); + + switch (FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) { + case ICC_IDR0_EL1_PRI_BITS_4BITS: + pri_bits = 4; + break; + case ICC_IDR0_EL1_PRI_BITS_5BITS: + pri_bits = 5; + break; + default: + pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4"); + pri_bits = 4; + break; + } +} + +static int __init gicv5_of_init(struct device_node *node, struct device_node *parent) +{ + int ret = gicv5_init_domains(of_fwnode_handle(node)); + if (ret) + return ret; + + gicv5_set_cpuif_pribits(); + + ret = gicv5_starting_cpu(smp_processor_id()); + if (ret) + goto out_dom; + + ret = set_handle_irq(gicv5_handle_irq); + if (ret) + goto out_int; + + return 0; + +out_int: + gicv5_cpu_disable_interrupts(); +out_dom: + gicv5_free_domains(); + + return ret; +} +IRQCHIP_DECLARE(gic_v5, "arm,gic-v5", gicv5_of_init); diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h new file mode 100644 index 000000000000..b08ec0308a9b --- /dev/null +++ b/include/linux/irqchip/arm-gic-v5.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 ARM Limited, All Rights Reserved. + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H +#define __LINUX_IRQCHIP_ARM_GIC_V5_H + +#include + +/* + * INTID handling + */ +#define GICV5_HWIRQ_ID GENMASK(23, 0) +#define GICV5_HWIRQ_TYPE GENMASK(31, 29) +#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0) + +#define GICV5_HWIRQ_TYPE_PPI UL(0x1) + +#endif -- cgit v1.2.3 From 5cb1b6dab2def316671ea2565291a86ad58b884c Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:11 +0200 Subject: irqchip/gic-v5: Add GICv5 IRS/SPI support The GICv5 Interrupt Routing Service (IRS) component implements interrupt management and routing in the GICv5 architecture. A GICv5 system comprises one or more IRSes, that together handle the interrupt routing and state for the system. An IRS supports Shared Peripheral Interrupts (SPIs), that are interrupt sources directly connected to the IRS; they do not rely on memory for storage. 
The number of supported SPIs is fixed for a given implementation and can be probed through IRS IDR registers. SPI interrupt state and routing are managed through GICv5 instructions. Each core (PE in GICv5 terms) in a GICv5 system is identified with an Interrupt AFFinity ID (IAFFID). An IRS manages a set of cores that are connected to it. Firmware provides a topology description that the driver uses to detect to which IRS a CPU (ie an IAFFID) is associated with. Use probeable information and firmware description to initialize the IRSes and implement GICv5 IRS SPIs support through an SPI-specific IRQ domain. The GICv5 IRS driver: - Probes IRSes in the system to detect SPI ranges - Associates an IRS with a set of cores connected to it - Adds an IRQchip structure for SPI handling SPIs priority is set to a value corresponding to the lowest permissible priority in the system (taking into account the implemented priority bits of the IRS and CPU interface). Since all IRQs are set to the same priority value, the value itself does not matter as long as it is a valid one. Co-developed-by: Sascha Bischoff Signed-off-by: Sascha Bischoff Co-developed-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Thomas Gleixner Cc: Catalin Marinas Cc: Marc Zyngier Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-21-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/sysreg.h | 36 +++ drivers/irqchip/Makefile | 2 +- drivers/irqchip/irq-gic-v5-irs.c | 434 +++++++++++++++++++++++++++++++++++++ drivers/irqchip/irq-gic-v5.c | 347 +++++++++++++++++++++++++++-- include/linux/irqchip/arm-gic-v5.h | 140 ++++++++++++ 5 files changed, 937 insertions(+), 22 deletions(-) create mode 100644 drivers/irqchip/irq-gic-v5-irs.c (limited to 'include/linux') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 36b82d74db37..efd2e7a1fbe2 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1085,14 +1085,50 @@ /* * Definitions for GICv5 instructions */ +#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3) #define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0) +#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0) +#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1) #define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7) +#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4) +#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2) +#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5) #define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0) +/* Definitions for GIC CDAFF */ +#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32) +#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28) +#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0) + /* Definitions for GIC CDDI */ #define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29) #define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0) +/* Definitions for GIC CDDIS */ +#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r) +#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0) +#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r) + +/* Definitions for GIC CDEN */ +#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0) + +/* Definitions for GIC CDPEND */ +#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32) +#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29) 
+#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0) + +/* Definitions for GIC CDPRI */ +#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35) +#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0) + +/* Definitions for GIC CDRCFG */ +#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0) + /* Definitions for GICR CDIA */ #define GICV5_GIC_CDIA_VALID_MASK BIT_ULL(32) #define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GIC_CDIA_VALID_MASK, r) diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 3d75659d99eb..7a0e6cee09e1 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o -obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o +obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c new file mode 100644 index 000000000000..fba8efceb26e --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-irs.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. + */ + +#define pr_fmt(fmt) "GICv5 IRS: " fmt + +#include +#include + +#include +#include + +#define IRS_FLAGS_NON_COHERENT BIT(0) + +static DEFINE_PER_CPU_READ_MOSTLY(struct gicv5_irs_chip_data *, per_cpu_irs_data); +static LIST_HEAD(irs_nodes); + +static u32 irs_readl_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 reg_offset) +{ + return readl_relaxed(irs_data->irs_base + reg_offset); +} + +static void irs_writel_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 val, const u32 reg_offset) +{ + writel_relaxed(val, irs_data->irs_base + reg_offset); +} + +struct iaffid_entry { + u16 iaffid; + bool valid; +}; + +static DEFINE_PER_CPU(struct iaffid_entry, cpu_iaffid); + +int gicv5_irs_cpu_to_iaffid(int cpuid, u16 *iaffid) +{ + if (!per_cpu(cpu_iaffid, cpuid).valid) { + pr_err("IAFFID for CPU %d has not been initialised\n", cpuid); + return -ENODEV; + } + + *iaffid = per_cpu(cpu_iaffid, cpuid).iaffid; + + return 0; +} + +struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id) +{ + struct gicv5_irs_chip_data *irs_data; + u32 min, max; + + list_for_each_entry(irs_data, &irs_nodes, entry) { + if (!irs_data->spi_range) + continue; + + min = irs_data->spi_min; + max = irs_data->spi_min + irs_data->spi_range - 1; + if (spi_id >= min && spi_id <= max) + return irs_data; + } + + return NULL; +} + +static int gicv5_irs_wait_for_spi_op(struct gicv5_irs_chip_data *irs_data) +{ + u32 statusr; + int ret; + + ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_SPI_STATUSR, + GICV5_IRS_SPI_STATUSR_IDLE, &statusr); + if (ret) + return ret; + + return !!FIELD_GET(GICV5_IRS_SPI_STATUSR_V, statusr) ? 
0 : -EIO; +} + +static int gicv5_irs_wait_for_irs_pe(struct gicv5_irs_chip_data *irs_data, + bool selr) +{ + bool valid = true; + u32 statusr; + int ret; + + ret = gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_PE_STATUSR, + GICV5_IRS_PE_STATUSR_IDLE, &statusr); + if (ret) + return ret; + + if (selr) + valid = !!FIELD_GET(GICV5_IRS_PE_STATUSR_V, statusr); + + return valid ? 0 : -EIO; +} + +static int gicv5_irs_wait_for_pe_selr(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_irs_wait_for_irs_pe(irs_data, true); +} + +static int gicv5_irs_wait_for_pe_cr0(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_irs_wait_for_irs_pe(irs_data, false); +} + +int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gicv5_irs_chip_data *irs_data = d->chip_data; + u32 selr, cfgr; + bool level; + int ret; + + /* + * There is no distinction between HIGH/LOW for level IRQs + * and RISING/FALLING for edge IRQs in the architecture, + * hence consider them equivalent. + */ + switch (type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + level = false; + break; + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_LEVEL_LOW: + level = true; + break; + default: + return -EINVAL; + } + + guard(raw_spinlock)(&irs_data->spi_config_lock); + + selr = FIELD_PREP(GICV5_IRS_SPI_SELR_ID, d->hwirq); + irs_writel_relaxed(irs_data, selr, GICV5_IRS_SPI_SELR); + ret = gicv5_irs_wait_for_spi_op(irs_data); + if (ret) + return ret; + + cfgr = FIELD_PREP(GICV5_IRS_SPI_CFGR_TM, level); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_SPI_CFGR); + + return gicv5_irs_wait_for_spi_op(irs_data); +} + +static int gicv5_irs_wait_for_idle(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_CR0, + GICV5_IRS_CR0_IDLE, NULL); +} + +int gicv5_irs_register_cpu(int cpuid) +{ + struct gicv5_irs_chip_data *irs_data; + u32 selr, cr0; + u16 iaffid; + int ret; + + ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid); + if (ret) { + pr_err("IAFFID for CPU %d has not been initialised\n", cpuid); + return ret; + } + + irs_data = per_cpu(per_cpu_irs_data, cpuid); + if (!irs_data) { + pr_err("No IRS associated with CPU %u\n", cpuid); + return -ENXIO; + } + + selr = FIELD_PREP(GICV5_IRS_PE_SELR_IAFFID, iaffid); + irs_writel_relaxed(irs_data, selr, GICV5_IRS_PE_SELR); + + ret = gicv5_irs_wait_for_pe_selr(irs_data); + if (ret) { + pr_err("IAFFID 0x%x used in IRS_PE_SELR is invalid\n", iaffid); + return -ENXIO; + } + + cr0 = FIELD_PREP(GICV5_IRS_PE_CR0_DPS, 0x1); + irs_writel_relaxed(irs_data, cr0, GICV5_IRS_PE_CR0); + + ret = gicv5_irs_wait_for_pe_cr0(irs_data); + if (ret) + return ret; + + pr_debug("CPU %d enabled PE IAFFID 0x%x\n", cpuid, iaffid); + + return 0; +} + +static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data, + void __iomem *irs_base, + struct fwnode_handle *handle) +{ + struct device_node *np = to_of_node(handle); + u32 cr0, cr1; + + irs_data->fwnode = handle; + irs_data->irs_base = irs_base; + + if (of_property_read_bool(np, "dma-noncoherent")) { + /* + * A non-coherent IRS implies that some cache levels cannot be + * used coherently by the cores and GIC. Our only option is to mark + * memory attributes for the GIC as non-cacheable; by default, + * non-cacheable memory attributes imply outer-shareable + * shareability, the value written into IRS_CR1_SH is ignored. 
+ */ + cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_NO_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE); + irs_data->flags |= IRS_FLAGS_NON_COHERENT; + } else { + cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMD_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VPET_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_VMT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_WA, GICV5_WRITE_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_IRS_CR1_SH, GICV5_INNER_SHARE); + } + + irs_writel_relaxed(irs_data, cr1, GICV5_IRS_CR1); + + cr0 = FIELD_PREP(GICV5_IRS_CR0_IRSEN, 0x1); + irs_writel_relaxed(irs_data, cr0, GICV5_IRS_CR0); + gicv5_irs_wait_for_idle(irs_data); +} + +static int __init gicv5_irs_of_init_affinity(struct device_node *node, + struct gicv5_irs_chip_data *irs_data, + u8 iaffid_bits) +{ + /* + * Detect IAFFID<->CPU mappings from the device tree and + * record IRS<->CPU topology information. + */ + u16 iaffid_mask = GENMASK(iaffid_bits - 1, 0); + int ret, i, ncpus, niaffids; + + ncpus = of_count_phandle_with_args(node, "cpus", NULL); + if (ncpus < 0) + return -EINVAL; + + niaffids = of_property_count_elems_of_size(node, "arm,iaffids", + sizeof(u16)); + if (niaffids != ncpus) + return -EINVAL; + + u16 *iaffids __free(kfree) = kcalloc(niaffids, sizeof(*iaffids), GFP_KERNEL); + if (!iaffids) + return -ENOMEM; + + ret = of_property_read_u16_array(node, "arm,iaffids", iaffids, niaffids); + if (ret) + return ret; + + for (i = 0; i < ncpus; i++) { + struct device_node *cpu_node; + int cpu; + + cpu_node = of_parse_phandle(node, "cpus", i); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); + if (WARN_ON(cpu < 0)) + continue; + + if (iaffids[i] & ~iaffid_mask) { + pr_warn("CPU %d iaffid 0x%x exceeds IRS iaffid bits\n", + cpu, iaffids[i]); + continue; + } + + per_cpu(cpu_iaffid, cpu).iaffid = iaffids[i]; + per_cpu(cpu_iaffid, cpu).valid = true; + + /* We also know that the CPU is connected to this IRS */ + per_cpu(per_cpu_irs_data, cpu) = irs_data; + } + + return ret; +} + +static void irs_setup_pri_bits(u32 idr1) +{ + switch (FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1)) { + case GICV5_IRS_IDR1_PRIORITY_BITS_1BITS: + gicv5_global_data.irs_pri_bits = 1; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_2BITS: + gicv5_global_data.irs_pri_bits = 2; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_3BITS: + gicv5_global_data.irs_pri_bits = 3; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_4BITS: + gicv5_global_data.irs_pri_bits = 4; + break; + case GICV5_IRS_IDR1_PRIORITY_BITS_5BITS: + gicv5_global_data.irs_pri_bits = 5; + break; + default: + pr_warn("Detected wrong IDR priority bits value 0x%lx\n", + FIELD_GET(GICV5_IRS_IDR1_PRIORITY_BITS, idr1)); + gicv5_global_data.irs_pri_bits = 1; + break; + } +} + 
+static int __init gicv5_irs_init(struct device_node *node) +{ + struct gicv5_irs_chip_data *irs_data; + void __iomem *irs_base; + u32 idr, spi_count; + u8 iaffid_bits; + int ret; + + irs_data = kzalloc(sizeof(*irs_data), GFP_KERNEL); + if (!irs_data) + return -ENOMEM; + + raw_spin_lock_init(&irs_data->spi_config_lock); + + ret = of_property_match_string(node, "reg-names", "ns-config"); + if (ret < 0) { + pr_err("%pOF: ns-config reg-name not present\n", node); + goto out_err; + } + + irs_base = of_io_request_and_map(node, ret, of_node_full_name(node)); + if (IS_ERR(irs_base)) { + pr_err("%pOF: unable to map GICv5 IRS registers\n", node); + ret = PTR_ERR(irs_base); + goto out_err; + } + + gicv5_irs_init_bases(irs_data, irs_base, &node->fwnode); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); + iaffid_bits = FIELD_GET(GICV5_IRS_IDR1_IAFFID_BITS, idr) + 1; + + ret = gicv5_irs_of_init_affinity(node, irs_data, iaffid_bits); + if (ret) { + pr_err("Failed to parse CPU IAFFIDs from the device tree!\n"); + goto out_iomem; + } + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR7); + irs_data->spi_min = FIELD_GET(GICV5_IRS_IDR7_SPI_BASE, idr); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR6); + irs_data->spi_range = FIELD_GET(GICV5_IRS_IDR6_SPI_IRS_RANGE, idr); + + if (irs_data->spi_range) { + pr_info("%s detected SPI range [%u-%u]\n", + of_node_full_name(node), + irs_data->spi_min, + irs_data->spi_min + + irs_data->spi_range - 1); + } + + /* + * Do the global setting only on the first IRS. + * Global properties (iaffid_bits, global spi count) are guaranteed to + * be consistent across IRSes by the architecture. + */ + if (list_empty(&irs_nodes)) { + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR1); + irs_setup_pri_bits(idr); + + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR5); + + spi_count = FIELD_GET(GICV5_IRS_IDR5_SPI_RANGE, idr); + gicv5_global_data.global_spi_count = spi_count; + + pr_debug("Detected %u SPIs globally\n", spi_count); + } + + list_add_tail(&irs_data->entry, &irs_nodes); + + return 0; + +out_iomem: + iounmap(irs_base); +out_err: + kfree(irs_data); + return ret; +} + +void __init gicv5_irs_remove(void) +{ + struct gicv5_irs_chip_data *irs_data, *tmp_data; + + list_for_each_entry_safe(irs_data, tmp_data, &irs_nodes, entry) { + iounmap(irs_data->irs_base); + list_del(&irs_data->entry); + kfree(irs_data); + } +} + +int __init gicv5_irs_of_probe(struct device_node *parent) +{ + struct device_node *np; + int ret; + + for_each_available_child_of_node(parent, np) { + if (!of_device_is_compatible(np, "arm,gic-v5-irs")) + continue; + + ret = gicv5_irs_init(np); + if (ret) + pr_err("Failed to init IRS %s\n", np->full_name); + } + + return list_empty(&irs_nodes) ? 
-ENODEV : 0; +} diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c index 0bb940212e20..9c55ddcfa0df 100644 --- a/drivers/irqchip/irq-gic-v5.c +++ b/drivers/irqchip/irq-gic-v5.c @@ -26,12 +26,7 @@ static bool gicv5_cpuif_has_gcie(void) return this_cpu_has_cap(ARM64_HAS_GICV5_CPUIF); } -struct gicv5_chip_data { - struct fwnode_handle *fwnode; - struct irq_domain *ppi_domain; -}; - -static struct gicv5_chip_data gicv5_global_data __read_mostly; +struct gicv5_chip_data gicv5_global_data __read_mostly; static void gicv5_ppi_priority_init(void) { @@ -59,6 +54,30 @@ static void gicv5_ppi_priority_init(void) isb(); } +static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type) +{ + u64 cdpri, cdaff; + u16 iaffid; + int ret; + + if (hwirq_type == GICV5_HWIRQ_TYPE_SPI) { + cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority) | + FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq); + gic_insn(cdpri, CDPRI); + + ret = gicv5_irs_cpu_to_iaffid(smp_processor_id(), &iaffid); + + if (WARN_ON_ONCE(ret)) + return; + + cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) | + FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, hwirq); + gic_insn(cdaff, CDAFF); + } +} + static void gicv5_ppi_irq_mask(struct irq_data *d) { u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); @@ -77,6 +96,32 @@ static void gicv5_ppi_irq_mask(struct irq_data *d) isb(); } +static void gicv5_iri_irq_mask(struct irq_data *d, u8 hwirq_type) +{ + u64 cddis; + + cddis = FIELD_PREP(GICV5_GIC_CDDIS_ID_MASK, d->hwirq) | + FIELD_PREP(GICV5_GIC_CDDIS_TYPE_MASK, hwirq_type); + + gic_insn(cddis, CDDIS); + /* + * We must make sure that GIC CDDIS write effects are propagated + * immediately to make sure the disable takes effect to guarantee + * that the lazy-disabled IRQ mechanism works. + * Rule R_XCLJC states that the effects of a GIC system instruction + * complete in finite time. + * The GSB ensures completion of the GIC instruction and prevents + * loads, stores and GIC instructions from executing part of their + * functionality before the GSB SYS. + */ + gsb_sys(); +} + +static void gicv5_spi_irq_mask(struct irq_data *d) +{ + gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI); +} + static void gicv5_ppi_irq_unmask(struct irq_data *d) { u64 hwirq_id_bit = BIT_ULL(d->hwirq % 64); @@ -95,6 +140,25 @@ static void gicv5_ppi_irq_unmask(struct irq_data *d) isb(); } +static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type) +{ + u64 cden; + + cden = FIELD_PREP(GICV5_GIC_CDEN_ID_MASK, d->hwirq) | + FIELD_PREP(GICV5_GIC_CDEN_TYPE_MASK, hwirq_type); + /* + * Rule R_XCLJC states that the effects of a GIC system instruction + * complete in finite time and that's the only requirement when + * unmasking an SPI IRQ. 
+ */ + gic_insn(cden, CDEN); +} + +static void gicv5_spi_irq_unmask(struct irq_data *d) +{ + gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI); +} + static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type) { u64 cddi; @@ -112,6 +176,46 @@ static void gicv5_ppi_irq_eoi(struct irq_data *d) gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_PPI); } +static void gicv5_spi_irq_eoi(struct irq_data *d) +{ + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_iri_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force, u8 hwirq_type) +{ + int ret, cpuid; + u16 iaffid; + u64 cdaff; + + if (force) + cpuid = cpumask_first(mask_val); + else + cpuid = cpumask_any_and(mask_val, cpu_online_mask); + + ret = gicv5_irs_cpu_to_iaffid(cpuid, &iaffid); + if (ret) + return ret; + + cdaff = FIELD_PREP(GICV5_GIC_CDAFF_IAFFID_MASK, iaffid) | + FIELD_PREP(GICV5_GIC_CDAFF_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDAFF_ID_MASK, d->hwirq); + gic_insn(cdaff, CDAFF); + + irq_data_update_effective_affinity(d, cpumask_of(cpuid)); + + return IRQ_SET_MASK_OK_DONE; +} + +static int gicv5_spi_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + return gicv5_iri_irq_set_affinity(d, mask_val, force, + GICV5_HWIRQ_TYPE_SPI); +} + enum ppi_reg { PPI_PENDING, PPI_ACTIVE, @@ -192,6 +296,46 @@ static int gicv5_ppi_irq_get_irqchip_state(struct irq_data *d, } } +static int gicv5_iri_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state, u8 hwirq_type) +{ + u64 icsr, cdrcfg; + + cdrcfg = d->hwirq | FIELD_PREP(GICV5_GIC_CDRCFG_TYPE_MASK, hwirq_type); + + gic_insn(cdrcfg, CDRCFG); + isb(); + icsr = read_sysreg_s(SYS_ICC_ICSR_EL1); + + if (FIELD_GET(ICC_ICSR_EL1_F, icsr)) { + pr_err("ICSR_EL1 is invalid\n"); + return -EINVAL; + } + + switch (which) { + case IRQCHIP_STATE_PENDING: + *state = !!(FIELD_GET(ICC_ICSR_EL1_Pending, icsr)); + return 0; + + case IRQCHIP_STATE_ACTIVE: + *state = !!(FIELD_GET(ICC_ICSR_EL1_Active, icsr)); + return 0; + + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } +} + +static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + return gicv5_iri_irq_get_irqchip_state(d, which, state, + GICV5_HWIRQ_TYPE_SPI); +} + static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool state) @@ -209,6 +353,45 @@ static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d, } } +static void gicv5_iri_irq_write_pending_state(struct irq_data *d, bool state, + u8 hwirq_type) +{ + u64 cdpend; + + cdpend = FIELD_PREP(GICV5_GIC_CDPEND_TYPE_MASK, hwirq_type) | + FIELD_PREP(GICV5_GIC_CDPEND_ID_MASK, d->hwirq) | + FIELD_PREP(GICV5_GIC_CDPEND_PENDING_MASK, state); + + gic_insn(cdpend, CDPEND); +} + +static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state) +{ + gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + gicv5_spi_irq_write_pending_state(d, state); + break; + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } + + return 0; +} + +static int gicv5_spi_irq_retrigger(struct irq_data *data) +{ + return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, + true); +} + static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq) { u64 bit = BIT_ULL(hwirq % 64); @@ 
-227,10 +410,26 @@ static const struct irq_chip gicv5_ppi_irq_chip = { IRQCHIP_MASK_ON_SUSPEND, }; -static int gicv5_irq_ppi_domain_translate(struct irq_domain *d, - struct irq_fwspec *fwspec, - irq_hw_number_t *hwirq, - unsigned int *type) +static const struct irq_chip gicv5_spi_irq_chip = { + .name = "GICv5-SPI", + .irq_mask = gicv5_spi_irq_mask, + .irq_unmask = gicv5_spi_irq_unmask, + .irq_eoi = gicv5_spi_irq_eoi, + .irq_set_type = gicv5_spi_irq_set_type, + .irq_set_affinity = gicv5_spi_irq_set_affinity, + .irq_retrigger = gicv5_spi_irq_retrigger, + .irq_get_irqchip_state = gicv5_spi_irq_get_irqchip_state, + .irq_set_irqchip_state = gicv5_spi_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type, + const u8 hwirq_type) { if (!is_of_node(fwspec->fwnode)) return -EINVAL; @@ -238,20 +437,39 @@ static int gicv5_irq_ppi_domain_translate(struct irq_domain *d, if (fwspec->param_count < 3) return -EINVAL; - if (fwspec->param[0] != GICV5_HWIRQ_TYPE_PPI) + if (fwspec->param[0] != hwirq_type) return -EINVAL; *hwirq = fwspec->param[1]; - /* - * Handling mode is hardcoded for PPIs, set the type using - * HW reported value. - */ - *type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_EDGE_RISING; + switch (hwirq_type) { + case GICV5_HWIRQ_TYPE_PPI: + /* + * Handling mode is hardcoded for PPIs, set the type using + * HW reported value. + */ + *type = gicv5_ppi_irq_is_level(*hwirq) ? IRQ_TYPE_LEVEL_LOW : + IRQ_TYPE_EDGE_RISING; + break; + case GICV5_HWIRQ_TYPE_SPI: + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + break; + default: + BUILD_BUG_ON(1); + } return 0; } +static int gicv5_irq_ppi_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + return gicv5_irq_domain_translate(d, fwspec, hwirq, type, + GICV5_HWIRQ_TYPE_PPI); +} + static int gicv5_irq_ppi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -310,6 +528,63 @@ static const struct irq_domain_ops gicv5_irq_ppi_domain_ops = { .select = gicv5_irq_ppi_domain_select }; +static int gicv5_irq_spi_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + return gicv5_irq_domain_translate(d, fwspec, hwirq, type, + GICV5_HWIRQ_TYPE_SPI); +} + +static int gicv5_irq_spi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct gicv5_irs_chip_data *chip_data; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + ret = gicv5_irq_spi_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + irqd = irq_desc_get_irq_data(irq_to_desc(virq)); + chip_data = gicv5_irs_lookup_by_spi_id(hwirq); + + irq_domain_set_info(domain, virq, hwirq, &gicv5_spi_irq_chip, chip_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(virq); + irqd_set_single_target(irqd); + + gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_SPI); + + return 0; +} + +static int gicv5_irq_spi_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + if (fwspec->fwnode != d->fwnode) + return 0; + + if (fwspec->param[0] != 
GICV5_HWIRQ_TYPE_SPI) + return 0; + + return (d == gicv5_global_data.spi_domain); +} + +static const struct irq_domain_ops gicv5_irq_spi_domain_ops = { + .translate = gicv5_irq_spi_domain_translate, + .alloc = gicv5_irq_spi_domain_alloc, + .free = gicv5_irq_domain_free, + .select = gicv5_irq_spi_domain_select +}; static void handle_irq_per_domain(u32 hwirq) { u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq); @@ -320,6 +595,9 @@ static void handle_irq_per_domain(u32 hwirq) case GICV5_HWIRQ_TYPE_PPI: domain = gicv5_global_data.ppi_domain; break; + case GICV5_HWIRQ_TYPE_SPI: + domain = gicv5_global_data.spi_domain; + break; default: pr_err_once("Unknown IRQ type, bail out\n"); return; @@ -392,19 +670,23 @@ static int gicv5_starting_cpu(unsigned int cpu) gicv5_cpu_enable_interrupts(); - return 0; + return gicv5_irs_register_cpu(cpu); } static void __init gicv5_free_domains(void) { if (gicv5_global_data.ppi_domain) irq_domain_remove(gicv5_global_data.ppi_domain); + if (gicv5_global_data.spi_domain) + irq_domain_remove(gicv5_global_data.spi_domain); gicv5_global_data.ppi_domain = NULL; + gicv5_global_data.spi_domain = NULL; } static int __init gicv5_init_domains(struct fwnode_handle *handle) { + u32 spi_count = gicv5_global_data.global_spi_count; struct irq_domain *d; d = irq_domain_create_linear(handle, PPI_NR, &gicv5_irq_ppi_domain_ops, NULL); @@ -413,6 +695,20 @@ static int __init gicv5_init_domains(struct fwnode_handle *handle) irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); gicv5_global_data.ppi_domain = d; + + if (spi_count) { + d = irq_domain_create_linear(handle, spi_count, + &gicv5_irq_spi_domain_ops, NULL); + + if (!d) { + gicv5_free_domains(); + return -ENOMEM; + } + + gicv5_global_data.spi_domain = d; + irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); + } + gicv5_global_data.fwnode = handle; return 0; @@ -424,26 +720,33 @@ static void gicv5_set_cpuif_pribits(void) switch (FIELD_GET(ICC_IDR0_EL1_PRI_BITS, icc_idr0)) { case ICC_IDR0_EL1_PRI_BITS_4BITS: - pri_bits = 4; + gicv5_global_data.cpuif_pri_bits = 4; break; case ICC_IDR0_EL1_PRI_BITS_5BITS: - pri_bits = 5; + gicv5_global_data.cpuif_pri_bits = 5; break; default: pr_err("Unexpected ICC_IDR0_EL1_PRI_BITS value, default to 4"); - pri_bits = 4; + gicv5_global_data.cpuif_pri_bits = 4; break; } } static int __init gicv5_of_init(struct device_node *node, struct device_node *parent) { - int ret = gicv5_init_domains(of_fwnode_handle(node)); + int ret = gicv5_irs_of_probe(node); if (ret) return ret; + ret = gicv5_init_domains(of_fwnode_handle(node)); + if (ret) + goto out_irs; + gicv5_set_cpuif_pribits(); + pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits, + gicv5_global_data.irs_pri_bits); + ret = gicv5_starting_cpu(smp_processor_id()); if (ret) goto out_dom; @@ -458,6 +761,8 @@ out_int: gicv5_cpu_disable_interrupts(); out_dom: gicv5_free_domains(); +out_irs: + gicv5_irs_remove(); return ret; } diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h index b08ec0308a9b..1064a69ab33f 100644 --- a/include/linux/irqchip/arm-gic-v5.h +++ b/include/linux/irqchip/arm-gic-v5.h @@ -5,6 +5,8 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H #define __LINUX_IRQCHIP_ARM_GIC_V5_H +#include + #include /* @@ -15,5 +17,143 @@ #define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0) #define GICV5_HWIRQ_TYPE_PPI UL(0x1) +#define GICV5_HWIRQ_TYPE_SPI UL(0x3) + +/* + * Tables attributes + */ +#define GICV5_NO_READ_ALLOC 0b0 +#define GICV5_READ_ALLOC 0b1 +#define GICV5_NO_WRITE_ALLOC 0b0 +#define GICV5_WRITE_ALLOC 0b1 + +#define 
GICV5_NON_CACHE 0b00 +#define GICV5_WB_CACHE 0b01 +#define GICV5_WT_CACHE 0b10 + +#define GICV5_NON_SHARE 0b00 +#define GICV5_OUTER_SHARE 0b10 +#define GICV5_INNER_SHARE 0b11 + +/* + * IRS registers + */ +#define GICV5_IRS_IDR1 0x0004 +#define GICV5_IRS_IDR2 0x0008 +#define GICV5_IRS_IDR5 0x0014 +#define GICV5_IRS_IDR6 0x0018 +#define GICV5_IRS_IDR7 0x001c +#define GICV5_IRS_CR0 0x0080 +#define GICV5_IRS_CR1 0x0084 +#define GICV5_IRS_SPI_SELR 0x0108 +#define GICV5_IRS_SPI_CFGR 0x0114 +#define GICV5_IRS_SPI_STATUSR 0x0118 +#define GICV5_IRS_PE_SELR 0x0140 +#define GICV5_IRS_PE_STATUSR 0x0144 +#define GICV5_IRS_PE_CR0 0x0148 + +#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20) +#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16) + +#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000 +#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001 +#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010 +#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011 +#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100 + +#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15) +#define GICV5_IRS_IDR2_ISTMD BIT(14) +#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11) +#define GICV5_IRS_IDR2_IST_LEVELS BIT(10) +#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6) +#define GICV5_IRS_IDR2_LPI BIT(5) +#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0) + +#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0) +#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0) +#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0) +#define GICV5_IRS_CR0_IDLE BIT(1) +#define GICV5_IRS_CR0_IRSEN BIT(0) + +#define GICV5_IRS_CR1_VPED_WA BIT(15) +#define GICV5_IRS_CR1_VPED_RA BIT(14) +#define GICV5_IRS_CR1_VMD_WA BIT(13) +#define GICV5_IRS_CR1_VMD_RA BIT(12) +#define GICV5_IRS_CR1_VPET_WA BIT(11) +#define GICV5_IRS_CR1_VPET_RA BIT(10) +#define GICV5_IRS_CR1_VMT_WA BIT(9) +#define GICV5_IRS_CR1_VMT_RA BIT(8) +#define GICV5_IRS_CR1_IST_WA BIT(7) +#define GICV5_IRS_CR1_IST_RA BIT(6) +#define GICV5_IRS_CR1_IC GENMASK(5, 4) +#define GICV5_IRS_CR1_OC GENMASK(3, 2) +#define GICV5_IRS_CR1_SH GENMASK(1, 0) + +#define GICV5_IRS_SPI_STATUSR_V BIT(1) +#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0) + +#define GICV5_IRS_SPI_CFGR_TM BIT(0) + +#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0) + +#define GICV5_IRS_PE_STATUSR_V BIT(1) +#define GICV5_IRS_PE_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_PE_CR0_DPS BIT(0) + +/* + * Global Data structures and functions + */ +struct gicv5_chip_data { + struct fwnode_handle *fwnode; + struct irq_domain *ppi_domain; + struct irq_domain *spi_domain; + u32 global_spi_count; + u8 cpuif_pri_bits; + u8 irs_pri_bits; +}; + +extern struct gicv5_chip_data gicv5_global_data __read_mostly; + +struct gicv5_irs_chip_data { + struct list_head entry; + struct fwnode_handle *fwnode; + void __iomem *irs_base; + u32 flags; + u32 spi_min; + u32 spi_range; + raw_spinlock_t spi_config_lock; +}; + +static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset, + const char *reg_s, u32 mask, + u32 *val) +{ + void __iomem *reg = addr + offset; + u32 tmp; + int ret; + + ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC); + if (unlikely(ret == -ETIMEDOUT)) { + pr_err_ratelimited("%s timeout...\n", reg_s); + return ret; + } + + if (val) + *val = tmp; + + return 0; +} + +#define gicv5_wait_for_op_atomic(base, reg, mask, val) \ + gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val) +int gicv5_irs_of_probe(struct device_node *parent); +void gicv5_irs_remove(void); +int gicv5_irs_register_cpu(int cpuid); +int 
gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid); +struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id); +int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type); #endif -- cgit v1.2.3 From 0f0101325876859eb463792d4df3fd22b6539d29 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:12 +0200 Subject: irqchip/gic-v5: Add GICv5 LPI/IPI support An IRS supports Logical Peripheral Interrupts (LPIs); implement Linux IPIs on top of them. LPIs are used for interrupt signals that are translated by a GICv5 ITS (Interrupt Translation Service) but also for software-generated IRQs - namely interrupts that are not driven by a HW signal, i.e. IPIs. LPIs rely on memory storage for interrupt routing and state. LPI state and routing information is kept in the Interrupt State Table (IST). IRSes provide support for 1- or 2-level IST tables configured to support a maximum number of interrupts that depends on the OS configuration and the HW capabilities. On systems that provide 2-level IST support, always allow the maximum number of LPIs; on systems with only 1-level support, limit the number of LPIs to 2^12 to prevent wasting memory (presumably a system that supports a 1-level only IST is not expecting a large number of interrupts). On a 2-level IST system, L2 entries are allocated on demand. The IST table memory is allocated using the kmalloc() interface; the allocation required may be smaller than a page and must be made up of contiguous physical pages if larger than a page. On systems where the IRS is not cache-coherent with the CPUs, cache maintenance operations are executed to clean and invalidate the allocated memory to the point of coherency, making it visible to the IRS components. On GICv5 systems, IPIs are implemented using LPIs. Add an LPI IRQ domain and implement an IPI-specific IRQ domain created as a child/subdomain of the LPI domain to allocate the required number of LPIs needed to implement the IPIs. IPIs are backed by LPIs; add LPI allocation/de-allocation functions. The LPI INTID namespace is managed using an IDA to alloc/free LPI INTIDs. Associate an IPI irqchip with IPI IRQ descriptors to provide core code with the irqchip.ipi_send_single() method required to raise an IPI.
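For illustration only, the LPI ID bits selection described above boils down to the following sketch (the function and parameter names here are assumptions for clarity, not the driver's):

	/*
	 * Two-level ISTs always take the HW maximum; linear (1-level) ISTs
	 * are capped at 12 bits, but never below the HW-required minimum
	 * nor above the HW maximum.
	 */
	static unsigned int pick_lpi_id_bits(bool two_level,
					     unsigned int hw_id_bits,
					     unsigned int hw_min_lpi_id_bits)
	{
		if (two_level)
			return hw_id_bits;

		return min(max(12U, hw_min_lpi_id_bits), hw_id_bits);
	}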
Co-developed-by: Sascha Bischoff Signed-off-by: Sascha Bischoff Co-developed-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Will Deacon Cc: Thomas Gleixner Cc: Catalin Marinas Cc: Marc Zyngier Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-22-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/smp.h | 17 ++ arch/arm64/include/asm/sysreg.h | 6 + arch/arm64/kernel/smp.c | 17 -- drivers/irqchip/irq-gic-v5-irs.c | 364 +++++++++++++++++++++++++++++++++++++ drivers/irqchip/irq-gic-v5.c | 299 +++++++++++++++++++++++++++++- include/linux/irqchip/arm-gic-v5.h | 63 ++++++- 6 files changed, 746 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index d6fd6efb66a6..d48ef6d5abcc 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -50,6 +50,23 @@ struct seq_file; */ extern void smp_init_cpus(void); +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, + IPI_CPU_STOP_NMI, + IPI_TIMER, + IPI_IRQ_WORK, + NR_IPI, + /* + * Any enum >= NR_IPI and < MAX_IPI is special and not tracable + * with trace_ipi_* + */ + IPI_CPU_BACKTRACE = NR_IPI, + IPI_KGDB_ROUNDUP, + MAX_IPI +}; + /* * Register IPI interrupts with the arch SMP code */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index efd2e7a1fbe2..948007cd3684 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1088,6 +1088,7 @@ #define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3) #define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0) #define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0) +#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1) #define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1) #define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7) #define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4) @@ -1115,6 +1116,11 @@ #define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29) #define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0) +/* Definitions for GIC CDHM */ +#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32) +#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29) +#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0) + /* Definitions for GIC CDPEND */ #define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32) #define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 2c501e917d38..4797e2c70014 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -64,23 +64,6 @@ struct secondary_data secondary_data; /* Number of CPUs which aren't online, but looping in kernel text. */ static int cpus_stuck_in_kernel; -enum ipi_msg_type { - IPI_RESCHEDULE, - IPI_CALL_FUNC, - IPI_CPU_STOP, - IPI_CPU_STOP_NMI, - IPI_TIMER, - IPI_IRQ_WORK, - NR_IPI, - /* - * Any enum >= NR_IPI and < MAX_IPI is special and not tracable - * with trace_ipi_* - */ - IPI_CPU_BACKTRACE = NR_IPI, - IPI_KGDB_ROUNDUP, - MAX_IPI -}; - static int ipi_irq_base __ro_after_init; static int nr_ipi __ro_after_init = NR_IPI; diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c index fba8efceb26e..f00a4a6fece7 100644 --- a/drivers/irqchip/irq-gic-v5-irs.c +++ b/drivers/irqchip/irq-gic-v5-irs.c @@ -5,12 +5,20 @@ #define pr_fmt(fmt) "GICv5 IRS: " fmt +#include #include #include #include #include +/* + * Hardcoded ID_BITS limit for systems supporting only a 1-level IST + * table. 
Systems supporting only a 1-level IST table aren't expected + * to require more than 2^12 LPIs. Tweak as required. + */ +#define LPI_ID_BITS_LINEAR 12 + #define IRS_FLAGS_NON_COHERENT BIT(0) static DEFINE_PER_CPU_READ_MOSTLY(struct gicv5_irs_chip_data *, per_cpu_irs_data); @@ -28,6 +36,331 @@ static void irs_writel_relaxed(struct gicv5_irs_chip_data *irs_data, writel_relaxed(val, irs_data->irs_base + reg_offset); } +static u64 irs_readq_relaxed(struct gicv5_irs_chip_data *irs_data, + const u32 reg_offset) +{ + return readq_relaxed(irs_data->irs_base + reg_offset); +} + +static void irs_writeq_relaxed(struct gicv5_irs_chip_data *irs_data, + const u64 val, const u32 reg_offset) +{ + writeq_relaxed(val, irs_data->irs_base + reg_offset); +} + +/* + * The polling wait (in gicv5_wait_for_op_s_atomic()) on a GIC register + * provides the memory barriers (through MMIO accessors) + * required to synchronize CPU and GIC access to IST memory. + */ +static int gicv5_irs_ist_synchronise(struct gicv5_irs_chip_data *irs_data) +{ + return gicv5_wait_for_op_atomic(irs_data->irs_base, GICV5_IRS_IST_STATUSR, + GICV5_IRS_IST_STATUSR_IDLE, NULL); +} + +static int __init gicv5_irs_init_ist_linear(struct gicv5_irs_chip_data *irs_data, + unsigned int lpi_id_bits, + unsigned int istsz) +{ + size_t l2istsz; + u32 n, cfgr; + void *ist; + u64 baser; + int ret; + + /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */ + n = max(5, lpi_id_bits + 1 + istsz); + + l2istsz = BIT(n + 1); + /* + * Check memory requirements. For a linear IST we cap the + * number of ID bits to a value that should never exceed + * kmalloc interface memory allocation limits, so this + * check is really belt and braces. + */ + if (l2istsz > KMALLOC_MAX_SIZE) { + u8 lpi_id_cap = ilog2(KMALLOC_MAX_SIZE) - 2 + istsz; + + pr_warn("Limiting LPI ID bits from %u to %u\n", + lpi_id_bits, lpi_id_cap); + lpi_id_bits = lpi_id_cap; + l2istsz = KMALLOC_MAX_SIZE; + } + + ist = kzalloc(l2istsz, GFP_KERNEL); + if (!ist) + return -ENOMEM; + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)ist, + (unsigned long)ist + l2istsz); + else + dsb(ishst); + + cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE, + GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR) | + FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) | + FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ, + GICV5_IRS_IST_CFGR_L2SZ_4K) | + FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR); + + gicv5_global_data.ist.l2 = false; + + baser = (virt_to_phys(ist) & GICV5_IRS_IST_BASER_ADDR_MASK) | + FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1); + irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + kfree(ist); + return ret; + } + + return 0; +} + +static int __init gicv5_irs_init_ist_two_level(struct gicv5_irs_chip_data *irs_data, + unsigned int lpi_id_bits, + unsigned int istsz, + unsigned int l2sz) +{ + __le64 *l1ist; + u32 cfgr, n; + size_t l1sz; + u64 baser; + int ret; + + /* Taken from GICv5 specifications 10.2.1.13 IRS_IST_BASER */ + n = max(5, lpi_id_bits - ((10 - istsz) + (2 * l2sz)) + 2); + + l1sz = BIT(n + 1); + + l1ist = kzalloc(l1sz, GFP_KERNEL); + if (!l1ist) + return -ENOMEM; + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)l1ist, + (unsigned long)l1ist + l1sz); + else + dsb(ishst); + + cfgr = FIELD_PREP(GICV5_IRS_IST_CFGR_STRUCTURE, + GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL) | + FIELD_PREP(GICV5_IRS_IST_CFGR_ISTSZ, istsz) | + 
FIELD_PREP(GICV5_IRS_IST_CFGR_L2SZ, l2sz) | + FIELD_PREP(GICV5_IRS_IST_CFGR_LPI_ID_BITS, lpi_id_bits); + irs_writel_relaxed(irs_data, cfgr, GICV5_IRS_IST_CFGR); + + /* + * The L2SZ determine bits required at L2 level. Number of bytes + * required by metadata is reported through istsz - the number of bits + * covered by L2 entries scales accordingly. + */ + gicv5_global_data.ist.l2_size = BIT(11 + (2 * l2sz) + 1); + gicv5_global_data.ist.l2_bits = (10 - istsz) + (2 * l2sz); + gicv5_global_data.ist.l1ist_addr = l1ist; + gicv5_global_data.ist.l2 = true; + + baser = (virt_to_phys(l1ist) & GICV5_IRS_IST_BASER_ADDR_MASK) | + FIELD_PREP(GICV5_IRS_IST_BASER_VALID, 0x1); + irs_writeq_relaxed(irs_data, baser, GICV5_IRS_IST_BASER); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + kfree(l1ist); + return ret; + } + + return 0; +} + +/* + * Alloc L2 IST entries on demand. + * + * Locking/serialization is guaranteed by irqdomain core code by + * taking the hierarchical domain struct irq_domain.root->mutex. + */ +int gicv5_irs_iste_alloc(const u32 lpi) +{ + struct gicv5_irs_chip_data *irs_data; + unsigned int index; + u32 l2istr, l2bits; + __le64 *l1ist; + size_t l2size; + void *l2ist; + int ret; + + if (!gicv5_global_data.ist.l2) + return 0; + + irs_data = per_cpu(per_cpu_irs_data, smp_processor_id()); + if (!irs_data) + return -ENOENT; + + l2size = gicv5_global_data.ist.l2_size; + l2bits = gicv5_global_data.ist.l2_bits; + l1ist = gicv5_global_data.ist.l1ist_addr; + index = lpi >> l2bits; + + if (FIELD_GET(GICV5_ISTL1E_VALID, le64_to_cpu(l1ist[index]))) + return 0; + + l2ist = kzalloc(l2size, GFP_KERNEL); + if (!l2ist) + return -ENOMEM; + + l1ist[index] = cpu_to_le64(virt_to_phys(l2ist) & GICV5_ISTL1E_L2_ADDR_MASK); + + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) { + dcache_clean_inval_poc((unsigned long)l2ist, + (unsigned long)l2ist + l2size); + dcache_clean_poc((unsigned long)(l1ist + index), + (unsigned long)(l1ist + index) + sizeof(*l1ist)); + } else { + dsb(ishst); + } + + l2istr = FIELD_PREP(GICV5_IRS_MAP_L2_ISTR_ID, lpi); + irs_writel_relaxed(irs_data, l2istr, GICV5_IRS_MAP_L2_ISTR); + + ret = gicv5_irs_ist_synchronise(irs_data); + if (ret) { + l1ist[index] = 0; + kfree(l2ist); + return ret; + } + + /* + * Make sure we invalidate the cache line pulled before the IRS + * had a chance to update the L1 entry and mark it valid. + */ + if (irs_data->flags & IRS_FLAGS_NON_COHERENT) { + /* + * gicv5_irs_ist_synchronise() includes memory + * barriers (MMIO accessors) required to guarantee that the + * following dcache invalidation is not executed before the + * IST mapping operation has completed. + */ + dcache_inval_poc((unsigned long)(l1ist + index), + (unsigned long)(l1ist + index) + sizeof(*l1ist)); + } + + return 0; +} + +/* + * Try to match the L2 IST size to the pagesize, and if this is not possible + * pick the smallest supported L2 size in order to minimise the requirement for + * physically contiguous blocks of memory as page-sized allocations are + * guaranteed to be physically contiguous, and are by definition the easiest to + * find. + * + * Fall back to the smallest supported size (in the event that the pagesize + * itself is not supported) again serves to make it easier to find physically + * contiguous blocks of memory. 
+ */ +static unsigned int gicv5_irs_l2_sz(u32 idr2) +{ + switch (PAGE_SIZE) { + case SZ_64K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_64KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_64K; + fallthrough; + case SZ_4K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_4K; + fallthrough; + case SZ_16K: + if (GICV5_IRS_IST_L2SZ_SUPPORT_16KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_16K; + break; + } + + if (GICV5_IRS_IST_L2SZ_SUPPORT_4KB(idr2)) + return GICV5_IRS_IST_CFGR_L2SZ_4K; + + return GICV5_IRS_IST_CFGR_L2SZ_64K; +} + +static int __init gicv5_irs_init_ist(struct gicv5_irs_chip_data *irs_data) +{ + u32 lpi_id_bits, idr2_id_bits, idr2_min_lpi_id_bits, l2_iste_sz, l2sz; + u32 l2_iste_sz_split, idr2; + bool two_levels, istmd; + u64 baser; + int ret; + + baser = irs_readq_relaxed(irs_data, GICV5_IRS_IST_BASER); + if (FIELD_GET(GICV5_IRS_IST_BASER_VALID, baser)) { + pr_err("IST is marked as valid already; cannot allocate\n"); + return -EPERM; + } + + idr2 = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2); + two_levels = !!FIELD_GET(GICV5_IRS_IDR2_IST_LEVELS, idr2); + + idr2_id_bits = FIELD_GET(GICV5_IRS_IDR2_ID_BITS, idr2); + idr2_min_lpi_id_bits = FIELD_GET(GICV5_IRS_IDR2_MIN_LPI_ID_BITS, idr2); + + /* + * For two level tables we are always supporting the maximum allowed + * number of IDs. + * + * For 1-level tables, we should support a number of bits that + * is >= min_lpi_id_bits but cap it to LPI_ID_BITS_LINEAR lest + * the level 1-table gets too large and its memory allocation + * may fail. + */ + if (two_levels) { + lpi_id_bits = idr2_id_bits; + } else { + lpi_id_bits = max(LPI_ID_BITS_LINEAR, idr2_min_lpi_id_bits); + lpi_id_bits = min(lpi_id_bits, idr2_id_bits); + } + + /* + * Cap the ID bits according to the CPUIF supported ID bits + */ + lpi_id_bits = min(lpi_id_bits, gicv5_global_data.cpuif_id_bits); + + if (two_levels) + l2sz = gicv5_irs_l2_sz(idr2); + + istmd = !!FIELD_GET(GICV5_IRS_IDR2_ISTMD, idr2); + + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_4; + + if (istmd) { + l2_iste_sz_split = FIELD_GET(GICV5_IRS_IDR2_ISTMD_SZ, idr2); + + if (lpi_id_bits < l2_iste_sz_split) + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_8; + else + l2_iste_sz = GICV5_IRS_IST_CFGR_ISTSZ_16; + } + + /* + * Follow GICv5 specification recommendation to opt in for two + * level tables (ref: 10.2.1.14 IRS_IST_CFGR). 
+ */ + if (two_levels && (lpi_id_bits > ((10 - l2_iste_sz) + (2 * l2sz)))) { + ret = gicv5_irs_init_ist_two_level(irs_data, lpi_id_bits, + l2_iste_sz, l2sz); + } else { + ret = gicv5_irs_init_ist_linear(irs_data, lpi_id_bits, + l2_iste_sz); + } + if (ret) + return ret; + + gicv5_init_lpis(BIT(lpi_id_bits)); + + return 0; +} + struct iaffid_entry { u16 iaffid; bool valid; @@ -362,6 +695,13 @@ static int __init gicv5_irs_init(struct device_node *node) goto out_iomem; } + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR2); + if (WARN(!FIELD_GET(GICV5_IRS_IDR2_LPI, idr), + "LPI support not available - no IPIs, can't proceed\n")) { + ret = -ENODEV; + goto out_iomem; + } + idr = irs_readl_relaxed(irs_data, GICV5_IRS_IDR7); irs_data->spi_min = FIELD_GET(GICV5_IRS_IDR7_SPI_BASE, idr); @@ -391,6 +731,8 @@ static int __init gicv5_irs_init(struct device_node *node) spi_count = FIELD_GET(GICV5_IRS_IDR5_SPI_RANGE, idr); gicv5_global_data.global_spi_count = spi_count; + gicv5_init_lpi_domain(); + pr_debug("Detected %u SPIs globally\n", spi_count); } @@ -409,6 +751,9 @@ void __init gicv5_irs_remove(void) { struct gicv5_irs_chip_data *irs_data, *tmp_data; + gicv5_free_lpi_domain(); + gicv5_deinit_lpis(); + list_for_each_entry_safe(irs_data, tmp_data, &irs_nodes, entry) { iounmap(irs_data->irs_base); list_del(&irs_data->entry); @@ -416,6 +761,25 @@ void __init gicv5_irs_remove(void) } } +int __init gicv5_irs_enable(void) +{ + struct gicv5_irs_chip_data *irs_data; + int ret; + + irs_data = list_first_entry_or_null(&irs_nodes, + struct gicv5_irs_chip_data, entry); + if (!irs_data) + return -ENODEV; + + ret = gicv5_irs_init_ist(irs_data); + if (ret) { + pr_err("Failed to init IST\n"); + return ret; + } + + return 0; +} + int __init gicv5_irs_of_probe(struct device_node *parent) { struct device_node *np; diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c index 9c55ddcfa0df..84ed13c4f2b1 100644 --- a/drivers/irqchip/irq-gic-v5.c +++ b/drivers/irqchip/irq-gic-v5.c @@ -5,7 +5,9 @@ #define pr_fmt(fmt) "GICv5: " fmt +#include #include +#include #include #include @@ -28,6 +30,42 @@ static bool gicv5_cpuif_has_gcie(void) struct gicv5_chip_data gicv5_global_data __read_mostly; +static DEFINE_IDA(lpi_ida); +static u32 num_lpis __ro_after_init; + +void __init gicv5_init_lpis(u32 lpis) +{ + num_lpis = lpis; +} + +void __init gicv5_deinit_lpis(void) +{ + num_lpis = 0; +} + +static int alloc_lpi(void) +{ + if (!num_lpis) + return -ENOSPC; + + return ida_alloc_max(&lpi_ida, num_lpis - 1, GFP_KERNEL); +} + +static void release_lpi(u32 lpi) +{ + ida_free(&lpi_ida, lpi); +} + +int gicv5_alloc_lpi(void) +{ + return alloc_lpi(); +} + +void gicv5_free_lpi(u32 lpi) +{ + release_lpi(lpi); +} + static void gicv5_ppi_priority_init(void) { write_sysreg_s(REPEAT_BYTE(GICV5_IRQ_PRI_MI), SYS_ICC_PPI_PRIORITYR0_EL1); @@ -60,7 +98,7 @@ static void gicv5_hwirq_init(irq_hw_number_t hwirq, u8 priority, u8 hwirq_type) u16 iaffid; int ret; - if (hwirq_type == GICV5_HWIRQ_TYPE_SPI) { + if (hwirq_type == GICV5_HWIRQ_TYPE_LPI || hwirq_type == GICV5_HWIRQ_TYPE_SPI) { cdpri = FIELD_PREP(GICV5_GIC_CDPRI_PRIORITY_MASK, priority) | FIELD_PREP(GICV5_GIC_CDPRI_TYPE_MASK, hwirq_type) | FIELD_PREP(GICV5_GIC_CDPRI_ID_MASK, hwirq); @@ -122,6 +160,11 @@ static void gicv5_spi_irq_mask(struct irq_data *d) gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_SPI); } +static void gicv5_lpi_irq_mask(struct irq_data *d) +{ + gicv5_iri_irq_mask(d, GICV5_HWIRQ_TYPE_LPI); +} + static void gicv5_ppi_irq_unmask(struct irq_data *d) { u64 hwirq_id_bit = 
BIT_ULL(d->hwirq % 64); @@ -149,7 +192,7 @@ static void gicv5_iri_irq_unmask(struct irq_data *d, u8 hwirq_type) /* * Rule R_XCLJC states that the effects of a GIC system instruction * complete in finite time and that's the only requirement when - * unmasking an SPI IRQ. + * unmasking an SPI/LPI IRQ. */ gic_insn(cden, CDEN); } @@ -159,6 +202,11 @@ static void gicv5_spi_irq_unmask(struct irq_data *d) gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_SPI); } +static void gicv5_lpi_irq_unmask(struct irq_data *d) +{ + gicv5_iri_irq_unmask(d, GICV5_HWIRQ_TYPE_LPI); +} + static void gicv5_hwirq_eoi(u32 hwirq_id, u8 hwirq_type) { u64 cddi; @@ -181,6 +229,11 @@ static void gicv5_spi_irq_eoi(struct irq_data *d) gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_SPI); } +static void gicv5_lpi_irq_eoi(struct irq_data *d) +{ + gicv5_hwirq_eoi(d->hwirq, GICV5_HWIRQ_TYPE_LPI); +} + static int gicv5_iri_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force, u8 hwirq_type) @@ -216,6 +269,14 @@ static int gicv5_spi_irq_set_affinity(struct irq_data *d, GICV5_HWIRQ_TYPE_SPI); } +static int gicv5_lpi_irq_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + return gicv5_iri_irq_set_affinity(d, mask_val, force, + GICV5_HWIRQ_TYPE_LPI); +} + enum ppi_reg { PPI_PENDING, PPI_ACTIVE, @@ -336,6 +397,14 @@ static int gicv5_spi_irq_get_irqchip_state(struct irq_data *d, GICV5_HWIRQ_TYPE_SPI); } +static int gicv5_lpi_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + return gicv5_iri_irq_get_irqchip_state(d, which, state, + GICV5_HWIRQ_TYPE_LPI); +} + static int gicv5_ppi_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool state) @@ -370,6 +439,11 @@ static void gicv5_spi_irq_write_pending_state(struct irq_data *d, bool state) gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_SPI); } +static void gicv5_lpi_irq_write_pending_state(struct irq_data *d, bool state) +{ + gicv5_iri_irq_write_pending_state(d, state, GICV5_HWIRQ_TYPE_LPI); +} + static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool state) @@ -386,12 +460,41 @@ static int gicv5_spi_irq_set_irqchip_state(struct irq_data *d, return 0; } +static int gicv5_lpi_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + gicv5_lpi_irq_write_pending_state(d, state); + break; + + default: + pr_debug("Unexpected irqchip_irq_state\n"); + return -EINVAL; + } + + return 0; +} + static int gicv5_spi_irq_retrigger(struct irq_data *data) { return !gicv5_spi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); } +static int gicv5_lpi_irq_retrigger(struct irq_data *data) +{ + return !gicv5_lpi_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, + true); +} + +static void gicv5_ipi_send_single(struct irq_data *d, unsigned int cpu) +{ + /* Mark the LPI pending */ + irq_chip_retrigger_hierarchy(d); +} + static bool gicv5_ppi_irq_is_level(irq_hw_number_t hwirq) { u64 bit = BIT_ULL(hwirq % 64); @@ -425,6 +528,32 @@ static const struct irq_chip gicv5_spi_irq_chip = { IRQCHIP_MASK_ON_SUSPEND, }; +static const struct irq_chip gicv5_lpi_irq_chip = { + .name = "GICv5-LPI", + .irq_mask = gicv5_lpi_irq_mask, + .irq_unmask = gicv5_lpi_irq_unmask, + .irq_eoi = gicv5_lpi_irq_eoi, + .irq_set_affinity = gicv5_lpi_irq_set_affinity, + .irq_retrigger = gicv5_lpi_irq_retrigger, + .irq_get_irqchip_state = gicv5_lpi_irq_get_irqchip_state, + 
.irq_set_irqchip_state = gicv5_lpi_irq_set_irqchip_state, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static const struct irq_chip gicv5_ipi_irq_chip = { + .name = "GICv5-IPI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .ipi_send_single = gicv5_ipi_send_single, + .flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + static __always_inline int gicv5_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, irq_hw_number_t *hwirq, @@ -585,6 +714,130 @@ static const struct irq_domain_ops gicv5_irq_spi_domain_ops = { .free = gicv5_irq_domain_free, .select = gicv5_irq_spi_domain_select }; + +static void gicv5_lpi_config_reset(struct irq_data *d) +{ + u64 cdhm; + + /* + * Reset LPIs handling mode to edge by default and clear pending + * state to make sure we start the LPI with a clean state from + * previous incarnations. + */ + cdhm = FIELD_PREP(GICV5_GIC_CDHM_HM_MASK, 0) | + FIELD_PREP(GICV5_GIC_CDHM_TYPE_MASK, GICV5_HWIRQ_TYPE_LPI) | + FIELD_PREP(GICV5_GIC_CDHM_ID_MASK, d->hwirq); + gic_insn(cdhm, CDHM); + + gicv5_lpi_irq_write_pending_state(d, false); +} + +static int gicv5_irq_lpi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + irq_hw_number_t hwirq; + struct irq_data *irqd; + u32 *lpi = arg; + int ret; + + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; + + hwirq = *lpi; + + irqd = irq_domain_get_irq_data(domain, virq); + + irq_domain_set_info(domain, virq, hwirq, &gicv5_lpi_irq_chip, NULL, + handle_fasteoi_irq, NULL, NULL); + irqd_set_single_target(irqd); + + ret = gicv5_irs_iste_alloc(hwirq); + if (ret < 0) + return ret; + + gicv5_hwirq_init(hwirq, GICV5_IRQ_PRI_MI, GICV5_HWIRQ_TYPE_LPI); + gicv5_lpi_config_reset(irqd); + + return 0; +} + +static const struct irq_domain_ops gicv5_irq_lpi_domain_ops = { + .alloc = gicv5_irq_lpi_domain_alloc, + .free = gicv5_irq_domain_free, +}; + +void __init gicv5_init_lpi_domain(void) +{ + struct irq_domain *d; + + d = irq_domain_create_tree(NULL, &gicv5_irq_lpi_domain_ops, NULL); + gicv5_global_data.lpi_domain = d; +} + +void __init gicv5_free_lpi_domain(void) +{ + irq_domain_remove(gicv5_global_data.lpi_domain); + gicv5_global_data.lpi_domain = NULL; +} + +static int gicv5_irq_ipi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_data *irqd; + int ret, i; + u32 lpi; + + for (i = 0; i < nr_irqs; i++) { + ret = gicv5_alloc_lpi(); + if (ret < 0) + return ret; + + lpi = ret; + + ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi); + if (ret) { + gicv5_free_lpi(lpi); + return ret; + } + + irqd = irq_domain_get_irq_data(domain, virq + i); + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &gicv5_ipi_irq_chip, NULL); + + irqd_set_single_target(irqd); + + irq_set_handler(virq + i, handle_percpu_irq); + } + + return 0; +} + +static void gicv5_irq_ipi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + unsigned int i; + + for (i = 0; i < nr_irqs; i++) { + d = irq_domain_get_irq_data(domain, virq + i); + + if (!d) + return; + + gicv5_free_lpi(d->parent_data->hwirq); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + irq_domain_free_irqs_parent(domain, virq + i, 1); + } +} + +static const 
struct irq_domain_ops gicv5_irq_ipi_domain_ops = { + .alloc = gicv5_irq_ipi_domain_alloc, + .free = gicv5_irq_ipi_domain_free, +}; + static void handle_irq_per_domain(u32 hwirq) { u8 hwirq_type = FIELD_GET(GICV5_HWIRQ_TYPE, hwirq); @@ -598,6 +851,9 @@ static void handle_irq_per_domain(u32 hwirq) case GICV5_HWIRQ_TYPE_SPI: domain = gicv5_global_data.spi_domain; break; + case GICV5_HWIRQ_TYPE_LPI: + domain = gicv5_global_data.lpi_domain; + break; default: pr_err_once("Unknown IRQ type, bail out\n"); return; @@ -679,9 +935,12 @@ static void __init gicv5_free_domains(void) irq_domain_remove(gicv5_global_data.ppi_domain); if (gicv5_global_data.spi_domain) irq_domain_remove(gicv5_global_data.spi_domain); + if (gicv5_global_data.ipi_domain) + irq_domain_remove(gicv5_global_data.ipi_domain); gicv5_global_data.ppi_domain = NULL; gicv5_global_data.spi_domain = NULL; + gicv5_global_data.ipi_domain = NULL; } static int __init gicv5_init_domains(struct fwnode_handle *handle) @@ -709,6 +968,19 @@ static int __init gicv5_init_domains(struct fwnode_handle *handle) irq_domain_update_bus_token(d, DOMAIN_BUS_WIRED); } + if (!WARN(!gicv5_global_data.lpi_domain, + "LPI domain uninitialized, can't set up IPIs")) { + d = irq_domain_create_hierarchy(gicv5_global_data.lpi_domain, + 0, GICV5_IPIS_PER_CPU * nr_cpu_ids, + NULL, &gicv5_irq_ipi_domain_ops, + NULL); + + if (!d) { + gicv5_free_domains(); + return -ENOMEM; + } + gicv5_global_data.ipi_domain = d; + } gicv5_global_data.fwnode = handle; return 0; @@ -732,6 +1004,24 @@ static void gicv5_set_cpuif_pribits(void) } } +static void gicv5_set_cpuif_idbits(void) +{ + u32 icc_idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1); + + switch (FIELD_GET(ICC_IDR0_EL1_ID_BITS, icc_idr0)) { + case ICC_IDR0_EL1_ID_BITS_16BITS: + gicv5_global_data.cpuif_id_bits = 16; + break; + case ICC_IDR0_EL1_ID_BITS_24BITS: + gicv5_global_data.cpuif_id_bits = 24; + break; + default: + pr_err("Unexpected ICC_IDR0_EL1_ID_BITS value, default to 16"); + gicv5_global_data.cpuif_id_bits = 16; + break; + } +} + static int __init gicv5_of_init(struct device_node *node, struct device_node *parent) { int ret = gicv5_irs_of_probe(node); @@ -743,6 +1033,7 @@ static int __init gicv5_of_init(struct device_node *node, struct device_node *pa goto out_irs; gicv5_set_cpuif_pribits(); + gicv5_set_cpuif_idbits(); pri_bits = min_not_zero(gicv5_global_data.cpuif_pri_bits, gicv5_global_data.irs_pri_bits); @@ -755,6 +1046,10 @@ static int __init gicv5_of_init(struct device_node *node, struct device_node *pa if (ret) goto out_int; + ret = gicv5_irs_enable(); + if (ret) + goto out_int; + return 0; out_int: diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h index 1064a69ab33f..680eed794a35 100644 --- a/include/linux/irqchip/arm-gic-v5.h +++ b/include/linux/irqchip/arm-gic-v5.h @@ -7,8 +7,12 @@ #include +#include +#include #include +#define GICV5_IPIS_PER_CPU MAX_IPI + /* * INTID handling */ @@ -17,6 +21,7 @@ #define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0) #define GICV5_HWIRQ_TYPE_PPI UL(0x1) +#define GICV5_HWIRQ_TYPE_LPI UL(0x2) #define GICV5_HWIRQ_TYPE_SPI UL(0x3) /* @@ -36,7 +41,7 @@ #define GICV5_INNER_SHARE 0b11 /* - * IRS registers + * IRS registers and tables structures */ #define GICV5_IRS_IDR1 0x0004 #define GICV5_IRS_IDR2 0x0008 @@ -51,6 +56,10 @@ #define GICV5_IRS_PE_SELR 0x0140 #define GICV5_IRS_PE_STATUSR 0x0144 #define GICV5_IRS_PE_CR0 0x0148 +#define GICV5_IRS_IST_BASER 0x0180 +#define GICV5_IRS_IST_CFGR 0x0190 +#define GICV5_IRS_IST_STATUSR 0x0194 +#define GICV5_IRS_MAP_L2_ISTR 
0x01c0 #define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20) #define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16) @@ -72,6 +81,11 @@ #define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0) #define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0) #define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0) + +#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r)) +#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r)) +#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r)) + #define GICV5_IRS_CR0_IDLE BIT(1) #define GICV5_IRS_CR0_IRSEN BIT(0) @@ -103,6 +117,33 @@ #define GICV5_IRS_PE_CR0_DPS BIT(0) +#define GICV5_IRS_IST_STATUSR_IDLE BIT(0) + +#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16) +#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7) +#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5) +#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0) + +#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0 +#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1 + +#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00 +#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01 +#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10 + +#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00 +#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01 +#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10 + +#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6) +#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0) + +#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0) + +#define GICV5_ISTL1E_VALID BIT_ULL(0) + +#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12) + /* * Global Data structures and functions */ @@ -110,9 +151,18 @@ struct gicv5_chip_data { struct fwnode_handle *fwnode; struct irq_domain *ppi_domain; struct irq_domain *spi_domain; + struct irq_domain *lpi_domain; + struct irq_domain *ipi_domain; u32 global_spi_count; u8 cpuif_pri_bits; + u8 cpuif_id_bits; u8 irs_pri_bits; + struct { + __le64 *l1ist_addr; + u32 l2_size; + u8 l2_bits; + bool l2; + } ist; }; extern struct gicv5_chip_data gicv5_global_data __read_mostly; @@ -150,10 +200,21 @@ static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset, #define gicv5_wait_for_op_atomic(base, reg, mask, val) \ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val) +void __init gicv5_init_lpi_domain(void); +void __init gicv5_free_lpi_domain(void); + int gicv5_irs_of_probe(struct device_node *parent); void gicv5_irs_remove(void); +int gicv5_irs_enable(void); int gicv5_irs_register_cpu(int cpuid); int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid); struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id); int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type); +int gicv5_irs_iste_alloc(u32 lpi); + +void gicv5_init_lpis(u32 max); +void gicv5_deinit_lpis(void); + +int gicv5_alloc_lpi(void); +void gicv5_free_lpi(u32 lpi); #endif -- cgit v1.2.3 From 31fd3becb920e0b31b99cf202ace637f75dd7e78 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:14 +0200 Subject: of/irq: Add of_msi_xlate() helper function Add an of_msi_xlate() helper that maps a device ID and returns the device node of the MSI controller the device ID is mapped to. Required by core functions that need an MSI controller device node pointer at the same time as a mapped device ID, of_msi_map_id() is not sufficient for that purpose. 
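A minimal usage sketch (the device pointer and the input ID are assumed; only the documented contract is shown):

	struct device_node *msi_np = NULL;	/* must be NULL on entry */
	u32 mapped_id;

	mapped_id = of_msi_xlate(dev, &msi_np, id_in);
	if (msi_np) {
		/* MSI controller found: use the node and the mapped ID ... */
		of_node_put(msi_np);	/* drop the reference taken by the helper */
	}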
Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Rob Herring Cc: Marc Zyngier Reviewed-by: Rob Herring (Arm) Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-24-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- drivers/of/irq.c | 22 +++++++++++++++++----- include/linux/of_irq.h | 5 +++++ 2 files changed, 22 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/irq.c b/drivers/of/irq.c index f8ad79b9b1c9..74aaea61de13 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -670,8 +670,20 @@ err: } } -static u32 __of_msi_map_id(struct device *dev, struct device_node **np, - u32 id_in) +/** + * of_msi_xlate - map a MSI ID and find relevant MSI controller node + * @dev: device for which the mapping is to be done. + * @msi_np: Pointer to store the MSI controller node + * @id_in: Device ID. + * + * Walk up the device hierarchy looking for devices with a "msi-map" + * property. If found, apply the mapping to @id_in. @msi_np pointed + * value must be NULL on entry, if an MSI controller is found @msi_np is + * initialized to the MSI controller node with a reference held. + * + * Returns: The mapped MSI id. + */ +u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in) { struct device *parent_dev; u32 id_out = id_in; @@ -682,7 +694,7 @@ static u32 __of_msi_map_id(struct device *dev, struct device_node **np, */ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) if (!of_map_id(parent_dev->of_node, id_in, "msi-map", - "msi-map-mask", np, &id_out)) + "msi-map-mask", msi_np, &id_out)) break; return id_out; } @@ -700,7 +712,7 @@ static u32 __of_msi_map_id(struct device *dev, struct device_node **np, */ u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in) { - return __of_msi_map_id(dev, &msi_np, id_in); + return of_msi_xlate(dev, &msi_np, id_in); } /** @@ -719,7 +731,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, { struct device_node *np = NULL; - __of_msi_map_id(dev, &np, id); + of_msi_xlate(dev, &np, id); return irq_find_matching_host(np, bus_token); } diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 6337ad4e5fe8..a480063c9cb1 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -54,6 +54,7 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, u32 bus_token); extern void of_msi_configure(struct device *dev, const struct device_node *np); +extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline void of_irq_init(const struct of_device_id *matches) @@ -100,6 +101,10 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev static inline void of_msi_configure(struct device *dev, struct device_node *np) { } +static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in) +{ + return id_in; +} static inline u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in) { -- cgit v1.2.3 From cd0ec59affb1f31347170bbafadb9c5cc716bca9 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:15 +0200 Subject: PCI/MSI: Add pci_msi_map_rid_ctlr_node() helper function IRQchip drivers need a PCI/MSI function to map a RID to a MSI controller deviceID namespace and at the same time retrieve the struct device_node pointer of the MSI controller the RID is mapped to. 
Add pci_msi_map_rid_ctlr_node() to achieve this purpose. Cc Bjorn Helgaas Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-25-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- drivers/pci/msi/irqdomain.c | 20 ++++++++++++++++++++ include/linux/msi.h | 1 + 2 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index c05152733993..8a6e80d3963a 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -427,6 +427,26 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) return rid; } +/** + * pci_msi_map_rid_ctlr_node - Get the MSI controller node and MSI requester id (RID) + * @pdev: The PCI device + * @node: Pointer to store the MSI controller device node + * + * Use the firmware data to find the MSI controller node for @pdev. + * If found map the RID and initialize @node with it. @node value must + * be set to NULL on entry. + * + * Returns: The RID. + */ +u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node) +{ + u32 rid = pci_dev_id(pdev); + + pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); + + return of_msi_xlate(&pdev->dev, node, rid); +} + /** * pci_msi_get_device_domain - Get the MSI domain for a given PCI device * @pdev: The PCI device diff --git a/include/linux/msi.h b/include/linux/msi.h index 6863540f4b71..a418e2695b05 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -705,6 +705,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); +u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node); struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); #else /* CONFIG_PCI_MSI */ static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) -- cgit v1.2.3 From 8b65db1e93a227ad9cc1b67cb221b06869f0b35f Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:17 +0200 Subject: irqchip/msi-lib: Add IRQ_DOMAIN_FLAG_FWNODE_PARENT handling In some irqchip implementations the fwnode representing the IRQdomain and the MSI controller fwnode do not match; in particular the IRQdomain fwnode is the MSI controller fwnode parent. To support selecting such IRQ domains, add a flag in core IRQ domain code that explicitly tells the MSI lib to use the parent fwnode while carrying out IRQ domain selection. Update the msi-lib select callback with the resulting logic. 
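As an example of how the flag is meant to be used, a hypothetical domain creation site (names other than the flag and irq_domain_create_hierarchy() are assumptions) would look like:

	/*
	 * The domain's fwnode is the parent of the MSI controller fwnodes
	 * (e.g. an ITS node whose children are the translate frames), so
	 * ask msi-lib to match the fwspec's parent fwnode on selection.
	 */
	d = irq_domain_create_hierarchy(parent_domain,
					IRQ_DOMAIN_FLAG_FWNODE_PARENT, 0,
					fwnode, &domain_ops, NULL);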
Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-27-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- drivers/irqchip/irq-msi-lib.c | 5 ++++- include/linux/irqdomain.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c index 246c30205af4..454c7f16dd4d 100644 --- a/drivers/irqchip/irq-msi-lib.c +++ b/drivers/irqchip/irq-msi-lib.c @@ -133,11 +133,14 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, { const struct msi_parent_ops *ops = d->msi_parent_ops; u32 busmask = BIT(bus_token); + struct fwnode_handle *fwh; if (!ops) return 0; - if (fwspec->fwnode != d->fwnode || fwspec->param_count != 0) + fwh = d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode) + : fwspec->fwnode; + if (fwh != d->fwnode || fwspec->param_count != 0) return 0; /* Handle pure domain searches */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 7387d183029b..25c7cbeed393 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -212,6 +212,9 @@ enum { /* Address and data pair is mutable when irq_set_affinity() */ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11), + /* IRQ domain requires parent fwnode matching */ + IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12), + /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the -- cgit v1.2.3 From 57d72196dfc8502b7e376ecdffb11c4f8766f26d Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:18 +0200 Subject: irqchip/gic-v5: Add GICv5 ITS support The GICv5 architecture implements Interrupt Translation Service (ITS) components in order to translate events coming from peripherals into interrupt events delivered to the connected IRSes. Events (i.e. MSI memory writes to the ITS translate frame) are translated by the ITS using tables kept in memory. ITS translation tables for peripherals are kept in memory storage (device table [DT] and Interrupt Translation Table [ITT]) that is allocated by the driver on boot. Both tables can be 1- or 2-level; the structure is chosen by the driver after probing the ITS HW parameters and checking the allowed table splits and supported {device/event}_IDbits. DT table entries are allocated on demand (i.e. when a device is probed). The DT table is sized using the number of supported deviceID bits: that is a system design decision (the number of deviceID bits implemented should reflect the number of devices expected in the system), so it makes sense to allocate a DT table that can cater for the maximum number of devices. DT and ITT tables are allocated using the kmalloc interface; the allocation may be smaller or larger than a page and must be backed by physically contiguous pages. LPI INTIDs backing the device events are allocated one-by-one and only upon Linux IRQ allocation; this avoids preallocating a large number of LPIs to cover the HW device MSI vector size when only a few MSI entries are actually enabled by a device. ITS cacheability/shareability attributes are programmed according to the provided firmware ITS description. The GICv5 ITS driver partially reuses the GICv3 ITS MSI parent infrastructure and adds the functions required to retrieve the ITS translate frame addresses out of msi-map and msi-parent properties to implement the GICv5 ITS MSI parent callbacks.
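As a sketch of the on-demand policy described above (using the gicv5_alloc_lpi() helper added earlier in the series; the event-to-LPI map is an assumption for illustration, not the driver's real data structure):

	static int back_event_with_lpi(u32 *event_to_lpi, u16 event)
	{
		/* One INTID, allocated only when Linux allocates the IRQ */
		int lpi = gicv5_alloc_lpi();

		if (lpi < 0)
			return lpi;

		event_to_lpi[event] = lpi;
		return 0;
	}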
Co-developed-by: Sascha Bischoff Signed-off-by: Sascha Bischoff Co-developed-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-28-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- MAINTAINERS | 1 + drivers/irqchip/Kconfig | 3 + drivers/irqchip/Makefile | 2 +- drivers/irqchip/irq-gic-its-msi-parent.c | 166 ++++ drivers/irqchip/irq-gic-its-msi-parent.h | 1 + drivers/irqchip/irq-gic-v5-irs.c | 24 + drivers/irqchip/irq-gic-v5-its.c | 1206 ++++++++++++++++++++++++++++++ drivers/irqchip/irq-gic-v5.c | 2 + include/linux/irqchip/arm-gic-v5.h | 157 ++++ 9 files changed, 1561 insertions(+), 1 deletion(-) create mode 100644 drivers/irqchip/irq-gic-v5-its.c (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index b1202987eef5..b035802ea41d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1970,6 +1970,7 @@ M: Marc Zyngier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/interrupt-controller/arm,gic-v5*.yaml +F: drivers/irqchip/irq-gic-its-msi-parent.[ch] F: drivers/irqchip/irq-gic-v5*.[ch] F: include/linux/irqchip/arm-gic-v5.h diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index f9eae1a645c9..67d79d33407a 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -62,6 +62,9 @@ config ARM_GIC_V5 bool select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ + select IRQ_MSI_LIB + select ARM_GIC_ITS_PARENT config ARM_NVIC bool diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 3ce6ea9a371b..5459f66e597f 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -37,7 +37,7 @@ obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o -obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o +obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o diff --git a/drivers/irqchip/irq-gic-its-msi-parent.c b/drivers/irqchip/irq-gic-its-msi-parent.c index 8beecfed2b84..eb1473f1448a 100644 --- a/drivers/irqchip/irq-gic-its-msi-parent.c +++ b/drivers/irqchip/irq-gic-its-msi-parent.c @@ -5,6 +5,7 @@ // Copyright (C) 2022 Intel #include +#include #include #include "irq-gic-its-msi-parent.h" @@ -18,6 +19,23 @@ MSI_FLAG_PCI_MSIX | \ MSI_FLAG_MULTI_PCI_MSI) +static int its_translate_frame_address(struct device_node *msi_node, phys_addr_t *pa) +{ + struct resource res; + int ret; + + ret = of_property_match_string(msi_node, "reg-names", "ns-translate"); + if (ret < 0) + return ret; + + ret = of_address_to_resource(msi_node, ret, &res); + if (ret) + return ret; + + *pa = res.start; + return 0; +} + #ifdef CONFIG_PCI_MSI static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) { @@ -82,8 +100,46 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, msi_info = msi_get_domain_info(domain->parent); return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); } + +static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct device_node *msi_node = NULL; + 
struct msi_domain_info *msi_info; + struct pci_dev *pdev; + phys_addr_t pa; + u32 rid; + int ret; + + if (!dev_is_pci(dev)) + return -EINVAL; + + pdev = to_pci_dev(dev); + + rid = pci_msi_map_rid_ctlr_node(pdev, &msi_node); + if (!msi_node) + return -ENODEV; + + ret = its_translate_frame_address(msi_node, &pa); + if (ret) + return -ENODEV; + + of_node_put(msi_node); + + /* ITS specific DeviceID */ + info->scratchpad[0].ul = rid; + /* ITS translate frame physical address */ + info->scratchpad[1].ul = pa; + + /* Always allocate power of two vectors */ + nvec = roundup_pow_of_two(nvec); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); +} #else /* CONFIG_PCI_MSI */ #define its_pci_msi_prepare NULL +#define its_v5_pci_msi_prepare NULL #endif /* !CONFIG_PCI_MSI */ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, @@ -118,6 +174,53 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, return ret; } +static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev, + u32 *dev_id, phys_addr_t *pa) +{ + int ret, index = 0; + /* + * Retrieve the DeviceID and the ITS translate frame node pointer + * out of the msi-parent property. + */ + do { + struct of_phandle_args args; + + ret = of_parse_phandle_with_args(dev->of_node, + "msi-parent", "#msi-cells", + index, &args); + if (ret) + break; + /* + * The IRQ domain fwnode is the msi controller parent + * in GICv5 (where the msi controller nodes are the + * ITS translate frames). + */ + if (args.np->parent == irq_domain_get_of_node(domain)) { + if (WARN_ON(args.args_count != 1)) + return -EINVAL; + *dev_id = args.args[0]; + + ret = its_translate_frame_address(args.np, pa); + if (ret) + return -ENODEV; + break; + } + index++; + } while (!ret); + + if (ret) { + struct device_node *np = NULL; + + ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id); + if (np) { + ret = its_translate_frame_address(np, pa); + of_node_put(np); + } + } + + return ret; +} + int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) { return -1; @@ -148,6 +251,33 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, dev, nvec, info); } +static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct msi_domain_info *msi_info; + phys_addr_t pa; + u32 dev_id; + int ret; + + if (!dev->of_node) + return -ENODEV; + + ret = of_v5_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa); + if (ret) + return ret; + + /* ITS specific DeviceID */ + info->scratchpad[0].ul = dev_id; + /* ITS translate frame physical address */ + info->scratchpad[1].ul = pa; + + /* Allocate always as a power of 2 */ + nvec = roundup_pow_of_two(nvec); + + msi_info = msi_get_domain_info(domain->parent); + return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info); +} + static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) { struct msi_domain_info *msi_info; @@ -199,6 +329,32 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain, return true; } +static bool its_v5_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + switch (info->bus_token) { + case DOMAIN_BUS_PCI_DEVICE_MSI: + case DOMAIN_BUS_PCI_DEVICE_MSIX: + info->ops->msi_prepare = 
its_v5_pci_msi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + case DOMAIN_BUS_DEVICE_MSI: + case DOMAIN_BUS_WIRED_TO_MSI: + info->ops->msi_prepare = its_v5_pmsi_prepare; + info->ops->msi_teardown = its_msi_teardown; + break; + default: + /* Confused. How did the lib return true? */ + WARN_ON_ONCE(1); + return false; + } + + return true; +} + const struct msi_parent_ops gic_v3_its_msi_parent_ops = { .supported_flags = ITS_MSI_FLAGS_SUPPORTED, .required_flags = ITS_MSI_FLAGS_REQUIRED, @@ -208,3 +364,13 @@ const struct msi_parent_ops gic_v3_its_msi_parent_ops = { .prefix = "ITS-", .init_dev_msi_info = its_init_dev_msi_info, }; + +const struct msi_parent_ops gic_v5_its_msi_parent_ops = { + .supported_flags = ITS_MSI_FLAGS_SUPPORTED, + .required_flags = ITS_MSI_FLAGS_REQUIRED, + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_NEXUS, + .bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI, + .prefix = "ITS-v5-", + .init_dev_msi_info = its_v5_init_dev_msi_info, +}; diff --git a/drivers/irqchip/irq-gic-its-msi-parent.h b/drivers/irqchip/irq-gic-its-msi-parent.h index 75e223e673ce..df016f347337 100644 --- a/drivers/irqchip/irq-gic-its-msi-parent.h +++ b/drivers/irqchip/irq-gic-its-msi-parent.h @@ -7,5 +7,6 @@ #define _IRQ_GIC_ITS_MSI_PARENT_H extern const struct msi_parent_ops gic_v3_its_msi_parent_ops; +extern const struct msi_parent_ops gic_v5_its_msi_parent_ops; #endif /* _IRQ_GIC_ITS_MSI_PARENT_H */ diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c index f00a4a6fece7..f845415f9143 100644 --- a/drivers/irqchip/irq-gic-v5-irs.c +++ b/drivers/irqchip/irq-gic-v5-irs.c @@ -484,6 +484,22 @@ static int gicv5_irs_wait_for_idle(struct gicv5_irs_chip_data *irs_data) GICV5_IRS_CR0_IDLE, NULL); } +void gicv5_irs_syncr(void) +{ + struct gicv5_irs_chip_data *irs_data; + u32 syncr; + + irs_data = list_first_entry_or_null(&irs_nodes, struct gicv5_irs_chip_data, entry); + if (WARN_ON_ONCE(!irs_data)) + return; + + syncr = FIELD_PREP(GICV5_IRS_SYNCR_SYNC, 1); + irs_writel_relaxed(irs_data, syncr, GICV5_IRS_SYNCR); + + gicv5_wait_for_op(irs_data->irs_base, GICV5_IRS_SYNC_STATUSR, + GICV5_IRS_SYNC_STATUSR_IDLE); +} + int gicv5_irs_register_cpu(int cpuid) { struct gicv5_irs_chip_data *irs_data; @@ -780,6 +796,14 @@ int __init gicv5_irs_enable(void) return 0; } +void __init gicv5_irs_its_probe(void) +{ + struct gicv5_irs_chip_data *irs_data; + + list_for_each_entry(irs_data, &irs_nodes, entry) + gicv5_its_of_probe(to_of_node(irs_data->fwnode)); +} + int __init gicv5_irs_of_probe(struct device_node *parent) { struct device_node *np; diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c new file mode 100644 index 000000000000..700df6d0687e --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-its.c @@ -0,0 +1,1206 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. 
+ */ + +#define pr_fmt(fmt) "GICv5 ITS: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "irq-gic-its-msi-parent.h" + +#define ITS_FLAGS_NON_COHERENT BIT(0) + +struct gicv5_its_chip_data { + struct xarray its_devices; + struct mutex dev_alloc_lock; + struct fwnode_handle *fwnode; + struct gicv5_its_devtab_cfg devtab_cfgr; + void __iomem *its_base; + u32 flags; + unsigned int msi_domain_flags; +}; + +struct gicv5_its_dev { + struct gicv5_its_chip_data *its_node; + struct gicv5_its_itt_cfg itt_cfg; + unsigned long *event_map; + u32 device_id; + u32 num_events; + phys_addr_t its_trans_phys_base; +}; + +static u32 its_readl_relaxed(struct gicv5_its_chip_data *its_node, const u32 reg_offset) +{ + return readl_relaxed(its_node->its_base + reg_offset); +} + +static void its_writel_relaxed(struct gicv5_its_chip_data *its_node, const u32 val, + const u32 reg_offset) +{ + writel_relaxed(val, its_node->its_base + reg_offset); +} + +static void its_writeq_relaxed(struct gicv5_its_chip_data *its_node, const u64 val, + const u32 reg_offset) +{ + writeq_relaxed(val, its_node->its_base + reg_offset); +} + +static void gicv5_its_dcache_clean(struct gicv5_its_chip_data *its, void *start, + size_t sz) +{ + void *end = start + sz; + + if (its->flags & ITS_FLAGS_NON_COHERENT) + dcache_clean_inval_poc((unsigned long)start, (unsigned long)end); + else + dsb(ishst); +} + +static void its_write_table_entry(struct gicv5_its_chip_data *its, __le64 *entry, + u64 val) +{ + WRITE_ONCE(*entry, cpu_to_le64(val)); + gicv5_its_dcache_clean(its, entry, sizeof(*entry)); +} + +#define devtab_cfgr_field(its, f) \ + FIELD_GET(GICV5_ITS_DT_CFGR_##f, (its)->devtab_cfgr.cfgr) + +static int gicv5_its_cache_sync(struct gicv5_its_chip_data *its) +{ + return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_STATUSR, + GICV5_ITS_STATUSR_IDLE, NULL); +} + +static void gicv5_its_syncr(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + u64 syncr; + + syncr = FIELD_PREP(GICV5_ITS_SYNCR_SYNC, 1) | + FIELD_PREP(GICV5_ITS_SYNCR_DEVICEID, its_dev->device_id); + + its_writeq_relaxed(its, syncr, GICV5_ITS_SYNCR); + + gicv5_wait_for_op(its->its_base, GICV5_ITS_SYNC_STATUSR, GICV5_ITS_SYNC_STATUSR_IDLE); +} + +/* Number of bits required for each L2 {device/interrupt translation} table size */ +#define ITS_L2SZ_64K_L2_BITS 13 +#define ITS_L2SZ_16K_L2_BITS 11 +#define ITS_L2SZ_4K_L2_BITS 9 + +static unsigned int gicv5_its_l2sz_to_l2_bits(unsigned int sz) +{ + switch (sz) { + case GICV5_ITS_DT_ITT_CFGR_L2SZ_64k: + return ITS_L2SZ_64K_L2_BITS; + case GICV5_ITS_DT_ITT_CFGR_L2SZ_16k: + return ITS_L2SZ_16K_L2_BITS; + case GICV5_ITS_DT_ITT_CFGR_L2SZ_4k: + default: + return ITS_L2SZ_4K_L2_BITS; + } +} + +static int gicv5_its_itt_cache_inv(struct gicv5_its_chip_data *its, u32 device_id, + u16 event_id) +{ + u32 eventr, eidr; + u64 didr; + + didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, device_id); + eidr = FIELD_PREP(GICV5_ITS_EIDR_EVENTID, event_id); + eventr = FIELD_PREP(GICV5_ITS_INV_EVENTR_I, 0x1); + + its_writeq_relaxed(its, didr, GICV5_ITS_DIDR); + its_writel_relaxed(its, eidr, GICV5_ITS_EIDR); + its_writel_relaxed(its, eventr, GICV5_ITS_INV_EVENTR); + + return gicv5_its_cache_sync(its); +} + +static void gicv5_its_free_itt_linear(struct gicv5_its_dev *its_dev) +{ + kfree(its_dev->itt_cfg.linear.itt); +} + +static void gicv5_its_free_itt_two_level(struct gicv5_its_dev *its_dev) +{ + unsigned int i, num_ents = its_dev->itt_cfg.l2.num_l1_ents; + 
+ for (i = 0; i < num_ents; i++) + kfree(its_dev->itt_cfg.l2.l2ptrs[i]); + + kfree(its_dev->itt_cfg.l2.l2ptrs); + kfree(its_dev->itt_cfg.l2.l1itt); +} + +static void gicv5_its_free_itt(struct gicv5_its_dev *its_dev) +{ + if (!its_dev->itt_cfg.l2itt) + gicv5_its_free_itt_linear(its_dev); + else + gicv5_its_free_itt_two_level(its_dev); +} + +static int gicv5_its_create_itt_linear(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev, + unsigned int event_id_bits) +{ + unsigned int num_ents = BIT(event_id_bits); + __le64 *itt; + + itt = kcalloc(num_ents, sizeof(*itt), GFP_KERNEL); + if (!itt) + return -ENOMEM; + + its_dev->itt_cfg.linear.itt = itt; + its_dev->itt_cfg.linear.num_ents = num_ents; + its_dev->itt_cfg.l2itt = false; + its_dev->itt_cfg.event_id_bits = event_id_bits; + + gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt)); + + return 0; +} + +/* + * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike + * with the device table. Span may be used to limit the second level table + * size, where possible. + */ +static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev, + unsigned int event_id_bits, + unsigned int itt_l2sz, + unsigned int num_events) +{ + unsigned int l1_bits, l2_bits, span, events_per_l2_table; + unsigned int i, complete_tables, final_span, num_ents; + __le64 *itt_l1, *itt_l2, **l2ptrs; + int ret; + u64 val; + + ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz); + if (ret >= event_id_bits) { + pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n", + itt_l2sz, event_id_bits); + return -EINVAL; + } + + l2_bits = ret; + + l1_bits = event_id_bits - l2_bits; + + num_ents = BIT(l1_bits); + + itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL); + if (!itt_l1) + return -ENOMEM; + + l2ptrs = kcalloc(num_ents, sizeof(*l2ptrs), GFP_KERNEL); + if (!l2ptrs) { + kfree(itt_l1); + return -ENOMEM; + } + + its_dev->itt_cfg.l2.l2ptrs = l2ptrs; + + its_dev->itt_cfg.l2.l2sz = itt_l2sz; + its_dev->itt_cfg.l2.l1itt = itt_l1; + its_dev->itt_cfg.l2.num_l1_ents = num_ents; + its_dev->itt_cfg.l2itt = true; + its_dev->itt_cfg.event_id_bits = event_id_bits; + + /* + * Need to determine how many entries there are per L2 - this is based + * on the number of bits in the table. + */ + events_per_l2_table = BIT(l2_bits); + complete_tables = num_events / events_per_l2_table; + final_span = order_base_2(num_events % events_per_l2_table); + + for (i = 0; i < num_ents; i++) { + size_t l2sz; + + span = i == complete_tables ? final_span : l2_bits; + + itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL); + if (!itt_l2) { + ret = -ENOMEM; + goto out_free; + } + + its_dev->itt_cfg.l2.l2ptrs[i] = itt_l2; + + l2sz = BIT(span) * sizeof(*itt_l2); + + gicv5_its_dcache_clean(its, itt_l2, l2sz); + + val = (virt_to_phys(itt_l2) & GICV5_ITTL1E_L2_ADDR_MASK) | + FIELD_PREP(GICV5_ITTL1E_SPAN, span) | + FIELD_PREP(GICV5_ITTL1E_VALID, 0x1); + + WRITE_ONCE(itt_l1[i], cpu_to_le64(val)); + } + + gicv5_its_dcache_clean(its, itt_l1, num_ents * sizeof(*itt_l1)); + + return 0; + +out_free: + for (i = i - 1; i >= 0; i--) + kfree(its_dev->itt_cfg.l2.l2ptrs[i]); + + kfree(its_dev->itt_cfg.l2.l2ptrs); + kfree(itt_l1); + return ret; +} + +/* + * Function to check whether the device table or ITT table support + * a two-level table and if so depending on the number of id_bits + * requested, determine whether a two-level table is required. + * + * Return the 2-level size value if a two level table is deemed + * necessary. 
+ */ +static bool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz) +{ + unsigned int l2_bits, l2_sz; + + if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1)) + return false; + + if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1)) + return false; + + /* + * Pick an L2 size that matches the pagesize; if a match + * is not found, go for the smallest supported l2 size granule. + * + * This ensures that we will always be able to allocate + * contiguous memory at L2. + */ + switch (PAGE_SIZE) { + case SZ_64K: + if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) { + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k; + break; + } + fallthrough; + case SZ_4K: + if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) { + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k; + break; + } + fallthrough; + case SZ_16K: + if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) { + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k; + break; + } + if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) { + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k; + break; + } + if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) { + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k; + break; + } + + l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k; + break; + } + + l2_bits = gicv5_its_l2sz_to_l2_bits(l2_sz); + + if (l2_bits > id_bits) + return false; + + *sz = l2_sz; + + return true; +} + +static __le64 *gicv5_its_device_get_itte_ref(struct gicv5_its_dev *its_dev, + u16 event_id) +{ + unsigned int l1_idx, l2_idx, l2_bits; + __le64 *l2_itt; + + if (!its_dev->itt_cfg.l2itt) { + __le64 *itt = its_dev->itt_cfg.linear.itt; + + return &itt[event_id]; + } + + l2_bits = gicv5_its_l2sz_to_l2_bits(its_dev->itt_cfg.l2.l2sz); + l1_idx = event_id >> l2_bits; + l2_idx = event_id & GENMASK(l2_bits - 1, 0); + l2_itt = its_dev->itt_cfg.l2.l2ptrs[l1_idx]; + + return &l2_itt[l2_idx]; +} + +static int gicv5_its_device_cache_inv(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + u32 devicer; + u64 didr; + + didr = FIELD_PREP(GICV5_ITS_DIDR_DEVICEID, its_dev->device_id); + devicer = FIELD_PREP(GICV5_ITS_INV_DEVICER_I, 0x1) | + FIELD_PREP(GICV5_ITS_INV_DEVICER_EVENTID_BITS, + its_dev->itt_cfg.event_id_bits) | + FIELD_PREP(GICV5_ITS_INV_DEVICER_L1, 0x0); + its_writeq_relaxed(its, didr, GICV5_ITS_DIDR); + its_writel_relaxed(its, devicer, GICV5_ITS_INV_DEVICER); + + return gicv5_its_cache_sync(its); +} + +/* + * Allocate a level 2 device table entry, update L1 parent to reference it. + * Only used for 2-level device tables, and it is called on demand. + */ +static int gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its, + unsigned int l1_index) +{ + __le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab; + u8 span, l2sz, l2_bits; + u64 l1dte; + + if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index]))) + return 0; + + span = FIELD_GET(GICV5_DTL1E_SPAN, le64_to_cpu(l1devtab[l1_index])); + l2sz = devtab_cfgr_field(its, L2SZ); + + l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz); + + /* + * Span allows us to create a smaller L2 device table. + * If it is too large, use the number of allowed L2 bits. 
+ */ + if (span > l2_bits) + span = l2_bits; + + l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL); + if (!l2devtab) + return -ENOMEM; + + its->devtab_cfgr.l2.l2ptrs[l1_index] = l2devtab; + + l1dte = FIELD_PREP(GICV5_DTL1E_SPAN, span) | + (virt_to_phys(l2devtab) & GICV5_DTL1E_L2_ADDR_MASK) | + FIELD_PREP(GICV5_DTL1E_VALID, 0x1); + its_write_table_entry(its, &l1devtab[l1_index], l1dte); + + return 0; +} + +static __le64 *gicv5_its_devtab_get_dte_ref(struct gicv5_its_chip_data *its, + u32 device_id, bool alloc) +{ + u8 str = devtab_cfgr_field(its, STRUCTURE); + unsigned int l2sz, l2_bits, l1_idx, l2_idx; + __le64 *l2devtab; + int ret; + + if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) { + l2devtab = its->devtab_cfgr.linear.devtab; + return &l2devtab[device_id]; + } + + l2sz = devtab_cfgr_field(its, L2SZ); + l2_bits = gicv5_its_l2sz_to_l2_bits(l2sz); + l1_idx = device_id >> l2_bits; + l2_idx = device_id & GENMASK(l2_bits - 1, 0); + + if (alloc) { + /* + * Allocate a new L2 device table here before + * continuing. We make the assumption that the span in + * the L1 table has been set correctly, and blindly use + * that value. + */ + ret = gicv5_its_alloc_l2_devtab(its, l1_idx); + if (ret) + return NULL; + } + + l2devtab = its->devtab_cfgr.l2.l2ptrs[l1_idx]; + return &l2devtab[l2_idx]; +} + +/* + * Register a new device in the device table. Allocate an ITT and + * program the L2DTE entry according to the ITT structure that + * was chosen. + */ +static int gicv5_its_device_register(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz; + phys_addr_t itt_phys_base; + bool two_level_itt; + u32 idr1, idr2; + __le64 *dte; + u64 val; + int ret; + + device_id_bits = devtab_cfgr_field(its, DEVICEID_BITS); + + if (its_dev->device_id >= BIT(device_id_bits)) { + pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!", + its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0)); + return -EINVAL; + } + + dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true); + if (!dte) + return -ENOMEM; + + if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) + return -EBUSY; + + /* + * Determine how many bits we need, validate those against the max. + * Based on these, determine if we should go for a 1- or 2-level ITT. + */ + event_id_bits = order_base_2(its_dev->num_events); + + idr2 = its_readl_relaxed(its, GICV5_ITS_IDR2); + + if (event_id_bits > FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)) { + pr_err("Required EventID bits (%u) larger than supported bits (%u)!", + event_id_bits, + (u8)FIELD_GET(GICV5_ITS_IDR2_EVENTID_BITS, idr2)); + return -EINVAL; + } + + idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); + + /* + * L2 ITT size is programmed into the L2DTE regardless of + * whether a two-level or linear ITT is built, init it. + */ + itt_l2sz = 0; + + two_level_itt = gicv5_its_l2sz_two_level(false, idr1, event_id_bits, + &itt_l2sz); + if (two_level_itt) + ret = gicv5_its_create_itt_two_level(its, its_dev, event_id_bits, + itt_l2sz, + its_dev->num_events); + else + ret = gicv5_its_create_itt_linear(its, its_dev, event_id_bits); + if (ret) + return ret; + + itt_phys_base = two_level_itt ? virt_to_phys(its_dev->itt_cfg.l2.l1itt) : + virt_to_phys(its_dev->itt_cfg.linear.itt); + + itt_struct = two_level_itt ? 
GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL : + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR; + + val = FIELD_PREP(GICV5_DTL2E_EVENT_ID_BITS, event_id_bits) | + FIELD_PREP(GICV5_DTL2E_ITT_STRUCTURE, itt_struct) | + (itt_phys_base & GICV5_DTL2E_ITT_ADDR_MASK) | + FIELD_PREP(GICV5_DTL2E_ITT_L2SZ, itt_l2sz) | + FIELD_PREP(GICV5_DTL2E_VALID, 0x1); + + its_write_table_entry(its, dte, val); + + ret = gicv5_its_device_cache_inv(its, its_dev); + if (ret) { + its_write_table_entry(its, dte, 0); + gicv5_its_free_itt(its_dev); + return ret; + } + + return 0; +} + +/* + * Unregister a device in the device table. Lookup the device by ID, free the + * corresponding ITT, mark the device as invalid in the device table. + */ +static int gicv5_its_device_unregister(struct gicv5_its_chip_data *its, + struct gicv5_its_dev *its_dev) +{ + __le64 *dte; + + dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, false); + + if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) { + pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!", + its_dev->device_id); + return -EINVAL; + } + + /* Zero everything - make it clear that this is an invalid entry */ + its_write_table_entry(its, dte, 0); + + gicv5_its_free_itt(its_dev); + + return gicv5_its_device_cache_inv(its, its_dev); +} + +/* + * Allocate a 1-level device table. All entries are allocated, but marked + * invalid. + */ +static int gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its, + u8 device_id_bits) +{ + __le64 *devtab; + size_t sz; + u64 baser; + u32 cfgr; + + /* + * We expect a GICv5 implementation requiring a large number of + * deviceID bits to support a 2-level device table. If that's not + * the case, cap the number of deviceIDs supported according to the + * kmalloc limits so that the system can chug along with a linear + * device table. + */ + sz = BIT_ULL(device_id_bits) * sizeof(*devtab); + if (sz > KMALLOC_MAX_SIZE) { + u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab)); + + pr_warn("Limiting device ID bits from %u to %u\n", + device_id_bits, device_id_cap); + device_id_bits = device_id_cap; + } + + devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL); + if (!devtab) + return -ENOMEM; + + gicv5_its_dcache_clean(its, devtab, sz); + + cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE, + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) | + FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, 0) | + FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits); + its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); + + baser = virt_to_phys(devtab) & GICV5_ITS_DT_BASER_ADDR_MASK; + its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); + + its->devtab_cfgr.cfgr = cfgr; + its->devtab_cfgr.linear.devtab = devtab; + + return 0; +} + +/* + * Allocate a 2-level device table. L2 entries are not allocated, + * they are allocated on-demand. + */ +static int gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its, + u8 device_id_bits, + u8 devtab_l2sz) +{ + unsigned int l1_bits, l2_bits, i; + __le64 *l1devtab, **l2ptrs; + size_t l1_sz; + u64 baser; + u32 cfgr; + + l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz); + + l1_bits = device_id_bits - l2_bits; + l1_sz = BIT(l1_bits) * sizeof(*l1devtab); + /* + * With 2-level device table support it is highly unlikely + * that we are not able to allocate the required amount of + * device table memory to cover deviceID space; cap the + * deviceID space if we encounter such set-up. 
+ * If this ever becomes a problem we could revisit the policy + * behind level 2 size selection to reduce level-1 deviceID bits. + */ + if (l1_sz > KMALLOC_MAX_SIZE) { + l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab)); + + pr_warn("Limiting device ID bits from %u to %u\n", + device_id_bits, l1_bits + l2_bits); + device_id_bits = l1_bits + l2_bits; + l1_sz = KMALLOC_MAX_SIZE; + } + + l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL); + if (!l1devtab) + return -ENOMEM; + + l2ptrs = kcalloc(BIT(l1_bits), sizeof(*l2ptrs), GFP_KERNEL); + if (!l2ptrs) { + kfree(l1devtab); + return -ENOMEM; + } + + for (i = 0; i < BIT(l1_bits); i++) + l1devtab[i] = cpu_to_le64(FIELD_PREP(GICV5_DTL1E_SPAN, l2_bits)); + + gicv5_its_dcache_clean(its, l1devtab, l1_sz); + + cfgr = FIELD_PREP(GICV5_ITS_DT_CFGR_STRUCTURE, + GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL) | + FIELD_PREP(GICV5_ITS_DT_CFGR_L2SZ, devtab_l2sz) | + FIELD_PREP(GICV5_ITS_DT_CFGR_DEVICEID_BITS, device_id_bits); + its_writel_relaxed(its, cfgr, GICV5_ITS_DT_CFGR); + + baser = virt_to_phys(l1devtab) & GICV5_ITS_DT_BASER_ADDR_MASK; + its_writeq_relaxed(its, baser, GICV5_ITS_DT_BASER); + + its->devtab_cfgr.cfgr = cfgr; + its->devtab_cfgr.l2.l1devtab = l1devtab; + its->devtab_cfgr.l2.l2ptrs = l2ptrs; + + return 0; +} + +/* + * Initialise the device table as either 1- or 2-level depending on what is + * supported by the hardware. + */ +static int gicv5_its_init_devtab(struct gicv5_its_chip_data *its) +{ + u8 device_id_bits, devtab_l2sz; + bool two_level_devtab; + u32 idr1; + + idr1 = its_readl_relaxed(its, GICV5_ITS_IDR1); + + device_id_bits = FIELD_GET(GICV5_ITS_IDR1_DEVICEID_BITS, idr1); + two_level_devtab = gicv5_its_l2sz_two_level(true, idr1, device_id_bits, + &devtab_l2sz); + if (two_level_devtab) + return gicv5_its_alloc_devtab_two_level(its, device_id_bits, + devtab_l2sz); + else + return gicv5_its_alloc_devtab_linear(its, device_id_bits); +} + +static void gicv5_its_deinit_devtab(struct gicv5_its_chip_data *its) +{ + u8 str = devtab_cfgr_field(its, STRUCTURE); + + if (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR) { + kfree(its->devtab_cfgr.linear.devtab); + } else { + kfree(its->devtab_cfgr.l2.l1devtab); + kfree(its->devtab_cfgr.l2.l2ptrs); + } +} + +static void gicv5_its_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d); + u64 addr = its_dev->its_trans_phys_base; + + msg->data = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr); +} + +static const struct irq_chip gicv5_its_irq_chip = { + .name = "GICv5-ITS-MSI", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .irq_compose_msi_msg = gicv5_its_compose_msi_msg, +}; + +static struct gicv5_its_dev *gicv5_its_find_device(struct gicv5_its_chip_data *its, + u32 device_id) +{ + struct gicv5_its_dev *dev = xa_load(&its->its_devices, device_id); + + return dev ? 
dev : ERR_PTR(-ENODEV); +} + +static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec, + u32 dev_id) +{ + struct gicv5_its_dev *its_dev; + void *entry; + int ret; + + its_dev = gicv5_its_find_device(its, dev_id); + if (!IS_ERR(its_dev)) { + pr_err("A device with this DeviceID (0x%x) has already been registered.\n", + dev_id); + + return ERR_PTR(-EBUSY); + } + + its_dev = kzalloc(sizeof(*its_dev), GFP_KERNEL); + if (!its_dev) + return ERR_PTR(-ENOMEM); + + its_dev->device_id = dev_id; + its_dev->num_events = nvec; + + ret = gicv5_its_device_register(its, its_dev); + if (ret) { + pr_err("Failed to register the device\n"); + goto out_dev_free; + } + + gicv5_its_device_cache_inv(its, its_dev); + + its_dev->its_node = its; + + its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL); + if (!its_dev->event_map) { + ret = -ENOMEM; + goto out_unregister; + } + + entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL); + if (xa_is_err(entry)) { + ret = xa_err(entry); + goto out_bitmap_free; + } + + return its_dev; + +out_bitmap_free: + bitmap_free(its_dev->event_map); +out_unregister: + gicv5_its_device_unregister(its, its_dev); +out_dev_free: + kfree(its_dev); + return ERR_PTR(ret); +} + +static int gicv5_its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + u32 dev_id = info->scratchpad[0].ul; + struct msi_domain_info *msi_info; + struct gicv5_its_chip_data *its; + struct gicv5_its_dev *its_dev; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + guard(mutex)(&its->dev_alloc_lock); + + its_dev = gicv5_its_alloc_device(its, nvec, dev_id); + if (IS_ERR(its_dev)) + return PTR_ERR(its_dev); + + its_dev->its_trans_phys_base = info->scratchpad[1].ul; + info->scratchpad[0].ptr = its_dev; + + return 0; +} + +static void gicv5_its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info) +{ + struct gicv5_its_dev *its_dev = info->scratchpad[0].ptr; + struct msi_domain_info *msi_info; + struct gicv5_its_chip_data *its; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + guard(mutex)(&its->dev_alloc_lock); + + if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map, its_dev->num_events))) + return; + + xa_erase(&its->its_devices, its_dev->device_id); + bitmap_free(its_dev->event_map); + gicv5_its_device_unregister(its, its_dev); + kfree(its_dev); +} + +static struct msi_domain_ops gicv5_its_msi_domain_ops = { + .msi_prepare = gicv5_its_msi_prepare, + .msi_teardown = gicv5_its_msi_teardown, +}; + +static int gicv5_its_map_event(struct gicv5_its_dev *its_dev, u16 event_id, u32 lpi) +{ + struct gicv5_its_chip_data *its = its_dev->its_node; + u64 itt_entry; + __le64 *itte; + + itte = gicv5_its_device_get_itte_ref(its_dev, event_id); + + if (FIELD_GET(GICV5_ITTL2E_VALID, *itte)) + return -EEXIST; + + itt_entry = FIELD_PREP(GICV5_ITTL2E_LPI_ID, lpi) | + FIELD_PREP(GICV5_ITTL2E_VALID, 0x1); + + its_write_table_entry(its, itte, itt_entry); + + gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); + + return 0; +} + +static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id) +{ + struct gicv5_its_chip_data *its = its_dev->its_node; + u64 itte_val; + __le64 *itte; + + itte = gicv5_its_device_get_itte_ref(its_dev, event_id); + + itte_val = le64_to_cpu(*itte); + itte_val &= ~GICV5_ITTL2E_VALID; + + its_write_table_entry(its, itte, itte_val); + + gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); +} + +static int 
gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, + unsigned int nr_irqs, u32 *eventid) +{ + int ret; + + ret = bitmap_find_free_region(its_dev->event_map, + its_dev->num_events, + get_count_order(nr_irqs)); + + if (ret < 0) + return ret; + + *eventid = ret; + + return 0; +} + +static void gicv5_its_free_eventid(struct gicv5_its_dev *its_dev, u32 event_id_base, + unsigned int nr_irqs) +{ + bitmap_release_region(its_dev->event_map, event_id_base, + get_count_order(nr_irqs)); +} + +static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + u32 device_id, event_id_base, lpi; + struct gicv5_its_dev *its_dev; + msi_alloc_info_t *info = arg; + irq_hw_number_t hwirq; + struct irq_data *irqd; + int ret, i; + + its_dev = info->scratchpad[0].ptr; + + ret = gicv5_its_alloc_eventid(its_dev, nr_irqs, &event_id_base); + if (ret) + return ret; + + ret = iommu_dma_prepare_msi(info->desc, its_dev->its_trans_phys_base); + if (ret) + goto out_eventid; + + device_id = its_dev->device_id; + + for (i = 0; i < nr_irqs; i++) { + lpi = gicv5_alloc_lpi(); + if (ret < 0) { + pr_debug("Failed to find free LPI!\n"); + goto out_eventid; + } + + ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi); + if (ret) + goto out_free_lpi; + + /* + * Store eventid and deviceid into the hwirq for later use. + * + * hwirq = event_id << 32 | device_id + */ + hwirq = FIELD_PREP(GICV5_ITS_HWIRQ_DEVICE_ID, device_id) | + FIELD_PREP(GICV5_ITS_HWIRQ_EVENT_ID, (u64)event_id_base + i); + irq_domain_set_info(domain, virq + i, hwirq, + &gicv5_its_irq_chip, its_dev, + handle_fasteoi_irq, NULL, NULL); + + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); + } + + return 0; + +out_free_lpi: + gicv5_free_lpi(lpi); +out_eventid: + gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs); + return ret; +} + +static void gicv5_its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct gicv5_its_chip_data *its; + struct gicv5_its_dev *its_dev; + u16 event_id_base; + unsigned int i; + + its_dev = irq_data_get_irq_chip_data(d); + its = its_dev->its_node; + + event_id_base = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + + bitmap_release_region(its_dev->event_map, event_id_base, + get_count_order(nr_irqs)); + + /* Hierarchically free irq data */ + for (i = 0; i < nr_irqs; i++) { + d = irq_domain_get_irq_data(domain, virq + i); + + gicv5_free_lpi(d->parent_data->hwirq); + irq_domain_reset_irq_data(d); + irq_domain_free_irqs_parent(domain, virq + i, 1); + } + + gicv5_its_syncr(its, its_dev); + gicv5_irs_syncr(); +} + +static int gicv5_its_irq_domain_activate(struct irq_domain *domain, struct irq_data *d, + bool reserve) +{ + struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d); + u16 event_id; + u32 lpi; + + event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + lpi = d->parent_data->hwirq; + + return gicv5_its_map_event(its_dev, event_id, lpi); +} + +static void gicv5_its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct gicv5_its_dev *its_dev = irq_data_get_irq_chip_data(d); + u16 event_id; + + event_id = FIELD_GET(GICV5_ITS_HWIRQ_EVENT_ID, d->hwirq); + + gicv5_its_unmap_event(its_dev, event_id); +} + +static const struct irq_domain_ops gicv5_its_irq_domain_ops = { + .alloc = gicv5_its_irq_domain_alloc, + .free = 
gicv5_its_irq_domain_free, + .activate = gicv5_its_irq_domain_activate, + .deactivate = gicv5_its_irq_domain_deactivate, + .select = msi_lib_irq_domain_select, +}; + +static int gicv5_its_write_cr0(struct gicv5_its_chip_data *its, bool enable) +{ + u32 cr0 = FIELD_PREP(GICV5_ITS_CR0_ITSEN, enable); + + its_writel_relaxed(its, cr0, GICV5_ITS_CR0); + return gicv5_wait_for_op_atomic(its->its_base, GICV5_ITS_CR0, + GICV5_ITS_CR0_IDLE, NULL); +} + +static int gicv5_its_enable(struct gicv5_its_chip_data *its) +{ + return gicv5_its_write_cr0(its, true); +} + +static int gicv5_its_disable(struct gicv5_its_chip_data *its) +{ + return gicv5_its_write_cr0(its, false); +} + +static void gicv5_its_print_info(struct gicv5_its_chip_data *its_node) +{ + bool devtab_linear; + u8 device_id_bits; + u8 str; + + device_id_bits = devtab_cfgr_field(its_node, DEVICEID_BITS); + + str = devtab_cfgr_field(its_node, STRUCTURE); + devtab_linear = (str == GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR); + + pr_info("ITS %s enabled using %s device table device_id_bits %u\n", + fwnode_get_name(its_node->fwnode), + devtab_linear ? "linear" : "2-level", + device_id_bits); +} + +static int gicv5_its_init_domain(struct gicv5_its_chip_data *its, struct irq_domain *parent) +{ + struct irq_domain_info dom_info = { + .fwnode = its->fwnode, + .ops = &gicv5_its_irq_domain_ops, + .domain_flags = its->msi_domain_flags, + .parent = parent, + }; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &gicv5_its_msi_domain_ops; + info->data = its; + dom_info.host_data = info; + + if (!msi_create_parent_irq_domain(&dom_info, &gic_v5_its_msi_parent_ops)) { + kfree(info); + return -ENOMEM; + } + + return 0; +} + +static int __init gicv5_its_init_bases(void __iomem *its_base, struct fwnode_handle *handle, + struct irq_domain *parent_domain) +{ + struct device_node *np = to_of_node(handle); + struct gicv5_its_chip_data *its_node; + u32 cr0, cr1; + bool enabled; + int ret; + + its_node = kzalloc(sizeof(*its_node), GFP_KERNEL); + if (!its_node) + return -ENOMEM; + + mutex_init(&its_node->dev_alloc_lock); + xa_init(&its_node->its_devices); + its_node->fwnode = handle; + its_node->its_base = its_base; + its_node->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | + IRQ_DOMAIN_FLAG_FWNODE_PARENT; + + cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0); + enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0); + if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) { + ret = gicv5_its_disable(its_node); + if (ret) + goto out_free_node; + } + + if (of_property_read_bool(np, "dma-noncoherent")) { + /* + * A non-coherent ITS implies that some cache levels cannot be + * used coherently by the cores and GIC. Our only option is to mark + * memory attributes for the GIC as non-cacheable; by default, + * non-cacheable memory attributes imply outer-shareable + * shareability, the value written into ITS_CR1_SH is ignored. 
+ */ + cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE); + its_node->flags |= ITS_FLAGS_NON_COHERENT; + } else { + cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) | + FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) | + FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE); + } + + its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1); + + ret = gicv5_its_init_devtab(its_node); + if (ret) + goto out_free_node; + + ret = gicv5_its_enable(its_node); + if (ret) + goto out_free_devtab; + + ret = gicv5_its_init_domain(its_node, parent_domain); + if (ret) + goto out_disable_its; + + gicv5_its_print_info(its_node); + + return 0; + +out_disable_its: + gicv5_its_disable(its_node); +out_free_devtab: + gicv5_its_deinit_devtab(its_node); +out_free_node: + kfree(its_node); + return ret; +} + +static int __init gicv5_its_init(struct device_node *node) +{ + void __iomem *its_base; + int ret, idx; + + idx = of_property_match_string(node, "reg-names", "ns-config"); + if (idx < 0) { + pr_err("%pOF: ns-config reg-name not present\n", node); + return -ENODEV; + } + + its_base = of_io_request_and_map(node, idx, of_node_full_name(node)); + if (IS_ERR(its_base)) { + pr_err("%pOF: unable to map GICv5 ITS_CONFIG_FRAME\n", node); + return PTR_ERR(its_base); + } + + ret = gicv5_its_init_bases(its_base, of_fwnode_handle(node), + gicv5_global_data.lpi_domain); + if (ret) + goto out_unmap; + + return 0; + +out_unmap: + iounmap(its_base); + return ret; +} + +void __init gicv5_its_of_probe(struct device_node *parent) +{ + struct device_node *np; + + for_each_available_child_of_node(parent, np) { + if (!of_device_is_compatible(np, "arm,gic-v5-its")) + continue; + + if (gicv5_its_init(np)) + pr_err("Failed to init ITS %s\n", np->full_name); + } +} diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c index 97ff935d79bd..a9e3e75adb96 100644 --- a/drivers/irqchip/irq-gic-v5.c +++ b/drivers/irqchip/irq-gic-v5.c @@ -1071,6 +1071,8 @@ static int __init gicv5_of_init(struct device_node *node, struct device_node *pa gicv5_smp_init(); + gicv5_irs_its_probe(); + return 0; out_int: diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h index 680eed794a35..07b952549bfa 100644 --- a/include/linux/irqchip/arm-gic-v5.h +++ b/include/linux/irqchip/arm-gic-v5.h @@ -50,6 +50,8 @@ #define GICV5_IRS_IDR7 0x001c #define GICV5_IRS_CR0 0x0080 #define GICV5_IRS_CR1 0x0084 +#define GICV5_IRS_SYNCR 0x00c0 +#define GICV5_IRS_SYNC_STATUSR 0x00c4 #define GICV5_IRS_SPI_SELR 0x0108 #define GICV5_IRS_SPI_CFGR 0x0114 #define GICV5_IRS_SPI_STATUSR 0x0118 @@ -103,6 +105,10 @@ #define GICV5_IRS_CR1_OC GENMASK(3, 2) #define GICV5_IRS_CR1_SH GENMASK(1, 0) +#define GICV5_IRS_SYNCR_SYNC BIT(31) + +#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0) + #define GICV5_IRS_SPI_STATUSR_V BIT(1) #define GICV5_IRS_SPI_STATUSR_IDLE BIT(0) @@ -144,6 +150,104 @@ #define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12) +/* + * ITS registers and tables structures + */ +#define GICV5_ITS_IDR1 0x0004 +#define GICV5_ITS_IDR2 0x0008 +#define GICV5_ITS_CR0 0x0080 +#define GICV5_ITS_CR1 0x0084 +#define GICV5_ITS_DT_BASER 0x00c0 +#define GICV5_ITS_DT_CFGR 0x00d0 +#define GICV5_ITS_DIDR 0x0100 +#define GICV5_ITS_EIDR 0x0108 +#define GICV5_ITS_INV_EVENTR 0x010c +#define 
GICV5_ITS_INV_DEVICER 0x0110 +#define GICV5_ITS_STATUSR 0x0120 +#define GICV5_ITS_SYNCR 0x0140 +#define GICV5_ITS_SYNC_STATUSR 0x0148 + +#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8) +#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7) +#define GICV5_ITS_IDR1_DT_LEVELS BIT(6) +#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0) + +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r)) +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r)) +#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r)) + +#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5) +#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0) + +#define GICV5_ITS_CR0_IDLE BIT(1) +#define GICV5_ITS_CR0_ITSEN BIT(0) + +#define GICV5_ITS_CR1_ITT_RA BIT(7) +#define GICV5_ITS_CR1_DT_RA BIT(6) +#define GICV5_ITS_CR1_IC GENMASK(5, 4) +#define GICV5_ITS_CR1_OC GENMASK(3, 2) +#define GICV5_ITS_CR1_SH GENMASK(1, 0) + +#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16) +#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6) +#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0) + +#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3) + +#define GICV5_ITS_INV_DEVICER_I BIT(31) +#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1) +#define GICV5_ITS_INV_DEVICER_L1 BIT(0) + +#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0) + +#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0) + +#define GICV5_ITS_INV_EVENTR_I BIT(31) +#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1) +#define GICV5_ITS_INV_EVENTR_L1 BIT(0) + +#define GICV5_ITS_STATUSR_IDLE BIT(0) + +#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63) +#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32) +#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0) + +#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0) + +#define GICV5_DTL1E_VALID BIT_ULL(0) +/* Note that there is no shift for the address by design */ +#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60) + +#define GICV5_DTL2E_VALID BIT_ULL(0) +#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1) +/* Note that there is no shift for the address by design */ +#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57) +#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58) +#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59) + +#define GICV5_ITTL1E_VALID BIT_ULL(0) +/* Note that there is no shift for the address by design */ +#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3) +#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60) + +#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0) +#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28) +#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30) +#define GICV5_ITTL2E_VALID BIT_ULL(31) +#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32) + +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00 +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01 +#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10 + +#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0 +#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1 + +#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0) +#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32) + /* * Global Data structures and functions */ @@ -197,24 +301,77 @@ static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset, return 0; } +static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset, + const char *reg_s, u32 mask) +{ + void __iomem *reg = addr + offset; + u32 val; + int ret; + + ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC); + if (unlikely(ret == -ETIMEDOUT)) { + pr_err_ratelimited("%s timeout...\n", reg_s); + return ret; + } + + return 0; +} + 
#define gicv5_wait_for_op_atomic(base, reg, mask, val) \ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val) +#define gicv5_wait_for_op(base, reg, mask) \ + gicv5_wait_for_op_s(base, reg, #reg, mask) + void __init gicv5_init_lpi_domain(void); void __init gicv5_free_lpi_domain(void); int gicv5_irs_of_probe(struct device_node *parent); void gicv5_irs_remove(void); int gicv5_irs_enable(void); +void gicv5_irs_its_probe(void); int gicv5_irs_register_cpu(int cpuid); int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid); struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id); int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type); int gicv5_irs_iste_alloc(u32 lpi); +void gicv5_irs_syncr(void); + +struct gicv5_its_devtab_cfg { + union { + struct { + __le64 *devtab; + } linear; + struct { + __le64 *l1devtab; + __le64 **l2ptrs; + } l2; + }; + u32 cfgr; +}; + +struct gicv5_its_itt_cfg { + union { + struct { + __le64 *itt; + unsigned int num_ents; + } linear; + struct { + __le64 *l1itt; + __le64 **l2ptrs; + unsigned int num_l1_ents; + u8 l2sz; + } l2; + }; + u8 event_id_bits; + bool l2itt; +}; void gicv5_init_lpis(u32 max); void gicv5_deinit_lpis(void); int gicv5_alloc_lpi(void); void gicv5_free_lpi(u32 lpi); + +void __init gicv5_its_of_probe(struct device_node *parent); #endif -- cgit v1.2.3 From 695949d8b16f11f2f172d8d0c7ccc1ae09ed6cb7 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 3 Jul 2025 12:25:19 +0200 Subject: irqchip/gic-v5: Add GICv5 IWB support The GICv5 architecture implements the Interrupt Wire Bridge (IWB) in order to support wired interrupts that cannot be connected directly to an IRS and instead uses the ITS to translate a wire event into an IRQ signal. Add the wired-to-MSI IWB driver to manage IWB wired interrupts. An IWB is connected to an ITS and it has its own deviceID for all interrupt wires that it manages; the IWB input wire number must be exposed to the ITS as an eventID with a 1:1 mapping. This eventID is not programmable and therefore requires a new msi_alloc_info_t flag to make sure the ITS driver does not allocate an eventid for the wire but rather it uses the msi_alloc_info_t.hwirq number to gather the ITS eventID. 
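[ Editorial aside, not part of the patch itself: a hedged illustration of the fixed wire-to-EventID mapping described above. When MSI_ALLOC_FLAGS_FIXED_MSG_DATA is set, the ITS side is expected to take the EventID straight from msi_alloc_info_t.hwirq (the IWB wire number) instead of allocating one. Roughly, with a simplified free-bit search standing in for the real bitmap-region allocator:

	/* Sketch of the EventID selection policy only */
	static int pick_event_id(const msi_alloc_info_t *info,
				 unsigned long *event_map, u32 num_events,
				 u32 *event_id)
	{
		if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) {
			/* Plain MSI: any free EventID will do */
			*event_id = find_first_zero_bit(event_map, num_events);
			if (*event_id >= num_events)
				return -ENOSPC;
			set_bit(*event_id, event_map);
			return 0;
		}

		/* IWB wire: EventID == wire number, 1:1 and not programmable */
		if (info->hwirq >= num_events ||
		    test_and_set_bit(info->hwirq, event_map))
			return -EINVAL;

		*event_id = info->hwirq;
		return 0;
	} ]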
Co-developed-by: Sascha Bischoff Signed-off-by: Sascha Bischoff Co-developed-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Lorenzo Pieralisi Reviewed-by: Marc Zyngier Cc: Thomas Gleixner Cc: Marc Zyngier Link: https://lore.kernel.org/r/20250703-gicv5-host-v7-29-12e71f1b3528@kernel.org Signed-off-by: Marc Zyngier --- drivers/irqchip/Makefile | 3 +- drivers/irqchip/irq-gic-v5-its.c | 40 ++++-- drivers/irqchip/irq-gic-v5-iwb.c | 284 +++++++++++++++++++++++++++++++++++++ include/asm-generic/msi.h | 1 + include/linux/irqchip/arm-gic-v5.h | 17 +++ 5 files changed, 335 insertions(+), 10 deletions(-) create mode 100644 drivers/irqchip/irq-gic-v5-iwb.c (limited to 'include/linux') diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 5459f66e597f..e83dad932ac0 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -37,7 +37,8 @@ obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o -obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o +obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \ + irq-gic-v5-iwb.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c index 700df6d0687e..340640fdbdf6 100644 --- a/drivers/irqchip/irq-gic-v5-its.c +++ b/drivers/irqchip/irq-gic-v5-its.c @@ -880,19 +880,41 @@ static void gicv5_its_unmap_event(struct gicv5_its_dev *its_dev, u16 event_id) gicv5_its_itt_cache_inv(its, its_dev->device_id, event_id); } -static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, +static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info_t *info, unsigned int nr_irqs, u32 *eventid) { - int ret; + int event_id_base; + + if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) { + event_id_base = bitmap_find_free_region(its_dev->event_map, + its_dev->num_events, + get_count_order(nr_irqs)); + if (event_id_base < 0) + return event_id_base; + } else { + /* + * We want to have a fixed EventID mapped for hardcoded + * message data allocations. + */ + if (WARN_ON_ONCE(nr_irqs != 1)) + return -EINVAL; - ret = bitmap_find_free_region(its_dev->event_map, - its_dev->num_events, - get_count_order(nr_irqs)); + event_id_base = info->hwirq; - if (ret < 0) - return ret; + if (event_id_base >= its_dev->num_events) { + pr_err("EventID ouside of ITT range; cannot allocate an ITT entry!\n"); + + return -EINVAL; + } + + if (test_and_set_bit(event_id_base, its_dev->event_map)) { + pr_warn("Can't reserve event_id bitmap\n"); + return -EINVAL; + + } + } - *eventid = ret; + *eventid = event_id_base; return 0; } @@ -916,7 +938,7 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi its_dev = info->scratchpad[0].ptr; - ret = gicv5_its_alloc_eventid(its_dev, nr_irqs, &event_id_base); + ret = gicv5_its_alloc_eventid(its_dev, info, nr_irqs, &event_id_base); if (ret) return ret; diff --git a/drivers/irqchip/irq-gic-v5-iwb.c b/drivers/irqchip/irq-gic-v5-iwb.c new file mode 100644 index 000000000000..ed72fbdd4900 --- /dev/null +++ b/drivers/irqchip/irq-gic-v5-iwb.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025 ARM Limited, All Rights Reserved. 
+ */ +#define pr_fmt(fmt) "GICv5 IWB: " fmt + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct gicv5_iwb_chip_data { + void __iomem *iwb_base; + u16 nr_regs; +}; + +static u32 iwb_readl_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 reg_offset) +{ + return readl_relaxed(iwb_node->iwb_base + reg_offset); +} + +static void iwb_writel_relaxed(struct gicv5_iwb_chip_data *iwb_node, const u32 val, + const u32 reg_offset) +{ + writel_relaxed(val, iwb_node->iwb_base + reg_offset); +} + +static int gicv5_iwb_wait_for_wenabler(struct gicv5_iwb_chip_data *iwb_node) +{ + return gicv5_wait_for_op_atomic(iwb_node->iwb_base, GICV5_IWB_WENABLE_STATUSR, + GICV5_IWB_WENABLE_STATUSR_IDLE, NULL); +} + +static int __gicv5_iwb_set_wire_enable(struct gicv5_iwb_chip_data *iwb_node, + u32 iwb_wire, bool enable) +{ + u32 n = iwb_wire / 32; + u8 i = iwb_wire % 32; + u32 val; + + if (n >= iwb_node->nr_regs) { + pr_err("IWB_WENABLER is invalid for n=%u\n", n); + return -EINVAL; + } + + /* + * Enable IWB wire/pin at this point + * Note: This is not the same as enabling the interrupt + */ + val = iwb_readl_relaxed(iwb_node, GICV5_IWB_WENABLER + (4 * n)); + if (enable) + val |= BIT(i); + else + val &= ~BIT(i); + iwb_writel_relaxed(iwb_node, val, GICV5_IWB_WENABLER + (4 * n)); + + return gicv5_iwb_wait_for_wenabler(iwb_node); +} + +static int gicv5_iwb_enable_wire(struct gicv5_iwb_chip_data *iwb_node, + u32 iwb_wire) +{ + return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, true); +} + +static int gicv5_iwb_disable_wire(struct gicv5_iwb_chip_data *iwb_node, + u32 iwb_wire) +{ + return __gicv5_iwb_set_wire_enable(iwb_node, iwb_wire, false); +} + +static void gicv5_iwb_irq_disable(struct irq_data *d) +{ + struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d); + + gicv5_iwb_disable_wire(iwb_node, d->hwirq); + irq_chip_disable_parent(d); +} + +static void gicv5_iwb_irq_enable(struct irq_data *d) +{ + struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d); + + gicv5_iwb_enable_wire(iwb_node, d->hwirq); + irq_chip_enable_parent(d); +} + +static int gicv5_iwb_set_type(struct irq_data *d, unsigned int type) +{ + struct gicv5_iwb_chip_data *iwb_node = irq_data_get_irq_chip_data(d); + u32 iwb_wire, n, wtmr; + u8 i; + + iwb_wire = d->hwirq; + i = iwb_wire % 32; + n = iwb_wire / 32; + + if (n >= iwb_node->nr_regs) { + pr_err_once("reg %u out of range\n", n); + return -EINVAL; + } + + wtmr = iwb_readl_relaxed(iwb_node, GICV5_IWB_WTMR + (4 * n)); + + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_LEVEL_LOW: + wtmr |= BIT(i); + break; + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + wtmr &= ~BIT(i); + break; + default: + pr_debug("unexpected wire trigger mode"); + return -EINVAL; + } + + iwb_writel_relaxed(iwb_node, wtmr, GICV5_IWB_WTMR + (4 * n)); + + return 0; +} + +static void gicv5_iwb_domain_set_desc(msi_alloc_info_t *alloc_info, struct msi_desc *desc) +{ + alloc_info->desc = desc; + alloc_info->hwirq = (u32)desc->data.icookie.value; +} + +static int gicv5_iwb_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, + unsigned int *type) +{ + if (!is_of_node(fwspec->fwnode)) + return -EINVAL; + + if (fwspec->param_count < 2) + return -EINVAL; + + /* + * param[0] is be the wire + * param[1] is the interrupt type + */ + *hwirq = fwspec->param[0]; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static void gicv5_iwb_write_msi_msg(struct irq_data *d, struct msi_msg 
*msg) {} + +static const struct msi_domain_template iwb_msi_template = { + .chip = { + .name = "GICv5-IWB", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_enable = gicv5_iwb_irq_enable, + .irq_disable = gicv5_iwb_irq_disable, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = gicv5_iwb_set_type, + .irq_write_msi_msg = gicv5_iwb_write_msi_msg, + .irq_set_affinity = irq_chip_set_affinity_parent, + .irq_get_irqchip_state = irq_chip_get_parent_state, + .irq_set_irqchip_state = irq_chip_set_parent_state, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = gicv5_iwb_domain_set_desc, + .msi_translate = gicv5_iwb_irq_domain_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + }, + + .alloc_info = { + .flags = MSI_ALLOC_FLAGS_FIXED_MSG_DATA, + }, +}; + +static bool gicv5_iwb_create_device_domain(struct device *dev, unsigned int size, + struct gicv5_iwb_chip_data *iwb_node) +{ + if (WARN_ON_ONCE(!dev->msi.domain)) + return false; + + return msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &iwb_msi_template, size, + NULL, iwb_node); +} + +static struct gicv5_iwb_chip_data * +gicv5_iwb_init_bases(void __iomem *iwb_base, struct platform_device *pdev) +{ + u32 nr_wires, idr0, cr0; + unsigned int n; + int ret; + + struct gicv5_iwb_chip_data *iwb_node __free(kfree) = kzalloc(sizeof(*iwb_node), + GFP_KERNEL); + if (!iwb_node) + return ERR_PTR(-ENOMEM); + + iwb_node->iwb_base = iwb_base; + + idr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_IDR0); + nr_wires = (FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1) * 32; + + cr0 = iwb_readl_relaxed(iwb_node, GICV5_IWB_CR0); + if (!FIELD_GET(GICV5_IWB_CR0_IWBEN, cr0)) { + dev_err(&pdev->dev, "IWB must be enabled in firmware\n"); + return ERR_PTR(-EINVAL); + } + + iwb_node->nr_regs = FIELD_GET(GICV5_IWB_IDR0_IW_RANGE, idr0) + 1; + + for (n = 0; n < iwb_node->nr_regs; n++) + iwb_writel_relaxed(iwb_node, 0, GICV5_IWB_WENABLER + (sizeof(u32) * n)); + + ret = gicv5_iwb_wait_for_wenabler(iwb_node); + if (ret) + return ERR_PTR(ret); + + if (!gicv5_iwb_create_device_domain(&pdev->dev, nr_wires, iwb_node)) + return ERR_PTR(-ENOMEM); + + return_ptr(iwb_node); +} + +static int gicv5_iwb_device_probe(struct platform_device *pdev) +{ + struct gicv5_iwb_chip_data *iwb_node; + void __iomem *iwb_base; + struct resource *res; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + iwb_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!iwb_base) { + dev_err(&pdev->dev, "failed to ioremap %pR\n", res); + return -ENOMEM; + } + + iwb_node = gicv5_iwb_init_bases(iwb_base, pdev); + if (IS_ERR(iwb_node)) { + ret = PTR_ERR(iwb_node); + goto out_unmap; + } + + return 0; + +out_unmap: + iounmap(iwb_base); + return ret; +} + +static const struct of_device_id gicv5_iwb_of_match[] = { + { .compatible = "arm,gic-v5-iwb" }, + { /* END */ } +}; +MODULE_DEVICE_TABLE(of, gicv5_iwb_of_match); + +static struct platform_driver gicv5_iwb_platform_driver = { + .driver = { + .name = "GICv5 IWB", + .of_match_table = gicv5_iwb_of_match, + .suppress_bind_attrs = true, + }, + .probe = gicv5_iwb_device_probe, +}; + +module_platform_driver(gicv5_iwb_platform_driver); diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h index 124c734ca5d9..92cca4b23f13 100644 --- a/include/asm-generic/msi.h +++ b/include/asm-generic/msi.h @@ -33,6 +33,7 @@ typedef struct msi_alloc_info { 
/* Device generating MSIs is proxying for another device */ #define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0) +#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1) #define GENERIC_MSI_DOMAIN_OPS 1 diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h index 07b952549bfa..68ddcdb1cec5 100644 --- a/include/linux/irqchip/arm-gic-v5.h +++ b/include/linux/irqchip/arm-gic-v5.h @@ -248,6 +248,23 @@ #define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0) #define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32) +/* + * IWB registers + */ +#define GICV5_IWB_IDR0 0x0000 +#define GICV5_IWB_CR0 0x0080 +#define GICV5_IWB_WENABLE_STATUSR 0x00c0 +#define GICV5_IWB_WENABLER 0x2000 +#define GICV5_IWB_WTMR 0x4000 + +#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11) +#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0) + +#define GICV5_IWB_CR0_IDLE BIT(1) +#define GICV5_IWB_CR0_IWBEN BIT(0) + +#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0) + /* * Global Data structures and functions */ -- cgit v1.2.3 From 1ec38ce3d024bebdff2a9ffb526e4d198605204d Mon Sep 17 00:00:00 2001 From: Sascha Bischoff Date: Fri, 27 Jun 2025 10:09:01 +0000 Subject: irqchip/gic-v5: Populate struct gic_kvm_info Populate the gic_kvm_info struct based on support for FEAT_GCIE_LEGACY. The struct is used by KVM to probe for a compatible GIC. Co-authored-by: Timothy Hayes Signed-off-by: Timothy Hayes Signed-off-by: Sascha Bischoff Reviewed-by: Lorenzo Pieralisi Link: https://lore.kernel.org/r/20250627100847.1022515-3-sascha.bischoff@arm.com Signed-off-by: Oliver Upton --- drivers/irqchip/irq-gic-v5.c | 33 +++++++++++++++++++++++++++++++++ include/linux/irqchip/arm-vgic-info.h | 4 ++++ 2 files changed, 37 insertions(+) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c index 0e4789818131..4bd224f359a7 100644 --- a/drivers/irqchip/irq-gic-v5.c +++ b/drivers/irqchip/irq-gic-v5.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -1058,6 +1059,36 @@ static void gicv5_set_cpuif_idbits(void) } } +#ifdef CONFIG_KVM +static struct gic_kvm_info gic_v5_kvm_info __initdata; + +static bool __init gicv5_cpuif_has_gcie_legacy(void) +{ + u64 idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1); + return !!FIELD_GET(ICC_IDR0_EL1_GCIE_LEGACY, idr0); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + gic_v5_kvm_info.type = GIC_V5; + gic_v5_kvm_info.has_gcie_v3_compat = gicv5_cpuif_has_gcie_legacy(); + + /* GIC Virtual CPU interface maintenance interrupt */ + gic_v5_kvm_info.no_maint_irq_mask = false; + gic_v5_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v5_kvm_info.maint_irq) { + pr_warn("cannot find GICv5 virtual CPU interface maintenance interrupt\n"); + return; + } + + vgic_set_kvm_info(&gic_v5_kvm_info); +} +#else +static inline void __init gic_of_setup_kvm_info(struct device_node *node) +{ +} +#endif // CONFIG_KVM + static int __init gicv5_of_init(struct device_node *node, struct device_node *parent) { int ret = gicv5_irs_of_probe(node); @@ -1090,6 +1121,8 @@ static int __init gicv5_of_init(struct device_node *node, struct device_node *pa gicv5_irs_its_probe(); + gic_of_setup_kvm_info(node); + return 0; out_int: diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h index a75b2c7de69d..ca1713fac6e3 100644 --- a/include/linux/irqchip/arm-vgic-info.h +++ b/include/linux/irqchip/arm-vgic-info.h @@ -15,6 +15,8 @@ enum gic_type { GIC_V2, /* Full GICv3, optionally with v2 compat */ GIC_V3, + /* Full GICv5, optionally with 
v3 compat */ + GIC_V5, }; struct gic_kvm_info { @@ -34,6 +36,8 @@ struct gic_kvm_info { bool has_v4_1; /* Deactivation impared, subpar stuff */ bool no_hw_deactivation; + /* v3 compat support (GICv5 hosts, only) */ + bool has_gcie_v3_compat; }; #ifdef CONFIG_KVM -- cgit v1.2.3 From ee4a2e08c10188f02fe3fb36b6beddc3c2fdb287 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 8 Jul 2025 19:27:41 +1000 Subject: entry: Add arch_in_rcu_eqs() All architectures have an interruptible RCU extended quiescent state (EQS) as part of their idle sequences, where interrupts can occur without RCU watching. Entry code must account for this and wake RCU as necessary; the common entry code deals with this in irqentry_enter() by treating any interrupt from an idle thread as potentially having occurred within an EQS and waking RCU for the duration of the interrupt via rcu_irq_enter() .. rcu_irq_exit(). Some architectures may have other interruptible EQSs which require similar treatment. For example, on s390 it is necessary to enable interrupts around guest entry in the middle of a period where core KVM code has entered an EQS. So that architectures can wake RCU in these cases, this patch adds a new arch_in_rcu_eqs() hook to the common entry code which is checked in addition to the existing is_idle_thread() check, with RCU woken if either returns true. A default implementation is provided which always returns false, which suffices for most architectures. As no architectures currently implement arch_in_rcu_eqs(), there should be no functional change as a result of this patch alone. A subsequent patch will add an s390 implementation to fix a latent bug with missing RCU wakeups. [ajd@linux.ibm.com: rebase, fix commit message] Signed-off-by: Mark Rutland Cc: Andy Lutomirski Cc: Christian Borntraeger Cc: Heiko Carstens Cc: Paolo Bonzini Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Sven Schnelle Cc: Thomas Gleixner Cc: Claudio Imbrenda Cc: Vasily Gorbik Cc: Alexander Gordeev Cc: Janosch Frank Reviewed-by: Christian Borntraeger Signed-off-by: Andrew Donnellan Acked-by: Peter Zijlstra (Intel) Reviewed-by: Janosch Frank Link: https://lore.kernel.org/r/20250708092742.104309-2-ajd@linux.ibm.com Signed-off-by: Janosch Frank Message-ID: <20250708092742.104309-2-ajd@linux.ibm.com> --- include/linux/entry-common.h | 16 ++++++++++++++++ kernel/entry/common.c | 3 ++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index f94f3fdf15fc..3bf99cbad8a3 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -86,6 +86,22 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs); static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} #endif +/** + * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent + * states. + * + * Returns: true if the CPU is potentially in an RCU EQS, false otherwise. + * + * Architectures only need to define this if threads other than the idle thread + * may have an interruptible EQS. This does not need to handle idle threads. It + * is safe to over-estimate at the cost of redundant RCU management work. 
+ * + * Invoked from irqentry_enter() + */ +#ifndef arch_in_rcu_eqs +static __always_inline bool arch_in_rcu_eqs(void) { return false; } +#endif + /** * enter_from_user_mode - Establish state when coming from user mode * diff --git a/kernel/entry/common.c b/kernel/entry/common.c index a8dd1f27417c..eb52d38e8099 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -220,7 +220,8 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs) * TINY_RCU does not support EQS, so let the compiler eliminate * this part when enabled. */ - if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) { + if (!IS_ENABLED(CONFIG_TINY_RCU) && + (is_idle_task(current) || arch_in_rcu_eqs())) { /* * If RCU is not watching then the same careful * sequence vs. lockdep and tracing is required -- cgit v1.2.3
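Editor's note: for readers wondering what an architecture-side override of the new hook could look like, here is a minimal, purely illustrative sketch. Nothing below is taken from the series: the header placement, the per-CPU flag name, and the code that would set it around the interruptible EQS window are all assumptions; the real s390 implementation lands in the follow-up patch mentioned in the commit message above.

/* Hypothetical arch/xyz/include/asm/entry-common.h fragment -- illustration only */
#include <linux/percpu.h>

/*
 * Assumed per-CPU flag that the (hypothetical) arch code sets while it runs
 * with interrupts enabled inside an RCU extended quiescent state, e.g. around
 * guest entry, and clears again afterwards. A matching
 * DEFINE_PER_CPU(bool, xyz_irqs_on_in_eqs) would live in the arch C code.
 */
DECLARE_PER_CPU(bool, xyz_irqs_on_in_eqs);

#define arch_in_rcu_eqs arch_in_rcu_eqs
static __always_inline bool arch_in_rcu_eqs(void)
{
	/* Over-reporting is safe here: it only costs a redundant RCU wakeup. */
	return raw_cpu_read(xyz_irqs_on_in_eqs);
}

Because irqentry_enter() ORs this hook with is_idle_task(current) before deciding whether to wake RCU, a false positive merely causes redundant RCU management work, whereas a false negative would let an interrupt run without RCU watching, so an architecture implementation should err on the side of returning true.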