Diffstat (limited to 'arch/arm64/kvm/vgic/vgic-v2.c')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c | 291
1 file changed, 220 insertions(+), 71 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 381673f03c39..585491fbda80 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -9,6 +9,7 @@
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
+#include "vgic-mmio.h"
#include "vgic.h"
static inline void vgic_v2_write_lr(int lr, u32 val)
@@ -26,11 +27,24 @@ void vgic_v2_init_lrs(void)
vgic_v2_write_lr(i, 0);
}
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
- cpuif->vgic_hcr |= GICH_HCR_UIE;
+ cpuif->vgic_hcr = GICH_HCR_EN;
+
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_UIE;
+
+ cpuif->vgic_hcr |= (cpuif->vgic_vmcr & GICH_VMCR_ENABLE_GRP0_MASK) ?
+ GICH_HCR_VGrp0DIE : GICH_HCR_VGrp0EIE;
+ cpuif->vgic_hcr |= (cpuif->vgic_vmcr & GICH_VMCR_ENABLE_GRP1_MASK) ?
+ GICH_HCR_VGrp1DIE : GICH_HCR_VGrp1EIE;
}
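(Illustrative aside, not part of the patch: a worked example of the HCR value the function above would compute, assuming virtual group 1 is enabled, group 0 is disabled, and there are more pending IRQs than LRs but nothing active beyond them.)
	/*
	 * Sketch only:
	 *
	 *   cpuif->vgic_hcr == GICH_HCR_EN | GICH_HCR_NPIE | GICH_HCR_UIE |
	 *                      GICH_HCR_VGrp0EIE | GICH_HCR_VGrp1DIE;
	 *
	 * i.e. a maintenance interrupt fires once no pending LRs remain
	 * (NPIE), once the LRs are about to underflow (UIE), or as soon as
	 * the guest flips either group enable (VGrp0EIE/VGrp1DIE).
	 */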
static bool lr_signals_eoi_mi(u32 lr_val)
@@ -39,43 +53,23 @@ static bool lr_signals_eoi_mi(u32 lr_val)
!(lr_val & GICH_LR_HW);
}
-/*
- * transfer the content of the LRs back into the corresponding ap_list:
- * - active bit is transferred as is
- * - pending bit is
- * - transferred as is in case of edge sensitive IRQs
- * - set to the line-level (resample time) for level sensitive IRQs
- */
-void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+static void vgic_v2_fold_lr(struct kvm_vcpu *vcpu, u32 val)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
- int lr;
-
- DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
-
- cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
- for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
- u32 val = cpuif->vgic_lr[lr];
- u32 cpuid, intid = val & GICH_LR_VIRTUALID;
- struct vgic_irq *irq;
- bool deactivated;
-
- /* Extract the source vCPU id from the LR */
- cpuid = val & GICH_LR_PHYSID_CPUID;
- cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
- cpuid &= 7;
+ u32 cpuid, intid = val & GICH_LR_VIRTUALID;
+ struct vgic_irq *irq;
+ bool deactivated;
- /* Notify fds when the guest EOI'ed a level-triggered SPI */
- if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
+ /* Extract the source vCPU id from the LR */
+ cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val) & 7;
- irq = vgic_get_vcpu_irq(vcpu, intid);
+ /* Notify fds when the guest EOI'ed a level-triggered SPI */
+ if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
- raw_spin_lock(&irq->irq_lock);
+ irq = vgic_get_vcpu_irq(vcpu, intid);
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
/* Always preserve the active bit, note deactivation */
deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -101,29 +95,139 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
/* Handle resampling for mapped interrupts if required */
vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
- raw_spin_unlock(&irq->irq_lock);
- vgic_put_irq(vcpu->kvm, irq);
+ irq->on_lr = false;
}
- cpuif->used_lrs = 0;
+ vgic_put_irq(vcpu->kvm, irq);
}
+static u32 vgic_v2_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);
+
/*
- * Populates the particular LR with the state of a given IRQ:
- * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
- * - for a level sensitive IRQ the pending state value is unchanged;
- * it is dictated directly by the input level
- *
- * If @irq describes an SGI with multiple sources, we choose the
- * lowest-numbered source VCPU and clear that bit in the source bitmap.
- *
- * The irq_lock must be held by the caller.
+ * transfer the content of the LRs back into the corresponding ap_list:
+ * - active bit is transferred as is
+ * - pending bit is
+ * - transferred as is in case of edge sensitive IRQs
+ * - set to the line-level (resample time) for level sensitive IRQs
*/
-void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
+ u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr);
+ struct vgic_irq *irq;
+
+ DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
+ for (int lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++)
+ vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]);
+
+ /* See the GICv3 equivalent for the EOIcount handling rationale */
+ list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ u32 lr;
+
+ if (!eoicount) {
+ break;
+ } else {
+ guard(raw_spinlock)(&irq->irq_lock);
+
+ if (!(likely(vgic_target_oracle(irq) == vcpu) &&
+ irq->active))
+ continue;
+
+ lr = vgic_v2_compute_lr(vcpu, irq) & ~GICH_LR_ACTIVE_BIT;
+ }
+
+ if (lr & GICH_LR_HW)
+ writel_relaxed(FIELD_GET(GICH_LR_PHYSID_CPUID, lr),
+ kvm_vgic_global_state.gicc_base + GIC_CPU_DEACTIVATE);
+ vgic_v2_fold_lr(vcpu, lr);
+ eoicount--;
+ }
+
+ cpuif->used_lrs = 0;
+}
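(Illustrative aside, not part of the patch: how the EOIcount walk above plays out; on GICv2 the EOICount field lives in GICH_HCR[31:27], and GICH_HCR_EOICOUNT is the mask this series uses for it.)
	/*
	 * Example: the guest EOIed three interrupts whose LRs had already
	 * been recycled while GICH_HCR_LRENPIE was set, so on exit the
	 * hardware-maintained count reads back as:
	 *
	 *   cpuif->vgic_hcr == 0x18000001;   GICH_HCR_EN | EOICount == 3
	 *   FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr) == 3
	 *
	 * and the ap_list walk above deactivates at most three active IRQs
	 * that never made it into an LR.
	 */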
+
+void vgic_v2_deactivate(struct kvm_vcpu *vcpu, u32 val)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
+ struct kvm_vcpu *target_vcpu = NULL;
+ bool mmio = false;
+ struct vgic_irq *irq;
+ unsigned long flags;
+ u64 lr = 0;
+ u8 cpuid;
+
+ /* Snapshot CPUID, and remove it from the INTID */
+ cpuid = FIELD_GET(GENMASK_ULL(12, 10), val);
+ val &= ~GENMASK_ULL(12, 10);
+
+ /* We only deal with DIR when EOIMode==1 */
+ if (!(cpuif->vgic_vmcr & GICH_VMCR_EOI_MODE_MASK))
+ return;
+
+ /* Make sure we're in the same context as LR handling */
+ local_irq_save(flags);
+
+ irq = vgic_get_vcpu_irq(vcpu, val);
+ if (WARN_ON_ONCE(!irq))
+ goto out;
+
+ /* See the corresponding v3 code for the rationale */
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ target_vcpu = irq->vcpu;
+
+ /* Not on any ap_list? */
+ if (!target_vcpu)
+ goto put;
+
+ /*
+ * Urgh. We're deactivating something that we cannot
+ * observe yet... Big hammer time.
+ */
+ if (irq->on_lr) {
+ mmio = true;
+ goto put;
+ }
+
+ /* SGI: check that the cpuid matches */
+ if (val < VGIC_NR_SGIS && irq->active_source != cpuid) {
+ target_vcpu = NULL;
+ goto put;
+ }
+
+ /* (with a Dalek voice) DEACTIVATE!!!! */
+ lr = vgic_v2_compute_lr(vcpu, irq) & ~GICH_LR_ACTIVE_BIT;
+ }
+
+ if (lr & GICH_LR_HW)
+ writel_relaxed(FIELD_GET(GICH_LR_PHYSID_CPUID, lr),
+ kvm_vgic_global_state.gicc_base + GIC_CPU_DEACTIVATE);
+
+ vgic_v2_fold_lr(vcpu, lr);
+
+put:
+ vgic_put_irq(vcpu->kvm, irq);
+
+out:
+ local_irq_restore(flags);
+
+ if (mmio)
+ vgic_mmio_write_cactive(vcpu, (val / 32) * 4, 4, BIT(val % 32));
+
+ /* Force the ap_list to be pruned */
+ if (target_vcpu)
+ kvm_make_request(KVM_REQ_VGIC_PROCESS_UPDATE, target_vcpu);
+}
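(Illustrative aside, not part of the patch: the MMIO fallback arithmetic above, for a concrete INTID.)
	/*
	 * The ICACTIVER registers pack 32 INTIDs per 32-bit word, so
	 * deactivating INTID 35 through the fallback becomes:
	 *
	 *   vgic_mmio_write_cactive(vcpu, (35 / 32) * 4, 4, BIT(35 % 32));
	 *
	 * that is, byte offset 4 (the second ICACTIVER word), length 4,
	 * and value BIT(3).
	 */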
+
+static u32 vgic_v2_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
u32 val = irq->intid;
bool allow_pending = true;
+ WARN_ON(irq->on_lr);
+
if (irq->active) {
val |= GICH_LR_ACTIVE_BIT;
if (vgic_irq_is_sgi(irq->intid))
@@ -163,22 +267,52 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (allow_pending && irq_is_pending(irq)) {
val |= GICH_LR_PENDING_BIT;
- if (irq->config == VGIC_CONFIG_EDGE)
- irq->pending_latch = false;
-
if (vgic_irq_is_sgi(irq->intid)) {
u32 src = ffs(irq->source);
if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
irq->intid))
- return;
+ return 0;
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
- irq->source &= ~(1 << (src - 1));
- if (irq->source) {
- irq->pending_latch = true;
+ if (irq->source & ~BIT(src - 1))
val |= GICH_LR_EOI;
- }
+ }
+ }
+
+ /* The GICv2 LR only holds five bits of priority. */
+ val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
+
+ return val;
+}
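(Illustrative aside, not part of the patch: the five-bit priority truncation above, for a concrete value.)
	/*
	 * An 8-bit vGIC priority of 0xa0 ends up in the LR as:
	 *
	 *   (0xa0 >> 3) << GICH_LR_PRIORITY_SHIFT == 0x14 << 23
	 *
	 * so priorities that differ only in their bottom three bits
	 * collapse to the same LR priority.
	 */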
+
+/*
+ * Populates the particular LR with the state of a given IRQ:
+ * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
+ * - for a level sensitive IRQ the pending state value is unchanged;
+ * it is dictated directly by the input level
+ *
+ * If @irq describes an SGI with multiple sources, we choose the
+ * lowest-numbered source VCPU and clear that bit in the source bitmap.
+ *
+ * The irq_lock must be held by the caller.
+ */
+void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+ u32 val = vgic_v2_compute_lr(vcpu, irq);
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
+
+ if (val & GICH_LR_PENDING_BIT) {
+ if (irq->config == VGIC_CONFIG_EDGE)
+ irq->pending_latch = false;
+
+ if (vgic_irq_is_sgi(irq->intid)) {
+ u32 src = ffs(irq->source);
+
+ irq->source &= ~BIT(src - 1);
+ if (irq->source)
+ irq->pending_latch = true;
}
}
@@ -194,7 +328,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
/* The GICv2 LR only holds five bits of priority. */
val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
- vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
+ irq->on_lr = true;
}
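(Illustrative aside, not part of the patch: how the SGI source handling above behaves when the same SGI is queued from several vCPUs.)
	/*
	 * Example: irq->source == 0b0110, i.e. the SGI is pending from both
	 * vCPU1 and vCPU2. vgic_v2_compute_lr() picks ffs(0b0110) == 2,
	 * encodes source CPU 1 in the LR and sets GICH_LR_EOI because
	 * another source remains. The code above then clears bit 1; since
	 * 0b0100 is still set, pending_latch is raised so the SGI is
	 * injected again for vCPU2 once this LR is EOIed.
	 */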
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
@@ -257,7 +391,7 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
-void vgic_v2_enable(struct kvm_vcpu *vcpu)
+void vgic_v2_reset(struct kvm_vcpu *vcpu)
{
/*
* By forcing VMCR to zero, the GIC will restore the binary
@@ -265,9 +399,6 @@ void vgic_v2_enable(struct kvm_vcpu *vcpu)
* anyway.
*/
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
-
- /* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}
/* check for overlapping regions and for regions crossing the end of memory */
@@ -289,6 +420,7 @@ static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
int vgic_v2_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
+ unsigned int len;
int ret = 0;
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
@@ -312,10 +444,20 @@ int vgic_v2_map_resources(struct kvm *kvm)
return ret;
}
+ len = vgic_v2_init_cpuif_iodev(&dist->cpuif_iodev);
+ dist->cpuif_iodev.base_addr = dist->vgic_cpu_base;
+ dist->cpuif_iodev.iodev_type = IODEV_CPUIF;
+ dist->cpuif_iodev.redist_vcpu = NULL;
+
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist->vgic_cpu_base,
+ len, &dist->cpuif_iodev.dev);
+ if (ret)
+ return ret;
+
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm_vgic_global_state.vcpu_base,
- KVM_VGIC_V2_CPU_SIZE, true);
+ KVM_VGIC_V2_CPU_SIZE - SZ_4K, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
return ret;
@@ -385,6 +527,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.can_emulate_gicv2 = true;
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+ kvm_vgic_global_state.gicc_base = info->gicc_base;
kvm_vgic_global_state.type = VGIC_V2;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
@@ -423,16 +566,26 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
void __iomem *base = kvm_vgic_global_state.vctrl_base;
u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
if (!base)
return;
- if (used_lrs) {
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+
+ if (used_lrs)
save_lrs(vcpu, base);
- writel_relaxed(0, base + GICH_HCR);
+
+ if (cpu_if->vgic_hcr & GICH_HCR_LRENPIE) {
+ u32 val = readl_relaxed(base + GICH_HCR);
+
+ cpu_if->vgic_hcr &= ~GICH_HCR_EOICOUNT;
+ cpu_if->vgic_hcr |= val & GICH_HCR_EOICOUNT;
}
+
+ writel_relaxed(0, base + GICH_HCR);
}
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
@@ -445,13 +598,10 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
if (!base)
return;
- if (used_lrs) {
- writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
- for (i = 0; i < used_lrs; i++) {
- writel_relaxed(cpu_if->vgic_lr[i],
- base + GICH_LR0 + (i * 4));
- }
- }
+ writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+
+ for (i = 0; i < used_lrs; i++)
+ writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
}
void vgic_v2_load(struct kvm_vcpu *vcpu)
@@ -468,6 +618,5 @@ void vgic_v2_put(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}