Diffstat (limited to 'arch/arm64/kvm/vgic/vgic-v3.c')
-rw-r--r--	arch/arm64/kvm/vgic/vgic-v3.c | 427
1 file changed, 329 insertions(+), 98 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 6fbb4b099855..1d6dd1b545bd 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -12,6 +12,7 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
+#include "vgic-mmio.h"
#include "vgic.h"
static bool group0_trap;
@@ -20,11 +21,48 @@ static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
- cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ cpuif->vgic_hcr = ICH_HCR_EL2_En;
+
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+
+ if (!als->nr_sgi)
+ cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount;
+
+ cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG0_MASK) ?
+ ICH_HCR_EL2_VGrp0DIE : ICH_HCR_EL2_VGrp0EIE;
+ cpuif->vgic_hcr |= (cpuif->vgic_vmcr & ICH_VMCR_ENG1_MASK) ?
+ ICH_HCR_EL2_VGrp1DIE : ICH_HCR_EL2_VGrp1EIE;
+
+ /*
+ * Dealing with EOIMode=1 is a massive source of headaches. Not
+ * only do we need to track that we have active interrupts
+ * outside of the LRs and force DIR to be trapped, we also
+ * need to deal with SPIs that can be deactivated on another
+ * CPU.
+ *
+ * On systems that do not implement TDIR, force the bit in the
+ * shadow state anyway to avoid IPI-ing these poor sods.
+ *
+ * Note that we set the trap irrespective of EOIMode, as that
+ * can change behind our back without any warning...
+ */
+ if (!cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR) ||
+ irqs_active_outside_lrs(als) ||
+ atomic_read(&vcpu->kvm->arch.vgic.active_spis))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_TDIR;
}
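
vgic_v3_configure_hcr() derives the maintenance-interrupt enables above from a summary of the vcpu's ap_list. The summary type and its predicates are defined elsewhere in the series; as a rough sketch of the assumed shape (only nr_sgi appears verbatim above, the other field names are illustrative):

/* Illustrative sketch only: the real definition lives with the vgic code */
struct ap_list_summary {
	unsigned int nr_pend_no_lr;	/* pending IRQs that did not fit in an LR */
	unsigned int nr_act_no_lr;	/* active IRQs that did not fit in an LR */
	unsigned int nr_sgi;		/* SGIs currently queued */
};

static inline bool irqs_pending_outside_lrs(const struct ap_list_summary *als)
{
	return als->nr_pend_no_lr;
}

static inline bool irqs_active_outside_lrs(const struct ap_list_summary *als)
{
	return als->nr_act_no_lr;
}

static inline bool irqs_outside_lrs(const struct ap_list_summary *als)
{
	return irqs_pending_outside_lrs(als) || irqs_active_outside_lrs(als);
}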
static bool lr_signals_eoi_mi(u64 lr_val)
@@ -33,84 +71,238 @@ static bool lr_signals_eoi_mi(u64 lr_val)
!(lr_val & ICH_LR_HW);
}
+static void vgic_v3_fold_lr(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct vgic_irq *irq;
+ bool is_v2_sgi = false;
+ bool deactivated;
+ u32 intid;
+
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ intid = val & ICH_LR_VIRTUAL_ID_MASK;
+ } else {
+ intid = val & GICH_LR_VIRTUALID;
+ is_v2_sgi = vgic_irq_is_sgi(intid);
+ }
+
+ irq = vgic_get_vcpu_irq(vcpu, intid);
+ if (!irq) /* An LPI could have been unmapped. */
+ return;
+
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ /* Always preserve the active bit for !LPIs, note deactivation */
+ if (irq->intid >= VGIC_MIN_LPI)
+ val &= ~ICH_LR_ACTIVE_BIT;
+ deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
+ irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+
+ /* Edge is the only case where we preserve the pending bit */
+ if (irq->config == VGIC_CONFIG_EDGE &&
+ (val & ICH_LR_PENDING_BIT))
+ irq->pending_latch = true;
+
+ /*
+ * Clear soft pending state when level irqs have been acked.
+ */
+ if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
+ irq->pending_latch = false;
+
+ if (is_v2_sgi) {
+ u8 cpuid = FIELD_GET(GICH_LR_PHYSID_CPUID, val);
+
+ if (irq->active)
+ irq->active_source = cpuid;
+
+ if (val & ICH_LR_PENDING_BIT)
+ irq->source |= BIT(cpuid);
+ }
+
+ /* Handle resampling for mapped interrupts if required */
+ vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
+
+ irq->on_lr = false;
+ }
+
+ /* Notify fds when the guest EOI'ed a level-triggered SPI, and drop the refcount */
+ if (deactivated && lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid)) {
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
+ atomic_dec_if_positive(&vcpu->kvm->arch.vgic.active_spis);
+ }
+
+ vgic_put_irq(vcpu->kvm, irq);
+}
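
These rewritten helpers lean on the scope-based locking guards from <linux/cleanup.h> instead of explicit raw_spin_lock()/raw_spin_unlock() pairs. A minimal, self-contained illustration of the two forms used in this file:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_counter;

/* scoped_guard: the lock is held for exactly the attached block */
static void demo_update(int delta)
{
	scoped_guard(raw_spinlock, &demo_lock)
		demo_counter += delta;
}

/* guard: the lock is held until the end of the enclosing scope */
static int demo_read(void)
{
	guard(raw_spinlock)(&demo_lock);
	return demo_counter;
}

Both forms drop the lock on every exit path. Note that scoped_guard() is built on a hidden for-loop, so a 'continue' inside it leaves the guarded block rather than continuing an enclosing loop; that is what the "train wreck" comment in vgic_v3_fold_lr_state() below is about, and why that function falls back to guard() in an else branch.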
+
+static u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);
+
+static void vgic_v3_deactivate_phys(u32 intid)
+{
+ if (cpus_have_final_cap(ARM64_HAS_GICV5_LEGACY))
+ gic_insn(intid | FIELD_PREP(GICV5_GIC_CDDI_TYPE_MASK, 1), CDDI);
+ else
+ gic_write_dir(intid);
+}
+
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
- u32 model = vcpu->kvm->arch.vgic.vgic_model;
- int lr;
+ u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr);
+ struct vgic_irq *irq;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;
-
- for (lr = 0; lr < cpuif->used_lrs; lr++) {
- u64 val = cpuif->vgic_lr[lr];
- u32 intid, cpuid;
- struct vgic_irq *irq;
- bool is_v2_sgi = false;
- bool deactivated;
+ for (int lr = 0; lr < cpuif->used_lrs; lr++)
+ vgic_v3_fold_lr(vcpu, cpuif->vgic_lr[lr]);
- cpuid = val & GICH_LR_PHYSID_CPUID;
- cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ /*
+ * EOIMode=0: use EOIcount to emulate deactivation. Deactivation is
+ * guaranteed to happen in reverse order of activation, so just pick
+ * active interrupts one after the other from the ap_list, and replay
+ * each deactivation as if the CPU were doing it. We also rely on
+ * priority drop having taken place, and on the list being sorted by
+ * priority.
+ */
+ list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ u64 lr;
- if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- intid = val & ICH_LR_VIRTUAL_ID_MASK;
+ /*
+ * I would have loved to write this using a scoped_guard(),
+ * but using 'continue' here is a total train wreck.
+ */
+ if (!eoicount) {
+ break;
} else {
- intid = val & GICH_LR_VIRTUALID;
- is_v2_sgi = vgic_irq_is_sgi(intid);
+ guard(raw_spinlock)(&irq->irq_lock);
+
+ if (!(likely(vgic_target_oracle(irq) == vcpu) &&
+ irq->active))
+ continue;
+
+ lr = vgic_v3_compute_lr(vcpu, irq) & ~ICH_LR_ACTIVE_BIT;
}
- /* Notify fds when the guest EOI'ed a level-triggered IRQ */
- if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
+ if (lr & ICH_LR_HW)
+ vgic_v3_deactivate_phys(FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
- irq = vgic_get_vcpu_irq(vcpu, intid);
- if (!irq) /* An LPI could have been unmapped. */
- continue;
+ vgic_v3_fold_lr(vcpu, lr);
+ eoicount--;
+ }
- raw_spin_lock(&irq->irq_lock);
+ cpuif->used_lrs = 0;
+}
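
For intuition, a toy model of the EOIcount replay performed above, using a hypothetical helper (the real code walks the ap_list under its locks and folds a pseudo-LR per interrupt):

/*
 * Toy model, not kernel code: guest deactivations happen in reverse
 * order of activation, so replaying 'eoicount' of them against a
 * priority-sorted list of active INTIDs pops from the front.
 */
static void replay_eoicount(u32 *active_by_prio, int nr_active, u32 eoicount)
{
	for (int i = 0; i < nr_active && eoicount; i++, eoicount--)
		deactivate_one(active_by_prio[i]);	/* hypothetical helper */
}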
- /* Always preserve the active bit, note deactivation */
- deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
- irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+void vgic_v3_deactivate(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ struct kvm_vcpu *target_vcpu = NULL;
+ bool mmio = false, is_v2_sgi;
+ struct vgic_irq *irq;
+ unsigned long flags;
+ u64 lr = 0;
+ u8 cpuid;
- if (irq->active && is_v2_sgi)
- irq->active_source = cpuid;
+ /* Snapshot CPUID, and remove it from the INTID */
+ cpuid = FIELD_GET(GENMASK_ULL(12, 10), val);
+ val &= ~GENMASK_ULL(12, 10);
- /* Edge is the only case where we preserve the pending bit */
- if (irq->config == VGIC_CONFIG_EDGE &&
- (val & ICH_LR_PENDING_BIT)) {
- irq->pending_latch = true;
+ is_v2_sgi = (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+ val < VGIC_NR_SGIS);
- if (is_v2_sgi)
- irq->source |= (1 << cpuid);
- }
+ /*
+ * We only deal with DIR when EOIMode==1, and only for SGI,
+ * PPI or SPI.
+ */
+ if (!(cpuif->vgic_vmcr & ICH_VMCR_EOIM_MASK) ||
+ val >= vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)
+ return;
+
+ /* Make sure we're in the same context as LR handling */
+ local_irq_save(flags);
+
+ irq = vgic_get_vcpu_irq(vcpu, val);
+ if (WARN_ON_ONCE(!irq))
+ goto out;
+
+ /*
+ * EOIMode=1: we must rely on traps to handle deactivation of
+ * overflowing interrupts, as there is no ordering guarantee and
+ * EOIcount isn't being incremented. Priority drop will have taken
+ * place, as ICV_EOIxR_EL1 only affects the APRs and not the LRs.
+ *
+ * Three possibilities:
+ *
+ * - The irq is not queued on any CPU, and there is nothing to
+ * do,
+ *
+ * - Or the irq is in an LR, meaning that its state is not
+ * directly observable. Treat it bluntly by making it as if
+ * this was a write to GICD_ICACTIVER, which will force an
+ * exit on all vcpus. If it hurts, don't do that.
+ *
+ * - Or the irq is active, but not in an LR, and we can
+ * directly deactivate it by building a pseudo-LR, folding it,
+ * and queueing a request to prune the resulting ap_list.
+ *
+ * Special care must be taken to match the source CPUID when
+ * deactivating a GICv2 SGI.
+ */
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ target_vcpu = irq->vcpu;
+
+ /* Not on any ap_list? */
+ if (!target_vcpu)
+ goto put;
/*
- * Clear soft pending state when level irqs have been acked.
+ * Urgh. We're deactivating something that we cannot
+ * observe yet... Big hammer time.
*/
- if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
- irq->pending_latch = false;
+ if (irq->on_lr) {
+ mmio = true;
+ goto put;
+ }
- /* Handle resampling for mapped interrupts if required */
- vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
+ /* GICv2 SGI: check that the cpuid matches */
+ if (is_v2_sgi && irq->active_source != cpuid) {
+ target_vcpu = NULL;
+ goto put;
+ }
- raw_spin_unlock(&irq->irq_lock);
- vgic_put_irq(vcpu->kvm, irq);
+ /* (with a Dalek voice) DEACTIVATE!!!! */
+ lr = vgic_v3_compute_lr(vcpu, irq) & ~ICH_LR_ACTIVE_BIT;
}
- cpuif->used_lrs = 0;
+ if (lr & ICH_LR_HW)
+ vgic_v3_deactivate_phys(FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+
+ vgic_v3_fold_lr(vcpu, lr);
+
+put:
+ vgic_put_irq(vcpu->kvm, irq);
+
+out:
+ local_irq_restore(flags);
+
+ if (mmio)
+ vgic_mmio_write_cactive(vcpu, (val / 32) * 4, 4, BIT(val % 32));
+
+ /* Force the ap_list to be pruned */
+ if (target_vcpu)
+ kvm_make_request(KVM_REQ_VGIC_PROCESS_UPDATE, target_vcpu);
}
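
The MMIO fallback above recreates a guest write to GICD_ICACTIVER<n>: each 32-bit register covers 32 interrupts, so the byte offset is (intid / 32) * 4 and the bit within the register is intid % 32. Stated as a standalone helper:

#include <linux/bits.h>

/* Map an INTID to its GICD_ICACTIVER<n> byte offset and bit mask */
static void icactiver_coords(u32 intid, u32 *offset, u32 *mask)
{
	*offset = (intid / 32) * 4;	/* 32 IRQs per 32-bit register */
	*mask = BIT(intid % 32);
}

For example, INTID 42 lands in the second register (offset 4) with mask BIT(10).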
/* Requires the irq to be locked already */
-void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+static u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u64 val = irq->intid;
bool allow_pending = true, is_v2_sgi;
+ WARN_ON(irq->on_lr);
+
is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
model == KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -150,6 +342,35 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (allow_pending && irq_is_pending(irq)) {
val |= ICH_LR_PENDING_BIT;
+ if (is_v2_sgi) {
+ u32 src = ffs(irq->source);
+
+ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+ irq->intid))
+ return 0;
+
+ val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+ if (irq->source & ~BIT(src - 1))
+ val |= ICH_LR_EOI;
+ }
+ }
+
+ if (irq->group)
+ val |= ICH_LR_GROUP;
+
+ val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
+
+ return val;
+}
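
A GICv2 SGI can be pending from several source CPUs at once, but an LR encodes a single source. vgic_v3_compute_lr() therefore picks the lowest set bit of irq->source and sets ICH_LR_EOI whenever further sources remain, so a maintenance interrupt lets the next one be injected. A toy walk of that bitmap logic (not part of the patch), assuming sources CPU0 and CPU2:

#include <linux/bitops.h>
#include <linux/printk.h>

static void sgi_source_walk(void)
{
	u8 source = 0b0101;	/* SGI pending from CPU 0 and CPU 2 */

	while (source) {
		u32 src = ffs(source);	/* 1-based index of lowest set bit */
		bool more = source & ~BIT(src - 1);

		/* One LR per source: CPUID = src - 1, EOI set while more remain */
		pr_info("LR: cpuid=%u eoi=%d\n", src - 1, more);
		source &= ~BIT(src - 1);
	}
}

This prints cpuid=0 eoi=1, then cpuid=2 eoi=0.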
+
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ u64 val = vgic_v3_compute_lr(vcpu, irq);
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
+
+ if (val & ICH_LR_PENDING_BIT) {
if (irq->config == VGIC_CONFIG_EDGE)
irq->pending_latch = false;
@@ -157,16 +378,9 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
u32 src = ffs(irq->source);
- if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
- irq->intid))
- return;
-
- val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
- irq->source &= ~(1 << (src - 1));
- if (irq->source) {
+ irq->source &= ~BIT(src - 1);
+ if (irq->source)
irq->pending_latch = true;
- val |= ICH_LR_EOI;
- }
}
}
@@ -179,12 +393,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
irq->line_level = false;
- if (irq->group)
- val |= ICH_LR_GROUP;
-
- val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
-
- vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
+ irq->on_lr = true;
}
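
With the LR value computed separately, vgic_v3_populate_lr() shrinks to writing the shadow LR plus the per-interrupt bookkeeping (pending latch, SGI source consumption, line_level resampling, and the new on_lr flag). For context, a rough sketch of how a flush path drives it, one ap_list entry per free LR (the real loop lives in vgic.c and is more careful about ordering and SGI fan-out):

int count = 0;

list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
	scoped_guard(raw_spinlock, &irq->irq_lock) {
		if (likely(vgic_target_oracle(irq) == vcpu))
			vgic_v3_populate_lr(vcpu, irq, count++);
	}

	if (count == kvm_vgic_global_state.nr_lr)
		break;
}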
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
@@ -258,7 +467,7 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
-void vgic_v3_enable(struct kvm_vcpu *vcpu)
+void vgic_v3_reset(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -288,9 +497,6 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
kvm_vgic_global_state.ich_vtr_el2);
vcpu->arch.vgic_cpu.num_pri_bits = FIELD_GET(ICH_VTR_EL2_PRIbits,
kvm_vgic_global_state.ich_vtr_el2) + 1;
-
- /* Get the show on the road... */
- vgic_v3->vgic_hcr = ICH_HCR_EL2_En;
}
void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
@@ -301,20 +507,10 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
return;
/* Hide GICv3 sysreg if necessary */
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2 ||
+ !irqchip_in_kernel(vcpu->kvm))
vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
ICH_HCR_EL2_TC);
- return;
- }
-
- if (group0_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL0;
- if (group1_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL1;
- if (common_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TC;
- if (dir_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_EL2_TDIR;
}
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
@@ -635,8 +831,53 @@ static const struct midr_range broken_seis[] = {
static bool vgic_v3_broken_seis(void)
{
- return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_SEIS) &&
- is_midr_in_range_list(broken_seis));
+ return (is_kernel_in_hyp_mode() &&
+ is_midr_in_range_list(broken_seis) &&
+ (read_sysreg_s(SYS_ICH_VTR_EL2) & ICH_VTR_EL2_SEIS));
+}
+
+void noinstr kvm_compute_ich_hcr_trap_bits(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr,
+ int nr_inst)
+{
+ u32 insn, oinsn, rd;
+ u64 hcr = 0;
+
+ if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
+ group0_trap = true;
+ group1_trap = true;
+ }
+
+ if (vgic_v3_broken_seis()) {
+ /* We know that these machines have ICH_HCR_EL2.TDIR */
+ group0_trap = true;
+ group1_trap = true;
+ dir_trap = true;
+ }
+
+ if (!cpus_have_cap(ARM64_HAS_ICH_HCR_EL2_TDIR))
+ common_trap = true;
+
+ if (group0_trap)
+ hcr |= ICH_HCR_EL2_TALL0;
+ if (group1_trap)
+ hcr |= ICH_HCR_EL2_TALL1;
+ if (common_trap)
+ hcr |= ICH_HCR_EL2_TC;
+ if (dir_trap)
+ hcr |= ICH_HCR_EL2_TDIR;
+
+ /* Compute target register */
+ oinsn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+ /* movz rd, #(val & 0xffff) */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)hcr,
+ 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr = cpu_to_le32(insn);
}
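
All four trap bits sit in the low 16 bits of ICH_HCR_EL2 (TC is bit 10, TALL0 bit 11, TALL1 bit 12, TDIR bit 14), so a single movz is enough and the (u16) truncation above is lossless. A sketch of the read side such a callback implies, assuming the three-argument ALTERNATIVE_CB form used by similar arm64 callbacks; the real vgic_ich_hcr_trap_bits(), referenced from vgic_v3_probe() below, is defined elsewhere in the patch:

#include <asm/alternative.h>

static inline u64 vgic_ich_hcr_trap_bits(void)
{
	u64 val;

	/* Rewritten at boot to "movz <rd>, #<trap bits>" by the callback above */
	asm volatile(ALTERNATIVE_CB("movz %0, #0\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_compute_ich_hcr_trap_bits)
		     : "=r" (val));
	return val;
}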
/**
@@ -650,6 +891,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
{
u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
bool has_v2;
+ u64 traps;
int ret;
has_v2 = ich_vtr_el2 >> 63;
@@ -708,29 +950,18 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (has_v2)
static_branch_enable(&vgic_v3_has_v2_compat);
- if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
- group0_trap = true;
- group1_trap = true;
- }
-
if (vgic_v3_broken_seis()) {
kvm_info("GICv3 with broken locally generated SEI\n");
-
kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_EL2_SEIS;
- group0_trap = true;
- group1_trap = true;
- if (ich_vtr_el2 & ICH_VTR_EL2_TDS)
- dir_trap = true;
- else
- common_trap = true;
}
- if (group0_trap || group1_trap || common_trap | dir_trap) {
+ traps = vgic_ich_hcr_trap_bits();
+ if (traps) {
kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
- group0_trap ? "G0" : "",
- group1_trap ? "G1" : "",
- common_trap ? "C" : "",
- dir_trap ? "D" : "");
+ (traps & ICH_HCR_EL2_TALL0) ? "G0" : "",
+ (traps & ICH_HCR_EL2_TALL1) ? "G1" : "",
+ (traps & ICH_HCR_EL2_TC) ? "C" : "",
+ (traps & ICH_HCR_EL2_TDIR) ? "D" : "");
static_branch_enable(&vgic_v3_cpuif_trap);
}
@@ -770,7 +1001,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
}
if (likely(!is_protected_kvm_enabled()))
- kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+ kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
WARN_ON(vgic_v4_put(vcpu));
if (has_vhe())