Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	27
1 file changed, 25 insertions, 2 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1c49bc681c4..f122906ed9f3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9908,8 +9908,11 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 	rcu_read_lock();
 	map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
-	if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
-		target = map->phys_map[dest_id]->vcpu;
+	if (likely(map) && dest_id <= map->max_apic_id) {
+		dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
+		if (map->phys_map[dest_id])
+			target = map->phys_map[dest_id]->vcpu;
+	}
 
 	rcu_read_unlock();
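
The first hunk hardens kvm_sched_yield() against Spectre v1: dest_id comes from the guest, and the bounds check alone does not stop the CPU from speculatively indexing phys_map[] out of range, so the index is clamped with array_index_nospec() before use. A minimal sketch of the same pattern, assuming a hypothetical table lookup (lookup_entry() and struct entry are illustrative, not KVM code):

#include <linux/nospec.h>
#include <linux/types.h>

struct entry;

/*
 * Illustrative only: clamp an untrusted index before using it, so a
 * mispredicted bounds check cannot be exploited to read out of bounds
 * under speculation. This mirrors the map->phys_map[] change above.
 */
static struct entry *lookup_entry(struct entry **table, size_t nr,
				  unsigned long idx)
{
	struct entry *e = NULL;

	if (idx < nr) {
		/* idx is clamped to [0, nr) even on a mispredicted branch. */
		idx = array_index_nospec(idx, nr);
		e = table[idx];
	}

	return e;
}
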
@@ -11008,6 +11011,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	wrmsrq(MSR_IA32_XFD_ERR, 0);
 
 	/*
+	 * Mark this CPU as needing a branch predictor flush before running
+	 * userspace. Must be done before enabling preemption to ensure it gets
+	 * set for the CPU that actually ran the guest, and not the CPU that it
+	 * may migrate to.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+		this_cpu_write(x86_ibpb_exit_to_user, true);
+
+	/*
 	 * Consume any pending interrupts, including the possible source of
 	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
 	 * An instruction is required after local_irq_enable() to fully unblock
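
The second hunk only arms the mitigation: it records, per CPU and before preemption is re-enabled, that a guest has run and that the branch predictor should be flushed before this CPU returns to userspace. The flush itself is issued later on the exit-to-user path. A hedged sketch of what that consumer side might look like (the function name and call site are assumptions; only X86_FEATURE_IBPB_EXIT_TO_USER and x86_ibpb_exit_to_user come from this patch):

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>
#include <linux/percpu.h>

/*
 * Sketch of the consumer side, not the actual entry code: before returning
 * to userspace, issue one IBPB if this CPU ran a guest since the last flush,
 * then clear the flag so subsequent exits stay cheap.
 */
static __always_inline void example_ibpb_exit_to_user(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		return;

	if (this_cpu_read(x86_ibpb_exit_to_user)) {
		/* indirect_branch_prediction_barrier() performs the IBPB. */
		indirect_branch_prediction_barrier();
		this_cpu_write(x86_ibpb_exit_to_user, false);
	}
}
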
@@ -13518,6 +13530,16 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
+#ifdef CONFIG_KVM_GUEST_MEMFD
+/*
+ * KVM doesn't yet support mmap() on guest_memfd for VMs with private memory
+ * (the private vs. shared tracking needs to be moved into guest_memfd).
+ */
+bool kvm_arch_supports_gmem_mmap(struct kvm *kvm)
+{
+	return !kvm_arch_has_private_mem(kvm);
+}
+
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
@@ -13531,6 +13553,7 @@ void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 	kvm_x86_call(gmem_invalidate)(start, end);
 }
 #endif
+#endif
 
 int kvm_spec_ctrl_test_value(u64 value)
 {
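
The last two hunks wrap the guest_memfd arch hooks in CONFIG_KVM_GUEST_MEMFD and add kvm_arch_supports_gmem_mmap(), which lets common guest_memfd code refuse mmap() for VMs that still track private vs. shared memory outside guest_memfd. A hypothetical caller sketch (the mmap helper and error value are illustrative; the real gating lives in virt/kvm/guest_memfd.c and may differ):

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>

/*
 * Illustrative only: a guest_memfd mmap path could consult the new arch hook
 * and reject shared mappings for VMs whose private memory cannot yet be
 * mmap()'d.
 */
static int example_gmem_mmap(struct kvm *kvm, struct vm_area_struct *vma)
{
	if (!kvm_arch_supports_gmem_mmap(kvm))
		return -EOPNOTSUPP;	/* private memory: no mmap() support yet */

	/* ... set up vma->vm_ops for the shared-memory case ... */
	return 0;
}
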