summary refs log tree commit diff
path: root/virt/kvm/kvm_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  |  40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b7a0ae2a7b20..5fcd401a5897 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
+#include <linux/rseq.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -4026,7 +4027,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
- WRITE_ONCE(kvm->last_boosted_vcpu, i);
+ WRITE_ONCE(kvm->last_boosted_vcpu, idx);
break;
} else if (yielded < 0 && !--try) {
break;
@@ -4434,10 +4435,10 @@ static long kvm_vcpu_ioctl(struct file *filp,
return r;
/*
- * Some architectures have vcpu ioctls that are asynchronous to vcpu
- * execution; mutex_lock() would break them.
+ * Let arch code handle select vCPU ioctls without holding vcpu->mutex,
+ * e.g. to support ioctls that can run asynchronous to vCPU execution.
*/
- r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+ r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
return r;
@@ -4476,6 +4477,12 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu);
vcpu->wants_to_run = false;
+ /*
+ * FIXME: Remove this hack once all KVM architectures
+ * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
+ */
+ rseq_virt_userspace_exit();
+
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
}
@@ -5629,7 +5636,7 @@ static int kvm_offline_cpu(unsigned int cpu)
return 0;
}
-static void kvm_shutdown(void)
+static void kvm_shutdown(void *data)
{
/*
* Disable hardware virtualization and set kvm_rebooting to indicate
@@ -5647,7 +5654,7 @@ static void kvm_shutdown(void)
on_each_cpu(kvm_disable_virtualization_cpu, NULL, 1);
}
-static int kvm_suspend(void)
+static int kvm_suspend(void *data)
{
/*
* Secondary CPUs and CPU hotplug are disabled across the suspend/resume
@@ -5664,7 +5671,7 @@ static int kvm_suspend(void)
return 0;
}
-static void kvm_resume(void)
+static void kvm_resume(void *data)
{
lockdep_assert_not_held(&kvm_usage_lock);
lockdep_assert_irqs_disabled();
@@ -5672,12 +5679,16 @@ static void kvm_resume(void)
WARN_ON_ONCE(kvm_enable_virtualization_cpu());
}
-static struct syscore_ops kvm_syscore_ops = {
+static const struct syscore_ops kvm_syscore_ops = {
.suspend = kvm_suspend,
.resume = kvm_resume,
.shutdown = kvm_shutdown,
};
+static struct syscore kvm_syscore = {
+ .ops = &kvm_syscore_ops,
+};
+
int kvm_enable_virtualization(void)
{
int r;
@@ -5694,7 +5705,7 @@ int kvm_enable_virtualization(void)
if (r)
goto err_cpuhp;
- register_syscore_ops(&kvm_syscore_ops);
+ register_syscore(&kvm_syscore);
/*
* Undo virtualization enabling and bail if the system is going down.
@@ -5716,7 +5727,7 @@ int kvm_enable_virtualization(void)
return 0;
err_rebooting:
- unregister_syscore_ops(&kvm_syscore_ops);
+ unregister_syscore(&kvm_syscore);
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
err_cpuhp:
kvm_arch_disable_virtualization();
@@ -5732,7 +5743,7 @@ void kvm_disable_virtualization(void)
if (--kvm_usage_count)
return;
- unregister_syscore_ops(&kvm_syscore_ops);
+ unregister_syscore(&kvm_syscore);
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
kvm_arch_disable_virtualization();
}
@@ -6517,7 +6528,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
if (WARN_ON_ONCE(r))
goto err_vfio;
- kvm_gmem_init(module);
+ r = kvm_gmem_init(module);
+ if (r)
+ goto err_gmem;
r = kvm_init_virtualization();
if (r)
@@ -6538,6 +6551,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
err_register:
kvm_uninit_virtualization();
err_virt:
+ kvm_gmem_exit();
+err_gmem:
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
@@ -6569,6 +6584,7 @@ void kvm_exit(void)
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
+ kvm_gmem_exit();
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
kvm_irqfd_exit();