Diffstat (limited to 'arch/riscv/kvm/tlb.c')
-rw-r--r--  arch/riscv/kvm/tlb.c | 30
1 file changed, 30 insertions, 0 deletions
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 3c5a70a2b927..ff1aeac4eb8e 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -158,6 +158,36 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
csr_write(CSR_HGATP, hgatp);
}
+void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
+{
+ unsigned long vmid;
+
+ if (!kvm_riscv_gstage_vmid_bits() ||
+ vcpu->arch.last_exit_cpu == vcpu->cpu)
+ return;
+
+ /*
+ * On RISC-V platforms with hardware VMID support, we share the same
+ * VMID for all VCPUs of a particular Guest/VM. This means we might
+ * have stale G-stage TLB entries on the current Host CPU due to
+ * some other VCPU of the same Guest which ran previously on the
+ * current Host CPU.
+ *
+ * To clean up stale TLB entries, we simply flush all G-stage TLB
+ * entries by VMID whenever the underlying Host CPU changes for a VCPU.
+ */
+
+ vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
+ kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+
+ /*
+ * Flush VS-stage TLB entries for implementations where the VS-stage
+ * TLB does not cache guest physical address and VMID.
+ */
+ if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa))
+ kvm_riscv_local_hfence_vvma_all(vmid);
+}
+
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
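
For context, the sketch below shows roughly where a sanitize helper like this sits on the VCPU run path: after the shared G-stage VMID has been refreshed and before re-entering the guest. This is an illustrative sketch, not part of the patch; the wrapper function name is hypothetical, and it only assumes the helper added above together with the existing kvm_riscv_gstage_vmid_update() rollover helper.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, vcpu->cpu */

/*
 * Hypothetical caller sketch (not from this patch). The generic KVM
 * vcpu-load path is assumed to have already set vcpu->cpu to the
 * current host CPU.
 */
static void example_vcpu_prepare_run(struct kvm_vcpu *vcpu)
{
	/* Pick up a fresh G-stage VMID if the VMID version rolled over. */
	kvm_riscv_gstage_vmid_update(vcpu);

	/*
	 * Flush stale G-stage TLB entries (and VS-stage entries on
	 * implementations whose VS-stage TLB is not tagged with guest
	 * physical address and VMID) if this VCPU last exited on a
	 * different host CPU.
	 */
	kvm_riscv_local_tlb_sanitize(vcpu);
}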