Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                           |    1
-rw-r--r--  arch/riscv/configs/defconfig                 |    2
-rw-r--r--  arch/riscv/include/asm/irq.h                 |    6
-rw-r--r--  arch/riscv/include/asm/kvm_host.h            |    4
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_pmu.h        |    3
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi.h        |   25
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h   |   34
-rw-r--r--  arch/riscv/include/asm/sbi.h                 |   75
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h            |   21
-rw-r--r--  arch/riscv/kvm/Makefile                      |    1
-rw-r--r--  arch/riscv/kvm/gstage.c                      |   27
-rw-r--r--  arch/riscv/kvm/main.c                        |   33
-rw-r--r--  arch/riscv/kvm/vcpu.c                        |    3
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c                 |   95
-rw-r--r--  arch/riscv/kvm/vcpu_pmu.c                    |   74
-rw-r--r--  arch/riscv/kvm/vcpu_sbi.c                    |  176
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_fwft.c               |  544
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_pmu.c                |    3
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_sta.c                |   72
-rw-r--r--  arch/riscv/kvm/vmid.c                        |    8
20 files changed, 1046 insertions, 161 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 6eb9db8017b8..0c6038dc5dfd 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -16,6 +16,7 @@ config RISCV
select ACPI_MCFG if (ACPI && PCI)
select ACPI_PPTT if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+ select ACPI_RIMT if ACPI
select ACPI_SPCR_TABLE if ACPI
select ARCH_DMA_DEFAULT_COHERENT
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b9ef2da15fb2..fc2725cbca18 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -140,6 +140,8 @@ CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_MOTORCOMM_PHY=y
CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_SUN4I_LRADC=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 59c975f750c9..e29ded3416b4 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -32,6 +32,7 @@ enum riscv_irqchip_type {
ACPI_RISCV_IRQCHIP_IMSIC = 0x01,
ACPI_RISCV_IRQCHIP_PLIC = 0x02,
ACPI_RISCV_IRQCHIP_APLIC = 0x03,
+ ACPI_RISCV_IRQCHIP_SMSI = 0x04,
};
int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
@@ -42,6 +43,7 @@ unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int
unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id);
unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx);
int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res);
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs);
#else
static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
@@ -76,6 +78,10 @@ static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resour
return 0;
}
+static inline int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_ACPI */
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index d71d3299a335..4d794573e3db 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_sbi_fwft.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
@@ -263,6 +264,9 @@ struct kvm_vcpu_arch {
/* Performance monitoring context */
struct kvm_pmu pmu_context;
+ /* Firmware feature SBI extension context */
+ struct kvm_sbi_fwft fwft_context;
+
/* 'static' configurations which are set only once */
struct kvm_vcpu_config cfg;
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
index 1d85b6617508..9a930afc8f57 100644
--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -98,6 +98,9 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
unsigned long saddr_high, unsigned long flags,
struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
+ unsigned long saddr_high, unsigned long num_events,
+ unsigned long flags, struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index d678fd7e5973..3497489e04db 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -11,7 +11,7 @@
#define KVM_SBI_IMPID 3
-#define KVM_SBI_VERSION_MAJOR 2
+#define KVM_SBI_VERSION_MAJOR 3
#define KVM_SBI_VERSION_MINOR 0
enum kvm_riscv_sbi_ext_status {
@@ -59,6 +59,14 @@ struct kvm_vcpu_sbi_extension {
void (*deinit)(struct kvm_vcpu *vcpu);
void (*reset)(struct kvm_vcpu *vcpu);
+
+ unsigned long state_reg_subtype;
+ unsigned long (*get_state_reg_count)(struct kvm_vcpu *vcpu);
+ int (*get_state_reg_id)(struct kvm_vcpu *vcpu, int index, u64 *reg_id);
+ int (*get_state_reg)(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, void *reg_val);
+ int (*set_state_reg)(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, const void *reg_val);
};
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -69,27 +77,21 @@ void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
unsigned long pc, unsigned long a1);
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
-int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg);
-int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid);
-bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu);
-int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
- unsigned long *reg_val);
-int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
- unsigned long reg_val);
-
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#endif
@@ -102,6 +104,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h b/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h
new file mode 100644
index 000000000000..5604cec79902
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Rivos Inc.
+ *
+ * Authors:
+ * Clément Léger <cleger@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_FWFT_H
+#define __KVM_VCPU_RISCV_FWFT_H
+
+#include <asm/sbi.h>
+
+struct kvm_sbi_fwft_feature;
+
+struct kvm_sbi_fwft_config {
+ const struct kvm_sbi_fwft_feature *feature;
+ bool supported;
+ bool enabled;
+ unsigned long flags;
+};
+
+/* FWFT data structure per vcpu */
+struct kvm_sbi_fwft {
+ struct kvm_sbi_fwft_config *configs;
+#ifndef CONFIG_32BIT
+ bool have_vs_pmlen_7;
+ bool have_vs_pmlen_16;
+#endif
+};
+
+#define vcpu_to_fwft(vcpu) (&(vcpu)->arch.fwft_context)
+
+#endif /* !__KVM_VCPU_RISCV_FWFT_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 341e74238aa0..ccc77a89b1e2 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -36,6 +36,7 @@ enum sbi_ext_id {
SBI_EXT_STA = 0x535441,
SBI_EXT_NACL = 0x4E41434C,
SBI_EXT_FWFT = 0x46574654,
+ SBI_EXT_MPXY = 0x4D505859,
/* Experimentals extensions must lie within this range */
SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -136,6 +137,7 @@ enum sbi_ext_pmu_fid {
SBI_EXT_PMU_COUNTER_FW_READ,
SBI_EXT_PMU_COUNTER_FW_READ_HI,
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ SBI_EXT_PMU_EVENT_GET_INFO,
};
union sbi_pmu_ctr_info {
@@ -159,9 +161,20 @@ struct riscv_pmu_snapshot_data {
u64 reserved[447];
};
+struct riscv_pmu_event_info {
+ u32 event_idx;
+ u32 output;
+ u64 event_data;
+};
+
+#define RISCV_PMU_EVENT_INFO_OUTPUT_MASK 0x01
+
#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
+/* SBI v3.0 allows extended hpmeventX width value */
+#define RISCV_PMU_RAW_EVENT_V2_MASK GENMASK_ULL(55, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+#define RISCV_PMU_RAW_EVENT_V2_IDX 0x30000
#define RISCV_PLAT_FW_EVENT 0xFFFF
/** General pmu event codes specified in SBI PMU extension */
@@ -219,6 +232,7 @@ enum sbi_pmu_event_type {
SBI_PMU_EVENT_TYPE_HW = 0x0,
SBI_PMU_EVENT_TYPE_CACHE = 0x1,
SBI_PMU_EVENT_TYPE_RAW = 0x2,
+ SBI_PMU_EVENT_TYPE_RAW_V2 = 0x3,
SBI_PMU_EVENT_TYPE_FW = 0xf,
};
@@ -430,6 +444,67 @@ enum sbi_fwft_feature_t {
#define SBI_FWFT_SET_FLAG_LOCK BIT(0)
+enum sbi_ext_mpxy_fid {
+ SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ SBI_EXT_MPXY_SET_SHMEM,
+ SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ SBI_EXT_MPXY_READ_ATTRS,
+ SBI_EXT_MPXY_WRITE_ATTRS,
+ SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+};
+
+enum sbi_mpxy_attribute_id {
+ /* Standard channel attributes managed by MPXY framework */
+ SBI_MPXY_ATTR_MSG_PROT_ID = 0x00000000,
+ SBI_MPXY_ATTR_MSG_PROT_VER = 0x00000001,
+ SBI_MPXY_ATTR_MSG_MAX_LEN = 0x00000002,
+ SBI_MPXY_ATTR_MSG_SEND_TIMEOUT = 0x00000003,
+ SBI_MPXY_ATTR_MSG_COMPLETION_TIMEOUT = 0x00000004,
+ SBI_MPXY_ATTR_CHANNEL_CAPABILITY = 0x00000005,
+ SBI_MPXY_ATTR_SSE_EVENT_ID = 0x00000006,
+ SBI_MPXY_ATTR_MSI_CONTROL = 0x00000007,
+ SBI_MPXY_ATTR_MSI_ADDR_LO = 0x00000008,
+ SBI_MPXY_ATTR_MSI_ADDR_HI = 0x00000009,
+ SBI_MPXY_ATTR_MSI_DATA = 0x0000000A,
+ SBI_MPXY_ATTR_EVENTS_STATE_CONTROL = 0x0000000B,
+ SBI_MPXY_ATTR_STD_ATTR_MAX_IDX,
+ /*
+ * Message protocol specific attributes, managed by
+ * the message protocol specification.
+ */
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START = 0x80000000,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_END = 0xffffffff
+};
+
+/* Possible values of MSG_PROT_ID attribute as per SBI v3.0 (or higher) */
+enum sbi_mpxy_msgproto_id {
+ SBI_MPXY_MSGPROTO_RPMI_ID = 0x0,
+};
+
+/* RPMI message protocol specific MPXY attributes */
+enum sbi_mpxy_rpmi_attribute_id {
+ SBI_MPXY_RPMI_ATTR_SERVICEGROUP_ID = SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ SBI_MPXY_RPMI_ATTR_SERVICEGROUP_VERSION,
+ SBI_MPXY_RPMI_ATTR_IMPL_ID,
+ SBI_MPXY_RPMI_ATTR_IMPL_VERSION,
+ SBI_MPXY_RPMI_ATTR_MAX_ID
+};
+
+/* Encoding of MSG_PROT_VER attribute */
+#define SBI_MPXY_MSG_PROT_VER_MAJOR(__ver) upper_16_bits(__ver)
+#define SBI_MPXY_MSG_PROT_VER_MINOR(__ver) lower_16_bits(__ver)
+#define SBI_MPXY_MSG_PROT_MKVER(__maj, __min) (((u32)(__maj) << 16) | (u16)(__min))
+
+/* Capabilities available through CHANNEL_CAPABILITY attribute */
+#define SBI_MPXY_CHAN_CAP_MSI BIT(0)
+#define SBI_MPXY_CHAN_CAP_SSE BIT(1)
+#define SBI_MPXY_CHAN_CAP_EVENTS_STATE BIT(2)
+#define SBI_MPXY_CHAN_CAP_SEND_WITH_RESP BIT(3)
+#define SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP BIT(4)
+#define SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS BIT(5)
+
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 251099d860aa..759a4852c09a 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -56,6 +56,7 @@ struct kvm_riscv_config {
unsigned long mimpid;
unsigned long zicboz_block_size;
unsigned long satp_mode;
+ unsigned long zicbop_block_size;
};
/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -185,6 +186,10 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZICCRSE,
KVM_RISCV_ISA_EXT_ZAAMO,
KVM_RISCV_ISA_EXT_ZALRSC,
+ KVM_RISCV_ISA_EXT_ZICBOP,
+ KVM_RISCV_ISA_EXT_ZFBFMIN,
+ KVM_RISCV_ISA_EXT_ZVFBFMIN,
+ KVM_RISCV_ISA_EXT_ZVFBFWMA,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -205,6 +210,7 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
KVM_RISCV_SBI_EXT_SUSP,
+ KVM_RISCV_SBI_EXT_FWFT,
KVM_RISCV_SBI_EXT_MAX,
};
@@ -214,6 +220,18 @@ struct kvm_riscv_sbi_sta {
unsigned long shmem_hi;
};
+struct kvm_riscv_sbi_fwft_feature {
+ unsigned long enable;
+ unsigned long flags;
+ unsigned long value;
+};
+
+/* SBI FWFT extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_sbi_fwft {
+ struct kvm_riscv_sbi_fwft_feature misaligned_deleg;
+ struct kvm_riscv_sbi_fwft_feature pointer_masking;
+};
+
/* Possible states for kvm_riscv_timer */
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
@@ -297,6 +315,9 @@ struct kvm_riscv_sbi_sta {
#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_SBI_STA_REG(name) \
(offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_SBI_FWFT (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_FWFT_REG(name) \
+ (offsetof(struct kvm_riscv_sbi_fwft, name) / sizeof(unsigned long))
/* Device Control API: RISC-V AIA */
#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
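The new KVM_REG_RISCV_SBI_FWFT subtype exposes each firmware feature to
userspace as an enable/flags/value triple via KVM_GET_ONE_REG and
KVM_SET_ONE_REG. A hedged userspace sketch of composing such a register id on
RV64 (the wrapper function and file-descriptor handling are assumptions, not
part of this patch):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the FWFT misaligned-delegation "value" register of one vCPU. */
	static int get_fwft_misaligned_value(int vcpu_fd, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT |
			      KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.value),
			.addr = (uint64_t)(unsigned long)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}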
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 4b199dc3e58b..07197395750e 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -27,6 +27,7 @@ kvm-y += vcpu_onereg.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o
kvm-y += vcpu_sbi.o
kvm-y += vcpu_sbi_base.o
+kvm-y += vcpu_sbi_fwft.o
kvm-y += vcpu_sbi_hsm.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o
kvm-y += vcpu_sbi_replace.o
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index 24c270d6d0e2..b67d60d722c2 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -321,7 +321,7 @@ void __init kvm_riscv_gstage_mode_detect(void)
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
kvm_riscv_gstage_mode = HGATP_MODE_SV57X4;
kvm_riscv_gstage_pgd_levels = 5;
- goto skip_sv48x4_test;
+ goto done;
}
/* Try Sv48x4 G-stage mode */
@@ -329,10 +329,31 @@ void __init kvm_riscv_gstage_mode_detect(void)
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
kvm_riscv_gstage_mode = HGATP_MODE_SV48X4;
kvm_riscv_gstage_pgd_levels = 4;
+ goto done;
}
-skip_sv48x4_test:
+ /* Try Sv39x4 G-stage mode */
+ csr_write(CSR_HGATP, HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
+ if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV39X4) {
+ kvm_riscv_gstage_mode = HGATP_MODE_SV39X4;
+ kvm_riscv_gstage_pgd_levels = 3;
+ goto done;
+ }
+#else /* CONFIG_32BIT */
+ /* Try Sv32x4 G-stage mode */
+ csr_write(CSR_HGATP, HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
+ if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV32X4) {
+ kvm_riscv_gstage_mode = HGATP_MODE_SV32X4;
+ kvm_riscv_gstage_pgd_levels = 2;
+ goto done;
+ }
+#endif
+
+ /* KVM depends on !HGATP_MODE_OFF */
+ kvm_riscv_gstage_mode = HGATP_MODE_OFF;
+ kvm_riscv_gstage_pgd_levels = 0;
+
+done:
csr_write(CSR_HGATP, 0);
kvm_riscv_local_hfence_gvma_all();
-#endif
}
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 67c876de74ef..77dc1655b442 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -93,6 +93,23 @@ static int __init riscv_kvm_init(void)
return rc;
kvm_riscv_gstage_mode_detect();
+ switch (kvm_riscv_gstage_mode) {
+ case HGATP_MODE_SV32X4:
+ str = "Sv32x4";
+ break;
+ case HGATP_MODE_SV39X4:
+ str = "Sv39x4";
+ break;
+ case HGATP_MODE_SV48X4:
+ str = "Sv48x4";
+ break;
+ case HGATP_MODE_SV57X4:
+ str = "Sv57x4";
+ break;
+ default:
+ kvm_riscv_nacl_exit();
+ return -ENODEV;
+ }
kvm_riscv_gstage_vmid_detect();
@@ -135,22 +152,6 @@ static int __init riscv_kvm_init(void)
(rc) ? slist : "no features");
}
- switch (kvm_riscv_gstage_mode) {
- case HGATP_MODE_SV32X4:
- str = "Sv32x4";
- break;
- case HGATP_MODE_SV39X4:
- str = "Sv39x4";
- break;
- case HGATP_MODE_SV48X4:
- str = "Sv48x4";
- break;
- case HGATP_MODE_SV57X4:
- str = "Sv57x4";
- break;
- default:
- return -ENODEV;
- }
kvm_info("using %s G-stage page table format\n", str);
kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 3ebcfffaa978..47bcf190ccc5 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -133,6 +133,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
+
+ vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
@@ -570,7 +572,6 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
}
- cfg->hedeleg = KVM_HEDELEG_DEFAULT;
if (vcpu->guest_debug)
cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index cce6a38ea54f..865dae903aa0 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -65,9 +65,11 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZCF),
KVM_ISA_EXT_ARR(ZCMOP),
KVM_ISA_EXT_ARR(ZFA),
+ KVM_ISA_EXT_ARR(ZFBFMIN),
KVM_ISA_EXT_ARR(ZFH),
KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
+ KVM_ISA_EXT_ARR(ZICBOP),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICCRSE),
KVM_ISA_EXT_ARR(ZICNTR),
@@ -88,6 +90,8 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(ZTSO),
KVM_ISA_EXT_ARR(ZVBB),
KVM_ISA_EXT_ARR(ZVBC),
+ KVM_ISA_EXT_ARR(ZVFBFMIN),
+ KVM_ISA_EXT_ARR(ZVFBFWMA),
KVM_ISA_EXT_ARR(ZVFH),
KVM_ISA_EXT_ARR(ZVFHMIN),
KVM_ISA_EXT_ARR(ZVKB),
@@ -173,7 +177,6 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
- case KVM_RISCV_ISA_EXT_SMNPM:
/* There is not architectural config bit to disable sscofpmf completely */
case KVM_RISCV_ISA_EXT_SSCOFPMF:
case KVM_RISCV_ISA_EXT_SSNPM:
@@ -199,8 +202,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZCF:
case KVM_RISCV_ISA_EXT_ZCMOP:
case KVM_RISCV_ISA_EXT_ZFA:
+ case KVM_RISCV_ISA_EXT_ZFBFMIN:
case KVM_RISCV_ISA_EXT_ZFH:
case KVM_RISCV_ISA_EXT_ZFHMIN:
+ case KVM_RISCV_ISA_EXT_ZICBOP:
case KVM_RISCV_ISA_EXT_ZICCRSE:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICOND:
@@ -220,6 +225,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_ZTSO:
case KVM_RISCV_ISA_EXT_ZVBB:
case KVM_RISCV_ISA_EXT_ZVBC:
+ case KVM_RISCV_ISA_EXT_ZVFBFMIN:
+ case KVM_RISCV_ISA_EXT_ZVFBFWMA:
case KVM_RISCV_ISA_EXT_ZVFH:
case KVM_RISCV_ISA_EXT_ZVFHMIN:
case KVM_RISCV_ISA_EXT_ZVKB:
@@ -277,15 +284,20 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ if (!riscv_isa_extension_available(NULL, ZICBOM))
return -ENOENT;
reg_val = riscv_cbom_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ if (!riscv_isa_extension_available(NULL, ZICBOZ))
return -ENOENT;
reg_val = riscv_cboz_block_size;
break;
+ case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
+ if (!riscv_isa_extension_available(NULL, ZICBOP))
+ return -ENOENT;
+ reg_val = riscv_cbop_block_size;
+ break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
reg_val = vcpu->arch.mvendorid;
break;
@@ -366,17 +378,23 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
}
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ if (!riscv_isa_extension_available(NULL, ZICBOM))
return -ENOENT;
if (reg_val != riscv_cbom_block_size)
return -EINVAL;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ if (!riscv_isa_extension_available(NULL, ZICBOZ))
return -ENOENT;
if (reg_val != riscv_cboz_block_size)
return -EINVAL;
break;
+ case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size):
+ if (!riscv_isa_extension_available(NULL, ZICBOP))
+ return -ENOENT;
+ if (reg_val != riscv_cbop_block_size)
+ return -EINVAL;
+ break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
if (reg_val == vcpu->arch.mvendorid)
break;
@@ -817,10 +835,13 @@ static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
* was not available.
*/
if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
- !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ !riscv_isa_extension_available(NULL, ZICBOM))
continue;
else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
- !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ !riscv_isa_extension_available(NULL, ZICBOZ))
+ continue;
+ else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) &&
+ !riscv_isa_extension_available(NULL, ZICBOP))
continue;
size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
@@ -1061,66 +1082,14 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
return copy_isa_ext_reg_indices(vcpu, NULL);
}
-static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- unsigned int n = 0;
-
- for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
- u64 size = IS_ENABLED(CONFIG_32BIT) ?
- KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
- u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
- KVM_REG_RISCV_SBI_SINGLE | i;
-
- if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
- continue;
-
- if (uindices) {
- if (put_user(reg, uindices))
- return -EFAULT;
- uindices++;
- }
-
- n++;
- }
-
- return n;
-}
-
static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
- return copy_sbi_ext_reg_indices(vcpu, NULL);
-}
-
-static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
- struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
- int total = 0;
-
- if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
- u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
- int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
-
- for (int i = 0; i < n; i++) {
- u64 reg = KVM_REG_RISCV | size |
- KVM_REG_RISCV_SBI_STATE |
- KVM_REG_RISCV_SBI_STA | i;
-
- if (uindices) {
- if (put_user(reg, uindices))
- return -EFAULT;
- uindices++;
- }
- }
-
- total += n;
- }
-
- return total;
+ return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
}
static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
- return copy_sbi_reg_indices(vcpu, NULL);
+ return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL);
}
static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
@@ -1243,12 +1212,12 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
return ret;
uindices += ret;
- ret = copy_sbi_ext_reg_indices(vcpu, uindices);
+ ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
- ret = copy_sbi_reg_indices(vcpu, uindices);
+ ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 78ac3216a54d..a2fae70ee174 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -60,6 +60,7 @@ static u32 kvm_pmu_get_perf_event_type(unsigned long eidx)
type = PERF_TYPE_HW_CACHE;
break;
case SBI_PMU_EVENT_TYPE_RAW:
+ case SBI_PMU_EVENT_TYPE_RAW_V2:
case SBI_PMU_EVENT_TYPE_FW:
type = PERF_TYPE_RAW;
break;
@@ -128,6 +129,9 @@ static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data)
case SBI_PMU_EVENT_TYPE_RAW:
config = evt_data & RISCV_PMU_RAW_EVENT_MASK;
break;
+ case SBI_PMU_EVENT_TYPE_RAW_V2:
+ config = evt_data & RISCV_PMU_RAW_EVENT_V2_MASK;
+ break;
case SBI_PMU_EVENT_TYPE_FW:
if (ecode < SBI_PMU_FW_MAX)
config = (1ULL << 63) | ecode;
@@ -405,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
int sbiret = 0;
gpa_t saddr;
- unsigned long hva;
- bool writable;
if (!kvpmu || flags) {
sbiret = SBI_ERR_INVALID_PARAM;
@@ -428,19 +430,14 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
goto out;
}
- hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
- if (kvm_is_error_hva(hva) || !writable) {
- sbiret = SBI_ERR_INVALID_ADDRESS;
- goto out;
- }
-
kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
if (!kvpmu->sdata)
return -ENOMEM;
+ /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) {
kfree(kvpmu->sdata);
- sbiret = SBI_ERR_FAILURE;
+ sbiret = SBI_ERR_INVALID_ADDRESS;
goto out;
}
@@ -452,6 +449,65 @@ out:
return 0;
}
+int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
+ unsigned long saddr_high, unsigned long num_events,
+ unsigned long flags, struct kvm_vcpu_sbi_return *retdata)
+{
+ struct riscv_pmu_event_info *einfo = NULL;
+ int shmem_size = num_events * sizeof(*einfo);
+ gpa_t shmem;
+ u32 eidx, etype;
+ u64 econfig;
+ int ret;
+
+ if (flags != 0 || (saddr_low & (SZ_16 - 1) || num_events == 0)) {
+ ret = SBI_ERR_INVALID_PARAM;
+ goto out;
+ }
+
+ shmem = saddr_low;
+ if (saddr_high != 0) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ shmem |= ((gpa_t)saddr_high << 32);
+ } else {
+ ret = SBI_ERR_INVALID_ADDRESS;
+ goto out;
+ }
+ }
+
+ einfo = kzalloc(shmem_size, GFP_KERNEL);
+ if (!einfo)
+ return -ENOMEM;
+
+ ret = kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size);
+ if (ret) {
+ ret = SBI_ERR_FAILURE;
+ goto free_mem;
+ }
+
+ for (int i = 0; i < num_events; i++) {
+ eidx = einfo[i].event_idx;
+ etype = kvm_pmu_get_perf_event_type(eidx);
+ econfig = kvm_pmu_get_perf_event_config(eidx, einfo[i].event_data);
+ ret = riscv_pmu_get_event_info(etype, econfig, NULL);
+ einfo[i].output = (ret > 0) ? 1 : 0;
+ }
+
+ ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
+ if (ret) {
+ ret = SBI_ERR_INVALID_ADDRESS;
+ goto free_mem;
+ }
+
+ ret = 0;
+free_mem:
+ kfree(einfo);
+out:
+ retdata->err_val = ret;
+
+ return 0;
+}
+
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
struct kvm_vcpu_sbi_return *retdata)
{
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index a56c4959f9ad..1b13623380e1 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -79,6 +79,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_ptr = &vcpu_sbi_ext_sta,
},
{
+ .ext_idx = KVM_RISCV_SBI_EXT_FWFT,
+ .ext_ptr = &vcpu_sbi_ext_fwft,
+ },
+ {
.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_ptr = &vcpu_sbi_ext_experimental,
},
@@ -106,7 +110,7 @@ riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
return sext;
}
-bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
+static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
const struct kvm_riscv_sbi_extension_entry *sext;
@@ -284,6 +288,31 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ unsigned int n = 0;
+
+ for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_SINGLE | i;
+
+ if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
+ continue;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -360,64 +389,163 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
return 0;
}
-int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
+int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ const struct kvm_riscv_sbi_extension_entry *entry;
+ const struct kvm_vcpu_sbi_extension *ext;
+ unsigned long state_reg_count;
+ int i, j, rc, count = 0;
+ u64 reg;
+
+ for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ entry = &sbi_ext[i];
+ ext = entry->ext_ptr;
+
+ if (!ext->get_state_reg_count ||
+ scontext->ext_status[entry->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED)
+ continue;
+
+ state_reg_count = ext->get_state_reg_count(vcpu);
+ if (!uindices)
+ goto skip_put_user;
+
+ for (j = 0; j < state_reg_count; j++) {
+ if (ext->get_state_reg_id) {
+ rc = ext->get_state_reg_id(vcpu, j, &reg);
+ if (rc)
+ return rc;
+ } else {
+ reg = KVM_REG_RISCV |
+ (IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
+ KVM_REG_RISCV_SBI_STATE |
+ ext->state_reg_subtype | j;
+ }
+
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+skip_put_user:
+ count += state_reg_count;
+ }
+
+ return count;
+}
+
+static const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext_withstate(struct kvm_vcpu *vcpu,
+ unsigned long subtype)
+{
+ struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+ const struct kvm_riscv_sbi_extension_entry *entry;
+ const struct kvm_vcpu_sbi_extension *ext;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ entry = &sbi_ext[i];
+ ext = entry->ext_ptr;
+
+ if (ext->get_state_reg_count &&
+ ext->state_reg_subtype == subtype &&
+ scontext->ext_status[entry->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_ENABLED)
+ return ext;
+ }
+
+ return NULL;
+}
+
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_STATE);
- unsigned long reg_subtype, reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ const struct kvm_vcpu_sbi_extension *ext;
+ unsigned long reg_subtype;
+ void *reg_val;
+ u64 data64;
+ u32 data32;
+ u16 data16;
+ u8 data8;
+
+ switch (KVM_REG_SIZE(reg->id)) {
+ case 1:
+ reg_val = &data8;
+ break;
+ case 2:
+ reg_val = &data16;
+ break;
+ case 4:
+ reg_val = &data32;
+ break;
+ case 8:
+ reg_val = &data64;
+ break;
+ default:
return -EINVAL;
+ }
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
- switch (reg_subtype) {
- case KVM_REG_RISCV_SBI_STA:
- return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
- default:
+ ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
+ if (!ext || !ext->set_state_reg)
return -EINVAL;
- }
- return 0;
+ return ext->set_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
}
-int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_STATE);
- unsigned long reg_subtype, reg_val;
+ const struct kvm_vcpu_sbi_extension *ext;
+ unsigned long reg_subtype;
+ void *reg_val;
+ u64 data64;
+ u32 data32;
+ u16 data16;
+ u8 data8;
int ret;
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ switch (KVM_REG_SIZE(reg->id)) {
+ case 1:
+ reg_val = &data8;
+ break;
+ case 2:
+ reg_val = &data16;
+ break;
+ case 4:
+ reg_val = &data32;
+ break;
+ case 8:
+ reg_val = &data64;
+ break;
+ default:
return -EINVAL;
+ }
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
- switch (reg_subtype) {
- case KVM_REG_RISCV_SBI_STA:
- ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
- break;
- default:
+ ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype);
+ if (!ext || !ext->get_state_reg)
return -EINVAL;
- }
+ ret = ext->get_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val);
if (ret)
return ret;
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+ if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
diff --git a/arch/riscv/kvm/vcpu_sbi_fwft.c b/arch/riscv/kvm/vcpu_sbi_fwft.c
new file mode 100644
index 000000000000..62cc9c3d5759
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_fwft.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Rivos Inc.
+ *
+ * Authors:
+ * Clément Léger <cleger@rivosinc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/cpufeature.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_sbi_fwft.h>
+
+#define MIS_DELEG (BIT_ULL(EXC_LOAD_MISALIGNED) | BIT_ULL(EXC_STORE_MISALIGNED))
+
+struct kvm_sbi_fwft_feature {
+ /**
+ * @id: Feature ID
+ */
+ enum sbi_fwft_feature_t id;
+
+ /**
+	 * @first_reg_num: ONE_REG index of this feature's first register
+ */
+ unsigned long first_reg_num;
+
+ /**
+ * @supported: Check if the feature is supported on the vcpu
+ *
+	 * This callback is optional; if not provided, the feature is assumed
+	 * to be supported
+ */
+ bool (*supported)(struct kvm_vcpu *vcpu);
+
+ /**
+	 * @reset: Reset the feature value irrespective of whether the feature is supported or not
+ *
+ * This callback is mandatory
+ */
+ void (*reset)(struct kvm_vcpu *vcpu);
+
+ /**
+ * @set: Set the feature value
+ *
+ * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*)
+ *
+ * This callback is mandatory
+ */
+ long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long value);
+
+ /**
+ * @get: Get the feature current value
+ *
+ * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*)
+ *
+ * This callback is mandatory
+ */
+ long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long *value);
+};
+
+static const enum sbi_fwft_feature_t kvm_fwft_defined_features[] = {
+ SBI_FWFT_MISALIGNED_EXC_DELEG,
+ SBI_FWFT_LANDING_PAD,
+ SBI_FWFT_SHADOW_STACK,
+ SBI_FWFT_DOUBLE_TRAP,
+ SBI_FWFT_PTE_AD_HW_UPDATING,
+ SBI_FWFT_POINTER_MASKING_PMLEN,
+};
+
+static bool kvm_fwft_is_defined_feature(enum sbi_fwft_feature_t feature)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_fwft_defined_features); i++) {
+ if (kvm_fwft_defined_features[i] == feature)
+ return true;
+ }
+
+ return false;
+}
+
+static bool kvm_sbi_fwft_misaligned_delegation_supported(struct kvm_vcpu *vcpu)
+{
+ return misaligned_traps_can_delegate();
+}
+
+static void kvm_sbi_fwft_reset_misaligned_delegation(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ cfg->hedeleg &= ~MIS_DELEG;
+}
+
+static long kvm_sbi_fwft_set_misaligned_delegation(struct kvm_vcpu *vcpu,
+ struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long value)
+{
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ if (value == 1) {
+ cfg->hedeleg |= MIS_DELEG;
+ if (!one_reg_access)
+ csr_set(CSR_HEDELEG, MIS_DELEG);
+ } else if (value == 0) {
+ cfg->hedeleg &= ~MIS_DELEG;
+ if (!one_reg_access)
+ csr_clear(CSR_HEDELEG, MIS_DELEG);
+ } else {
+ return SBI_ERR_INVALID_PARAM;
+ }
+
+ return SBI_SUCCESS;
+}
+
+static long kvm_sbi_fwft_get_misaligned_delegation(struct kvm_vcpu *vcpu,
+ struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long *value)
+{
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ *value = (cfg->hedeleg & MIS_DELEG) == MIS_DELEG;
+ return SBI_SUCCESS;
+}
+
+#ifndef CONFIG_32BIT
+
+static bool try_to_set_pmm(unsigned long value)
+{
+ csr_set(CSR_HENVCFG, value);
+ return (csr_read_clear(CSR_HENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
+}
+
+static bool kvm_sbi_fwft_pointer_masking_pmlen_supported(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+
+ if (!riscv_isa_extension_available(vcpu->arch.isa, SMNPM))
+ return false;
+
+ fwft->have_vs_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
+ fwft->have_vs_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);
+
+ return fwft->have_vs_pmlen_7 || fwft->have_vs_pmlen_16;
+}
+
+static void kvm_sbi_fwft_reset_pointer_masking_pmlen(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM;
+}
+
+static long kvm_sbi_fwft_set_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
+ struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long value)
+{
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+ unsigned long pmm;
+
+ switch (value) {
+ case 0:
+ pmm = ENVCFG_PMM_PMLEN_0;
+ break;
+ case 7:
+ if (!fwft->have_vs_pmlen_7)
+ return SBI_ERR_INVALID_PARAM;
+ pmm = ENVCFG_PMM_PMLEN_7;
+ break;
+ case 16:
+ if (!fwft->have_vs_pmlen_16)
+ return SBI_ERR_INVALID_PARAM;
+ pmm = ENVCFG_PMM_PMLEN_16;
+ break;
+ default:
+ return SBI_ERR_INVALID_PARAM;
+ }
+
+ vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM;
+ vcpu->arch.cfg.henvcfg |= pmm;
+
+ /*
+ * Instead of waiting for vcpu_load/put() to update HENVCFG CSR,
+	 * update here so that the VCPU sees the pointer masking mode change
+ * immediately.
+ */
+ if (!one_reg_access)
+ csr_write(CSR_HENVCFG, vcpu->arch.cfg.henvcfg);
+
+ return SBI_SUCCESS;
+}
+
+static long kvm_sbi_fwft_get_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
+ struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long *value)
+{
+ switch (vcpu->arch.cfg.henvcfg & ENVCFG_PMM) {
+ case ENVCFG_PMM_PMLEN_0:
+ *value = 0;
+ break;
+ case ENVCFG_PMM_PMLEN_7:
+ *value = 7;
+ break;
+ case ENVCFG_PMM_PMLEN_16:
+ *value = 16;
+ break;
+ default:
+ return SBI_ERR_FAILURE;
+ }
+
+ return SBI_SUCCESS;
+}
+
+#endif
+
+static const struct kvm_sbi_fwft_feature features[] = {
+ {
+ .id = SBI_FWFT_MISALIGNED_EXC_DELEG,
+ .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, misaligned_deleg.enable) /
+ sizeof(unsigned long),
+ .supported = kvm_sbi_fwft_misaligned_delegation_supported,
+ .reset = kvm_sbi_fwft_reset_misaligned_delegation,
+ .set = kvm_sbi_fwft_set_misaligned_delegation,
+ .get = kvm_sbi_fwft_get_misaligned_delegation,
+ },
+#ifndef CONFIG_32BIT
+ {
+ .id = SBI_FWFT_POINTER_MASKING_PMLEN,
+ .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, pointer_masking.enable) /
+ sizeof(unsigned long),
+ .supported = kvm_sbi_fwft_pointer_masking_pmlen_supported,
+ .reset = kvm_sbi_fwft_reset_pointer_masking_pmlen,
+ .set = kvm_sbi_fwft_set_pointer_masking_pmlen,
+ .get = kvm_sbi_fwft_get_pointer_masking_pmlen,
+ },
+#endif
+};
+
+static const struct kvm_sbi_fwft_feature *kvm_sbi_fwft_regnum_to_feature(unsigned long reg_num)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(features); i++) {
+ feature = &features[i];
+ if (feature->first_reg_num <= reg_num && reg_num < (feature->first_reg_num + 3))
+ return feature;
+ }
+
+ return NULL;
+}
+
+static struct kvm_sbi_fwft_config *
+kvm_sbi_fwft_get_config(struct kvm_vcpu *vcpu, enum sbi_fwft_feature_t feature)
+{
+ int i;
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+
+ for (i = 0; i < ARRAY_SIZE(features); i++) {
+ if (fwft->configs[i].feature->id == feature)
+ return &fwft->configs[i];
+ }
+
+ return NULL;
+}
+
+static int kvm_fwft_get_feature(struct kvm_vcpu *vcpu, u32 feature,
+ struct kvm_sbi_fwft_config **conf)
+{
+ struct kvm_sbi_fwft_config *tconf;
+
+ tconf = kvm_sbi_fwft_get_config(vcpu, feature);
+ if (!tconf) {
+ if (kvm_fwft_is_defined_feature(feature))
+ return SBI_ERR_NOT_SUPPORTED;
+
+ return SBI_ERR_DENIED;
+ }
+
+ if (!tconf->supported || !tconf->enabled)
+ return SBI_ERR_NOT_SUPPORTED;
+
+ *conf = tconf;
+
+ return SBI_SUCCESS;
+}
+
+static int kvm_sbi_fwft_set(struct kvm_vcpu *vcpu, u32 feature,
+ unsigned long value, unsigned long flags)
+{
+ int ret;
+ struct kvm_sbi_fwft_config *conf;
+
+ ret = kvm_fwft_get_feature(vcpu, feature, &conf);
+ if (ret)
+ return ret;
+
+ if ((flags & ~SBI_FWFT_SET_FLAG_LOCK) != 0)
+ return SBI_ERR_INVALID_PARAM;
+
+ if (conf->flags & SBI_FWFT_SET_FLAG_LOCK)
+ return SBI_ERR_DENIED_LOCKED;
+
+ conf->flags = flags;
+
+ return conf->feature->set(vcpu, conf, false, value);
+}
+
+static int kvm_sbi_fwft_get(struct kvm_vcpu *vcpu, unsigned long feature,
+ unsigned long *value)
+{
+ int ret;
+ struct kvm_sbi_fwft_config *conf;
+
+ ret = kvm_fwft_get_feature(vcpu, feature, &conf);
+ if (ret)
+ return ret;
+
+ return conf->feature->get(vcpu, conf, false, value);
+}
+
+static int kvm_sbi_ext_fwft_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
+{
+ int ret;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long funcid = cp->a6;
+
+ switch (funcid) {
+ case SBI_EXT_FWFT_SET:
+ ret = kvm_sbi_fwft_set(vcpu, cp->a0, cp->a1, cp->a2);
+ break;
+ case SBI_EXT_FWFT_GET:
+ ret = kvm_sbi_fwft_get(vcpu, cp->a0, &retdata->out_val);
+ break;
+ default:
+ ret = SBI_ERR_NOT_SUPPORTED;
+ break;
+ }
+
+ retdata->err_val = ret;
+
+ return 0;
+}
+
+static int kvm_sbi_ext_fwft_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ int i;
+
+ fwft->configs = kcalloc(ARRAY_SIZE(features), sizeof(struct kvm_sbi_fwft_config),
+ GFP_KERNEL);
+ if (!fwft->configs)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(features); i++) {
+ feature = &features[i];
+ conf = &fwft->configs[i];
+ if (feature->supported)
+ conf->supported = feature->supported(vcpu);
+ else
+ conf->supported = true;
+
+ conf->enabled = conf->supported;
+ conf->feature = feature;
+ }
+
+ return 0;
+}
+
+static void kvm_sbi_ext_fwft_deinit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+
+ kfree(fwft->configs);
+}
+
+static void kvm_sbi_ext_fwft_reset(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
+ const struct kvm_sbi_fwft_feature *feature;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(features); i++) {
+ fwft->configs[i].flags = 0;
+ feature = &features[i];
+ if (feature->reset)
+ feature->reset(vcpu);
+ }
+}
+
+static unsigned long kvm_sbi_ext_fwft_get_reg_count(struct kvm_vcpu *vcpu)
+{
+ unsigned long max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long reg, ret = 0;
+
+ for (reg = 0; reg < max_reg_count; reg++) {
+ feature = kvm_sbi_fwft_regnum_to_feature(reg);
+ if (!feature)
+ continue;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ continue;
+
+ ret++;
+ }
+
+ return ret;
+}
+
+static int kvm_sbi_ext_fwft_get_reg_id(struct kvm_vcpu *vcpu, int index, u64 *reg_id)
+{
+ int reg, max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ int idx = 0;
+
+ for (reg = 0; reg < max_reg_count; reg++) {
+ feature = kvm_sbi_fwft_regnum_to_feature(reg);
+ if (!feature)
+ continue;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ continue;
+
+ if (index == idx) {
+ *reg_id = KVM_REG_RISCV |
+ (IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
+ KVM_REG_RISCV_SBI_STATE |
+ KVM_REG_RISCV_SBI_FWFT | reg;
+ return 0;
+ }
+
+ idx++;
+ }
+
+ return -ENOENT;
+}
+
+static int kvm_sbi_ext_fwft_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, void *reg_val)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long *value;
+ int ret = 0;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = reg_val;
+
+ feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
+ if (!feature)
+ return -ENOENT;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ return -ENOENT;
+
+ switch (reg_num - feature->first_reg_num) {
+ case 0:
+ *value = conf->enabled;
+ break;
+ case 1:
+ *value = conf->flags;
+ break;
+ case 2:
+ ret = conf->feature->get(vcpu, conf, true, value);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return sbi_err_map_linux_errno(ret);
+}
+
+static int kvm_sbi_ext_fwft_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, const void *reg_val)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long value;
+ int ret = 0;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = *(const unsigned long *)reg_val;
+
+ feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
+ if (!feature)
+ return -ENOENT;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ return -ENOENT;
+
+ switch (reg_num - feature->first_reg_num) {
+ case 0:
+ switch (value) {
+ case 0:
+ conf->enabled = false;
+ break;
+ case 1:
+ conf->enabled = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ conf->flags = value & SBI_FWFT_SET_FLAG_LOCK;
+ break;
+ case 2:
+ ret = conf->feature->set(vcpu, conf, true, value);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return sbi_err_map_linux_errno(ret);
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft = {
+ .extid_start = SBI_EXT_FWFT,
+ .extid_end = SBI_EXT_FWFT,
+ .handler = kvm_sbi_ext_fwft_handler,
+ .init = kvm_sbi_ext_fwft_init,
+ .deinit = kvm_sbi_ext_fwft_deinit,
+ .reset = kvm_sbi_ext_fwft_reset,
+ .state_reg_subtype = KVM_REG_RISCV_SBI_FWFT,
+ .get_state_reg_count = kvm_sbi_ext_fwft_get_reg_count,
+ .get_state_reg_id = kvm_sbi_ext_fwft_get_reg_id,
+ .get_state_reg = kvm_sbi_ext_fwft_get_reg,
+ .set_state_reg = kvm_sbi_ext_fwft_set_reg,
+};
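For reference, a guest kernel reaches the handler above through the usual SBI
calling convention: extension id in a7, function id in a6, arguments in a0-a2.
A minimal guest-side sketch using the in-kernel sbi_ecall() helper (the
wrapper name is made up, not part of this patch):

	#include <asm/sbi.h>

	/* Ask the hypervisor to delegate misaligned exceptions to the guest. */
	static int guest_enable_misaligned_deleg(void)
	{
		struct sbiret ret;

		ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
				SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0, 0, 0, 0);

		return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
	}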
diff --git a/arch/riscv/kvm/vcpu_sbi_pmu.c b/arch/riscv/kvm/vcpu_sbi_pmu.c
index e4be34e03e83..a020d979d179 100644
--- a/arch/riscv/kvm/vcpu_sbi_pmu.c
+++ b/arch/riscv/kvm/vcpu_sbi_pmu.c
@@ -73,6 +73,9 @@ static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM:
ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata);
break;
+ case SBI_EXT_PMU_EVENT_GET_INFO:
+ ret = kvm_riscv_vcpu_pmu_event_info(vcpu, cp->a0, cp->a1, cp->a2, cp->a3, retdata);
+ break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index cc6cb7c8f0e4..afa0545c3bcf 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
unsigned long shmem_phys_hi = cp->a1;
u32 flags = cp->a2;
struct sbi_sta_struct zero_sta = {0};
- unsigned long hva;
- bool writable;
gpa_t shmem;
int ret;
@@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
return SBI_ERR_INVALID_ADDRESS;
}
- hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
- if (kvm_is_error_hva(hva) || !writable)
- return SBI_ERR_INVALID_ADDRESS;
-
+ /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
if (ret)
- return SBI_ERR_FAILURE;
+ return SBI_ERR_INVALID_ADDRESS;
vcpu->arch.sta.shmem = shmem;
vcpu->arch.sta.last_steal = current->sched_info.run_delay;
@@ -151,63 +146,82 @@ static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
return !!sched_info_on();
}
-const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
- .extid_start = SBI_EXT_STA,
- .extid_end = SBI_EXT_STA,
- .handler = kvm_sbi_ext_sta_handler,
- .probe = kvm_sbi_ext_sta_probe,
- .reset = kvm_riscv_vcpu_sbi_sta_reset,
-};
+static unsigned long kvm_sbi_ext_sta_get_state_reg_count(struct kvm_vcpu *vcpu)
+{
+ return sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
+}
-int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long *reg_val)
+static int kvm_sbi_ext_sta_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, void *reg_val)
{
+ unsigned long *value;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = reg_val;
+
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
- *reg_val = (unsigned long)vcpu->arch.sta.shmem;
+ *value = (unsigned long)vcpu->arch.sta.shmem;
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT))
- *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
+ *value = upper_32_bits(vcpu->arch.sta.shmem);
else
- *reg_val = 0;
+ *value = 0;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
}
-int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long reg_val)
+static int kvm_sbi_ext_sta_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, const void *reg_val)
{
+ unsigned long value;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = *(const unsigned long *)reg_val;
+
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
- vcpu->arch.sta.shmem = reg_val;
+ vcpu->arch.sta.shmem = value;
vcpu->arch.sta.shmem |= hi << 32;
} else {
- vcpu->arch.sta.shmem = reg_val;
+ vcpu->arch.sta.shmem = value;
}
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
- vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
+ vcpu->arch.sta.shmem = ((gpa_t)value << 32);
vcpu->arch.sta.shmem |= lo;
- } else if (reg_val != 0) {
+ } else if (value != 0) {
return -EINVAL;
}
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
+ .extid_start = SBI_EXT_STA,
+ .extid_end = SBI_EXT_STA,
+ .handler = kvm_sbi_ext_sta_handler,
+ .probe = kvm_sbi_ext_sta_probe,
+ .reset = kvm_riscv_vcpu_sbi_sta_reset,
+ .state_reg_subtype = KVM_REG_RISCV_SBI_STA,
+ .get_state_reg_count = kvm_sbi_ext_sta_get_state_reg_count,
+ .get_state_reg = kvm_sbi_ext_sta_get_reg,
+ .set_state_reg = kvm_sbi_ext_sta_set_reg,
+};
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index 3b426c800480..abb1c2bf2542 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>
@@ -24,15 +25,12 @@ static DEFINE_SPINLOCK(vmid_lock);
void __init kvm_riscv_gstage_vmid_detect(void)
{
- unsigned long old;
-
/* Figure-out number of VMID bits in HW */
- old = csr_read(CSR_HGATP);
- csr_write(CSR_HGATP, old | HGATP_VMID);
+ csr_write(CSR_HGATP, (kvm_riscv_gstage_mode << HGATP_MODE_SHIFT) | HGATP_VMID);
vmid_bits = csr_read(CSR_HGATP);
vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
vmid_bits = fls_long(vmid_bits);
- csr_write(CSR_HGATP, old);
+ csr_write(CSR_HGATP, 0);
/* We polluted local TLB so flush all guest TLB */
kvm_riscv_local_hfence_gvma_all();