Diffstat (limited to 'arch/riscv/kvm')

 arch/riscv/kvm/Makefile        |   1
 arch/riscv/kvm/gstage.c        |  27
 arch/riscv/kvm/main.c          |  33
 arch/riscv/kvm/vcpu.c          |   3
 arch/riscv/kvm/vcpu_onereg.c   |  95
 arch/riscv/kvm/vcpu_pmu.c      |  74
 arch/riscv/kvm/vcpu_sbi.c      | 176
 arch/riscv/kvm/vcpu_sbi_fwft.c | 544
 arch/riscv/kvm/vcpu_sbi_pmu.c  |   3
 arch/riscv/kvm/vcpu_sbi_sta.c  |  72
 arch/riscv/kvm/vmid.c          |   8
 11 files changed, 886 insertions(+), 150 deletions(-)
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 4b199dc3e58b..07197395750e 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -27,6 +27,7 @@ kvm-y += vcpu_onereg.o kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o kvm-y += vcpu_sbi.o kvm-y += vcpu_sbi_base.o +kvm-y += vcpu_sbi_fwft.o kvm-y += vcpu_sbi_hsm.o kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o kvm-y += vcpu_sbi_replace.o diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c index 24c270d6d0e2..b67d60d722c2 100644 --- a/arch/riscv/kvm/gstage.c +++ b/arch/riscv/kvm/gstage.c @@ -321,7 +321,7 @@ void __init kvm_riscv_gstage_mode_detect(void) if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) { kvm_riscv_gstage_mode = HGATP_MODE_SV57X4; kvm_riscv_gstage_pgd_levels = 5; - goto skip_sv48x4_test; + goto done; } /* Try Sv48x4 G-stage mode */ @@ -329,10 +329,31 @@ void __init kvm_riscv_gstage_mode_detect(void) if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) { kvm_riscv_gstage_mode = HGATP_MODE_SV48X4; kvm_riscv_gstage_pgd_levels = 4; + goto done; } -skip_sv48x4_test: + /* Try Sv39x4 G-stage mode */ + csr_write(CSR_HGATP, HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT); + if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV39X4) { + kvm_riscv_gstage_mode = HGATP_MODE_SV39X4; + kvm_riscv_gstage_pgd_levels = 3; + goto done; + } +#else /* CONFIG_32BIT */ + /* Try Sv32x4 G-stage mode */ + csr_write(CSR_HGATP, HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT); + if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV32X4) { + kvm_riscv_gstage_mode = HGATP_MODE_SV32X4; + kvm_riscv_gstage_pgd_levels = 2; + goto done; + } +#endif + + /* KVM depends on !HGATP_MODE_OFF */ + kvm_riscv_gstage_mode = HGATP_MODE_OFF; + kvm_riscv_gstage_pgd_levels = 0; + +done: csr_write(CSR_HGATP, 0); kvm_riscv_local_hfence_gvma_all(); -#endif } diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 67c876de74ef..77dc1655b442 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -93,6 +93,23 @@ static int __init riscv_kvm_init(void) return rc; kvm_riscv_gstage_mode_detect(); + switch (kvm_riscv_gstage_mode) { + case HGATP_MODE_SV32X4: + str = "Sv32x4"; + break; + case HGATP_MODE_SV39X4: + str = "Sv39x4"; + break; + case HGATP_MODE_SV48X4: + str = "Sv48x4"; + break; + case HGATP_MODE_SV57X4: + str = "Sv57x4"; + break; + default: + kvm_riscv_nacl_exit(); + return -ENODEV; + } kvm_riscv_gstage_vmid_detect(); @@ -135,22 +152,6 @@ static int __init riscv_kvm_init(void) (rc) ? 
slist : "no features"); } - switch (kvm_riscv_gstage_mode) { - case HGATP_MODE_SV32X4: - str = "Sv32x4"; - break; - case HGATP_MODE_SV39X4: - str = "Sv39x4"; - break; - case HGATP_MODE_SV48X4: - str = "Sv48x4"; - break; - case HGATP_MODE_SV57X4: - str = "Sv57x4"; - break; - default: - return -ENODEV; - } kvm_info("using %s G-stage page table format\n", str); kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits()); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 3ebcfffaa978..47bcf190ccc5 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -133,6 +133,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Mark this VCPU never ran */ vcpu->arch.ran_atleast_once = false; + + vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT; vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); @@ -570,7 +572,6 @@ static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu) cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0; } - cfg->hedeleg = KVM_HEDELEG_DEFAULT; if (vcpu->guest_debug) cfg->hedeleg &= ~BIT(EXC_BREAKPOINT); } diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index cce6a38ea54f..865dae903aa0 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -65,9 +65,11 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(ZCF), KVM_ISA_EXT_ARR(ZCMOP), KVM_ISA_EXT_ARR(ZFA), + KVM_ISA_EXT_ARR(ZFBFMIN), KVM_ISA_EXT_ARR(ZFH), KVM_ISA_EXT_ARR(ZFHMIN), KVM_ISA_EXT_ARR(ZICBOM), + KVM_ISA_EXT_ARR(ZICBOP), KVM_ISA_EXT_ARR(ZICBOZ), KVM_ISA_EXT_ARR(ZICCRSE), KVM_ISA_EXT_ARR(ZICNTR), @@ -88,6 +90,8 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(ZTSO), KVM_ISA_EXT_ARR(ZVBB), KVM_ISA_EXT_ARR(ZVBC), + KVM_ISA_EXT_ARR(ZVFBFMIN), + KVM_ISA_EXT_ARR(ZVFBFWMA), KVM_ISA_EXT_ARR(ZVFH), KVM_ISA_EXT_ARR(ZVFHMIN), KVM_ISA_EXT_ARR(ZVKB), @@ -173,7 +177,6 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_C: case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: - case KVM_RISCV_ISA_EXT_SMNPM: /* There is not architectural config bit to disable sscofpmf completely */ case KVM_RISCV_ISA_EXT_SSCOFPMF: case KVM_RISCV_ISA_EXT_SSNPM: @@ -199,8 +202,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_ZCF: case KVM_RISCV_ISA_EXT_ZCMOP: case KVM_RISCV_ISA_EXT_ZFA: + case KVM_RISCV_ISA_EXT_ZFBFMIN: case KVM_RISCV_ISA_EXT_ZFH: case KVM_RISCV_ISA_EXT_ZFHMIN: + case KVM_RISCV_ISA_EXT_ZICBOP: case KVM_RISCV_ISA_EXT_ZICCRSE: case KVM_RISCV_ISA_EXT_ZICNTR: case KVM_RISCV_ISA_EXT_ZICOND: @@ -220,6 +225,8 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_ZTSO: case KVM_RISCV_ISA_EXT_ZVBB: case KVM_RISCV_ISA_EXT_ZVBC: + case KVM_RISCV_ISA_EXT_ZVFBFMIN: + case KVM_RISCV_ISA_EXT_ZVFBFWMA: case KVM_RISCV_ISA_EXT_ZVFH: case KVM_RISCV_ISA_EXT_ZVFHMIN: case KVM_RISCV_ISA_EXT_ZVKB: @@ -277,15 +284,20 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu, reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK; break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + if (!riscv_isa_extension_available(NULL, ZICBOM)) return -ENOENT; reg_val = riscv_cbom_block_size; break; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + if (!riscv_isa_extension_available(NULL, ZICBOZ)) return -ENOENT; reg_val = riscv_cboz_block_size; break; + case 
KVM_REG_RISCV_CONFIG_REG(zicbop_block_size): + if (!riscv_isa_extension_available(NULL, ZICBOP)) + return -ENOENT; + reg_val = riscv_cbop_block_size; + break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): reg_val = vcpu->arch.mvendorid; break; @@ -366,17 +378,23 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, } break; case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + if (!riscv_isa_extension_available(NULL, ZICBOM)) return -ENOENT; if (reg_val != riscv_cbom_block_size) return -EINVAL; break; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): - if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + if (!riscv_isa_extension_available(NULL, ZICBOZ)) return -ENOENT; if (reg_val != riscv_cboz_block_size) return -EINVAL; break; + case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size): + if (!riscv_isa_extension_available(NULL, ZICBOP)) + return -ENOENT; + if (reg_val != riscv_cbop_block_size) + return -EINVAL; + break; case KVM_REG_RISCV_CONFIG_REG(mvendorid): if (reg_val == vcpu->arch.mvendorid) break; @@ -817,10 +835,13 @@ static int copy_config_reg_indices(const struct kvm_vcpu *vcpu, * was not available. */ if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) && - !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM)) + !riscv_isa_extension_available(NULL, ZICBOM)) continue; else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) && - !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ)) + !riscv_isa_extension_available(NULL, ZICBOZ)) + continue; + else if (i == KVM_REG_RISCV_CONFIG_REG(zicbop_block_size) && + !riscv_isa_extension_available(NULL, ZICBOP)) continue; size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; @@ -1061,66 +1082,14 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu) return copy_isa_ext_reg_indices(vcpu, NULL); } -static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int n = 0; - - for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) { - u64 size = IS_ENABLED(CONFIG_32BIT) ? - KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; - u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | - KVM_REG_RISCV_SBI_SINGLE | i; - - if (!riscv_vcpu_supports_sbi_ext(vcpu, i)) - continue; - - if (uindices) { - if (put_user(reg, uindices)) - return -EFAULT; - uindices++; - } - - n++; - } - - return n; -} - static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu) { - return copy_sbi_ext_reg_indices(vcpu, NULL); -} - -static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; - int total = 0; - - if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) { - u64 size = IS_ENABLED(CONFIG_32BIT) ? 
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; - int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long); - - for (int i = 0; i < n; i++) { - u64 reg = KVM_REG_RISCV | size | - KVM_REG_RISCV_SBI_STATE | - KVM_REG_RISCV_SBI_STA | i; - - if (uindices) { - if (put_user(reg, uindices)) - return -EFAULT; - uindices++; - } - } - - total += n; - } - - return total; + return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL); } static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu) { - return copy_sbi_reg_indices(vcpu, NULL); + return kvm_riscv_vcpu_reg_indices_sbi(vcpu, NULL); } static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu) @@ -1243,12 +1212,12 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, return ret; uindices += ret; - ret = copy_sbi_ext_reg_indices(vcpu, uindices); + ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices); if (ret < 0) return ret; uindices += ret; - ret = copy_sbi_reg_indices(vcpu, uindices); + ret = kvm_riscv_vcpu_reg_indices_sbi(vcpu, uindices); if (ret < 0) return ret; uindices += ret; diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index 78ac3216a54d..a2fae70ee174 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -60,6 +60,7 @@ static u32 kvm_pmu_get_perf_event_type(unsigned long eidx) type = PERF_TYPE_HW_CACHE; break; case SBI_PMU_EVENT_TYPE_RAW: + case SBI_PMU_EVENT_TYPE_RAW_V2: case SBI_PMU_EVENT_TYPE_FW: type = PERF_TYPE_RAW; break; @@ -128,6 +129,9 @@ static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data) case SBI_PMU_EVENT_TYPE_RAW: config = evt_data & RISCV_PMU_RAW_EVENT_MASK; break; + case SBI_PMU_EVENT_TYPE_RAW_V2: + config = evt_data & RISCV_PMU_RAW_EVENT_V2_MASK; + break; case SBI_PMU_EVENT_TYPE_FW: if (ecode < SBI_PMU_FW_MAX) config = (1ULL << 63) | ecode; @@ -405,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data); int sbiret = 0; gpa_t saddr; - unsigned long hva; - bool writable; if (!kvpmu || flags) { sbiret = SBI_ERR_INVALID_PARAM; @@ -428,19 +430,14 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s goto out; } - hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable); - if (kvm_is_error_hva(hva) || !writable) { - sbiret = SBI_ERR_INVALID_ADDRESS; - goto out; - } - kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC); if (!kvpmu->sdata) return -ENOMEM; + /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */ if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) { kfree(kvpmu->sdata); - sbiret = SBI_ERR_FAILURE; + sbiret = SBI_ERR_INVALID_ADDRESS; goto out; } @@ -452,6 +449,65 @@ out: return 0; } +int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low, + unsigned long saddr_high, unsigned long num_events, + unsigned long flags, struct kvm_vcpu_sbi_return *retdata) +{ + struct riscv_pmu_event_info *einfo = NULL; + int shmem_size = num_events * sizeof(*einfo); + gpa_t shmem; + u32 eidx, etype; + u64 econfig; + int ret; + + if (flags != 0 || (saddr_low & (SZ_16 - 1) || num_events == 0)) { + ret = SBI_ERR_INVALID_PARAM; + goto out; + } + + shmem = saddr_low; + if (saddr_high != 0) { + if (IS_ENABLED(CONFIG_32BIT)) { + shmem |= ((gpa_t)saddr_high << 32); + } else { + ret = SBI_ERR_INVALID_ADDRESS; + goto out; + } + } + + einfo = kzalloc(shmem_size, GFP_KERNEL); + if (!einfo) + return -ENOMEM; + + ret = 
kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size); + if (ret) { + ret = SBI_ERR_FAILURE; + goto free_mem; + } + + for (int i = 0; i < num_events; i++) { + eidx = einfo[i].event_idx; + etype = kvm_pmu_get_perf_event_type(eidx); + econfig = kvm_pmu_get_perf_event_config(eidx, einfo[i].event_data); + ret = riscv_pmu_get_event_info(etype, econfig, NULL); + einfo[i].output = (ret > 0) ? 1 : 0; + } + + ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size); + if (ret) { + ret = SBI_ERR_INVALID_ADDRESS; + goto free_mem; + } + + ret = 0; +free_mem: + kfree(einfo); +out: + retdata->err_val = ret; + + return 0; +} + int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata) { diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index a56c4959f9ad..1b13623380e1 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -79,6 +79,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = { .ext_ptr = &vcpu_sbi_ext_sta, }, { + .ext_idx = KVM_RISCV_SBI_EXT_FWFT, + .ext_ptr = &vcpu_sbi_ext_fwft, + }, + { .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL, .ext_ptr = &vcpu_sbi_ext_experimental, }, @@ -106,7 +110,7 @@ riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx) return sext; } -bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx) +static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx) { struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; const struct kvm_riscv_sbi_extension_entry *sext; @@ -284,6 +288,31 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu, return 0; } +int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + unsigned int n = 0; + + for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) { + u64 size = IS_ENABLED(CONFIG_32BIT) ? + KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | + KVM_REG_RISCV_SBI_SINGLE | i; + + if (!riscv_vcpu_supports_sbi_ext(vcpu, i)) + continue; + + if (uindices) { + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + + n++; + } + + return n; +} + int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { @@ -360,64 +389,163 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, return 0; } -int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) +int kvm_riscv_vcpu_reg_indices_sbi(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *entry; + const struct kvm_vcpu_sbi_extension *ext; + unsigned long state_reg_count; + int i, j, rc, count = 0; + u64 reg; + + for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + entry = &sbi_ext[i]; + ext = entry->ext_ptr; + + if (!ext->get_state_reg_count || + scontext->ext_status[entry->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED) + continue; + + state_reg_count = ext->get_state_reg_count(vcpu); + if (!uindices) + goto skip_put_user; + + for (j = 0; j < state_reg_count; j++) { + if (ext->get_state_reg_id) { + rc = ext->get_state_reg_id(vcpu, j, ®); + if (rc) + return rc; + } else { + reg = KVM_REG_RISCV | + (IS_ENABLED(CONFIG_32BIT) ? 
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) | + KVM_REG_RISCV_SBI_STATE | + ext->state_reg_subtype | j; + } + + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + +skip_put_user: + count += state_reg_count; + } + + return count; +} + +static const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext_withstate(struct kvm_vcpu *vcpu, + unsigned long subtype) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *entry; + const struct kvm_vcpu_sbi_extension *ext; + int i; + + for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + entry = &sbi_ext[i]; + ext = entry->ext_ptr; + + if (ext->get_state_reg_count && + ext->state_reg_subtype == subtype && + scontext->ext_status[entry->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) + return ext; + } + + return NULL; +} + +int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { unsigned long __user *uaddr = (unsigned long __user *)(unsigned long)reg->addr; unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_RISCV_SBI_STATE); - unsigned long reg_subtype, reg_val; - - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + const struct kvm_vcpu_sbi_extension *ext; + unsigned long reg_subtype; + void *reg_val; + u64 data64; + u32 data32; + u16 data16; + u8 data8; + + switch (KVM_REG_SIZE(reg->id)) { + case 1: + reg_val = &data8; + break; + case 2: + reg_val = &data16; + break; + case 4: + reg_val = &data32; + break; + case 8: + reg_val = &data64; + break; + default: return -EINVAL; + } - if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; - switch (reg_subtype) { - case KVM_REG_RISCV_SBI_STA: - return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val); - default: + ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype); + if (!ext || !ext->set_state_reg) return -EINVAL; - } - return 0; + return ext->set_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val); } -int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, - const struct kvm_one_reg *reg) +int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { unsigned long __user *uaddr = (unsigned long __user *)(unsigned long)reg->addr; unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_RISCV_SBI_STATE); - unsigned long reg_subtype, reg_val; + const struct kvm_vcpu_sbi_extension *ext; + unsigned long reg_subtype; + void *reg_val; + u64 data64; + u32 data32; + u16 data16; + u8 data8; int ret; - if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + switch (KVM_REG_SIZE(reg->id)) { + case 1: + reg_val = &data8; + break; + case 2: + reg_val = &data16; + break; + case 4: + reg_val = &data32; + break; + case 8: + reg_val = &data64; + break; + default: return -EINVAL; + } reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; - switch (reg_subtype) { - case KVM_REG_RISCV_SBI_STA: - ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, ®_val); - break; - default: + ext = kvm_vcpu_sbi_find_ext_withstate(vcpu, reg_subtype); + if (!ext || !ext->get_state_reg) return -EINVAL; - } + ret = ext->get_state_reg(vcpu, reg_num, KVM_REG_SIZE(reg->id), reg_val); if (ret) return ret; - if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id))) return -EFAULT; return 0; diff --git 
a/arch/riscv/kvm/vcpu_sbi_fwft.c b/arch/riscv/kvm/vcpu_sbi_fwft.c new file mode 100644 index 000000000000..62cc9c3d5759 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_fwft.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025 Rivos Inc. + * + * Authors: + * Clément Léger <cleger@rivosinc.com> + */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <asm/cpufeature.h> +#include <asm/sbi.h> +#include <asm/kvm_vcpu_sbi.h> +#include <asm/kvm_vcpu_sbi_fwft.h> + +#define MIS_DELEG (BIT_ULL(EXC_LOAD_MISALIGNED) | BIT_ULL(EXC_STORE_MISALIGNED)) + +struct kvm_sbi_fwft_feature { + /** + * @id: Feature ID + */ + enum sbi_fwft_feature_t id; + + /** + * @first_reg_num: ONE_REG index of the first ONE_REG register + */ + unsigned long first_reg_num; + + /** + * @supported: Check if the feature is supported on the vcpu + * + * This callback is optional, if not provided the feature is assumed to + * be supported + */ + bool (*supported)(struct kvm_vcpu *vcpu); + + /** + * @reset: Reset the feature value irrespective whether feature is supported or not + * + * This callback is mandatory + */ + void (*reset)(struct kvm_vcpu *vcpu); + + /** + * @set: Set the feature value + * + * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*) + * + * This callback is mandatory + */ + long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long value); + + /** + * @get: Get the feature current value + * + * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*) + * + * This callback is mandatory + */ + long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long *value); +}; + +static const enum sbi_fwft_feature_t kvm_fwft_defined_features[] = { + SBI_FWFT_MISALIGNED_EXC_DELEG, + SBI_FWFT_LANDING_PAD, + SBI_FWFT_SHADOW_STACK, + SBI_FWFT_DOUBLE_TRAP, + SBI_FWFT_PTE_AD_HW_UPDATING, + SBI_FWFT_POINTER_MASKING_PMLEN, +}; + +static bool kvm_fwft_is_defined_feature(enum sbi_fwft_feature_t feature) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kvm_fwft_defined_features); i++) { + if (kvm_fwft_defined_features[i] == feature) + return true; + } + + return false; +} + +static bool kvm_sbi_fwft_misaligned_delegation_supported(struct kvm_vcpu *vcpu) +{ + return misaligned_traps_can_delegate(); +} + +static void kvm_sbi_fwft_reset_misaligned_delegation(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; + + cfg->hedeleg &= ~MIS_DELEG; +} + +static long kvm_sbi_fwft_set_misaligned_delegation(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long value) +{ + struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; + + if (value == 1) { + cfg->hedeleg |= MIS_DELEG; + if (!one_reg_access) + csr_set(CSR_HEDELEG, MIS_DELEG); + } else if (value == 0) { + cfg->hedeleg &= ~MIS_DELEG; + if (!one_reg_access) + csr_clear(CSR_HEDELEG, MIS_DELEG); + } else { + return SBI_ERR_INVALID_PARAM; + } + + return SBI_SUCCESS; +} + +static long kvm_sbi_fwft_get_misaligned_delegation(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long *value) +{ + struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; + + *value = (cfg->hedeleg & MIS_DELEG) == MIS_DELEG; + return SBI_SUCCESS; +} + +#ifndef CONFIG_32BIT + +static bool try_to_set_pmm(unsigned long value) +{ + csr_set(CSR_HENVCFG, value); + return (csr_read_clear(CSR_HENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value; +} + +static bool 
kvm_sbi_fwft_pointer_masking_pmlen_supported(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + if (!riscv_isa_extension_available(vcpu->arch.isa, SMNPM)) + return false; + + fwft->have_vs_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7); + fwft->have_vs_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16); + + return fwft->have_vs_pmlen_7 || fwft->have_vs_pmlen_16; +} + +static void kvm_sbi_fwft_reset_pointer_masking_pmlen(struct kvm_vcpu *vcpu) +{ + vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM; +} + +static long kvm_sbi_fwft_set_pointer_masking_pmlen(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long value) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + unsigned long pmm; + + switch (value) { + case 0: + pmm = ENVCFG_PMM_PMLEN_0; + break; + case 7: + if (!fwft->have_vs_pmlen_7) + return SBI_ERR_INVALID_PARAM; + pmm = ENVCFG_PMM_PMLEN_7; + break; + case 16: + if (!fwft->have_vs_pmlen_16) + return SBI_ERR_INVALID_PARAM; + pmm = ENVCFG_PMM_PMLEN_16; + break; + default: + return SBI_ERR_INVALID_PARAM; + } + + vcpu->arch.cfg.henvcfg &= ~ENVCFG_PMM; + vcpu->arch.cfg.henvcfg |= pmm; + + /* + * Instead of waiting for vcpu_load/put() to update HENVCFG CSR, + * update here so that VCPU see's pointer masking mode change + * immediately. + */ + if (!one_reg_access) + csr_write(CSR_HENVCFG, vcpu->arch.cfg.henvcfg); + + return SBI_SUCCESS; +} + +static long kvm_sbi_fwft_get_pointer_masking_pmlen(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + bool one_reg_access, unsigned long *value) +{ + switch (vcpu->arch.cfg.henvcfg & ENVCFG_PMM) { + case ENVCFG_PMM_PMLEN_0: + *value = 0; + break; + case ENVCFG_PMM_PMLEN_7: + *value = 7; + break; + case ENVCFG_PMM_PMLEN_16: + *value = 16; + break; + default: + return SBI_ERR_FAILURE; + } + + return SBI_SUCCESS; +} + +#endif + +static const struct kvm_sbi_fwft_feature features[] = { + { + .id = SBI_FWFT_MISALIGNED_EXC_DELEG, + .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, misaligned_deleg.enable) / + sizeof(unsigned long), + .supported = kvm_sbi_fwft_misaligned_delegation_supported, + .reset = kvm_sbi_fwft_reset_misaligned_delegation, + .set = kvm_sbi_fwft_set_misaligned_delegation, + .get = kvm_sbi_fwft_get_misaligned_delegation, + }, +#ifndef CONFIG_32BIT + { + .id = SBI_FWFT_POINTER_MASKING_PMLEN, + .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, pointer_masking.enable) / + sizeof(unsigned long), + .supported = kvm_sbi_fwft_pointer_masking_pmlen_supported, + .reset = kvm_sbi_fwft_reset_pointer_masking_pmlen, + .set = kvm_sbi_fwft_set_pointer_masking_pmlen, + .get = kvm_sbi_fwft_get_pointer_masking_pmlen, + }, +#endif +}; + +static const struct kvm_sbi_fwft_feature *kvm_sbi_fwft_regnum_to_feature(unsigned long reg_num) +{ + const struct kvm_sbi_fwft_feature *feature; + int i; + + for (i = 0; i < ARRAY_SIZE(features); i++) { + feature = &features[i]; + if (feature->first_reg_num <= reg_num && reg_num < (feature->first_reg_num + 3)) + return feature; + } + + return NULL; +} + +static struct kvm_sbi_fwft_config * +kvm_sbi_fwft_get_config(struct kvm_vcpu *vcpu, enum sbi_fwft_feature_t feature) +{ + int i; + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + for (i = 0; i < ARRAY_SIZE(features); i++) { + if (fwft->configs[i].feature->id == feature) + return &fwft->configs[i]; + } + + return NULL; +} + +static int kvm_fwft_get_feature(struct kvm_vcpu *vcpu, u32 feature, + struct kvm_sbi_fwft_config **conf) +{ + struct kvm_sbi_fwft_config *tconf; + + tconf = 
kvm_sbi_fwft_get_config(vcpu, feature); + if (!tconf) { + if (kvm_fwft_is_defined_feature(feature)) + return SBI_ERR_NOT_SUPPORTED; + + return SBI_ERR_DENIED; + } + + if (!tconf->supported || !tconf->enabled) + return SBI_ERR_NOT_SUPPORTED; + + *conf = tconf; + + return SBI_SUCCESS; +} + +static int kvm_sbi_fwft_set(struct kvm_vcpu *vcpu, u32 feature, + unsigned long value, unsigned long flags) +{ + int ret; + struct kvm_sbi_fwft_config *conf; + + ret = kvm_fwft_get_feature(vcpu, feature, &conf); + if (ret) + return ret; + + if ((flags & ~SBI_FWFT_SET_FLAG_LOCK) != 0) + return SBI_ERR_INVALID_PARAM; + + if (conf->flags & SBI_FWFT_SET_FLAG_LOCK) + return SBI_ERR_DENIED_LOCKED; + + conf->flags = flags; + + return conf->feature->set(vcpu, conf, false, value); +} + +static int kvm_sbi_fwft_get(struct kvm_vcpu *vcpu, unsigned long feature, + unsigned long *value) +{ + int ret; + struct kvm_sbi_fwft_config *conf; + + ret = kvm_fwft_get_feature(vcpu, feature, &conf); + if (ret) + return ret; + + return conf->feature->get(vcpu, conf, false, value); +} + +static int kvm_sbi_ext_fwft_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_vcpu_sbi_return *retdata) +{ + int ret; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long funcid = cp->a6; + + switch (funcid) { + case SBI_EXT_FWFT_SET: + ret = kvm_sbi_fwft_set(vcpu, cp->a0, cp->a1, cp->a2); + break; + case SBI_EXT_FWFT_GET: + ret = kvm_sbi_fwft_get(vcpu, cp->a0, &retdata->out_val); + break; + default: + ret = SBI_ERR_NOT_SUPPORTED; + break; + } + + retdata->err_val = ret; + + return 0; +} + +static int kvm_sbi_ext_fwft_init(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + int i; + + fwft->configs = kcalloc(ARRAY_SIZE(features), sizeof(struct kvm_sbi_fwft_config), + GFP_KERNEL); + if (!fwft->configs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(features); i++) { + feature = &features[i]; + conf = &fwft->configs[i]; + if (feature->supported) + conf->supported = feature->supported(vcpu); + else + conf->supported = true; + + conf->enabled = conf->supported; + conf->feature = feature; + } + + return 0; +} + +static void kvm_sbi_ext_fwft_deinit(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + kfree(fwft->configs); +} + +static void kvm_sbi_ext_fwft_reset(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + const struct kvm_sbi_fwft_feature *feature; + int i; + + for (i = 0; i < ARRAY_SIZE(features); i++) { + fwft->configs[i].flags = 0; + feature = &features[i]; + if (feature->reset) + feature->reset(vcpu); + } +} + +static unsigned long kvm_sbi_ext_fwft_get_reg_count(struct kvm_vcpu *vcpu) +{ + unsigned long max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long); + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + unsigned long reg, ret = 0; + + for (reg = 0; reg < max_reg_count; reg++) { + feature = kvm_sbi_fwft_regnum_to_feature(reg); + if (!feature) + continue; + + conf = kvm_sbi_fwft_get_config(vcpu, feature->id); + if (!conf || !conf->supported) + continue; + + ret++; + } + + return ret; +} + +static int kvm_sbi_ext_fwft_get_reg_id(struct kvm_vcpu *vcpu, int index, u64 *reg_id) +{ + int reg, max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long); + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + int idx = 0; + + for (reg = 0; reg < 
max_reg_count; reg++) { + feature = kvm_sbi_fwft_regnum_to_feature(reg); + if (!feature) + continue; + + conf = kvm_sbi_fwft_get_config(vcpu, feature->id); + if (!conf || !conf->supported) + continue; + + if (index == idx) { + *reg_id = KVM_REG_RISCV | + (IS_ENABLED(CONFIG_32BIT) ? + KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) | + KVM_REG_RISCV_SBI_STATE | + KVM_REG_RISCV_SBI_FWFT | reg; + return 0; + } + + idx++; + } + + return -ENOENT; +} + +static int kvm_sbi_ext_fwft_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_size, void *reg_val) +{ + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + unsigned long *value; + int ret = 0; + + if (reg_size != sizeof(unsigned long)) + return -EINVAL; + value = reg_val; + + feature = kvm_sbi_fwft_regnum_to_feature(reg_num); + if (!feature) + return -ENOENT; + + conf = kvm_sbi_fwft_get_config(vcpu, feature->id); + if (!conf || !conf->supported) + return -ENOENT; + + switch (reg_num - feature->first_reg_num) { + case 0: + *value = conf->enabled; + break; + case 1: + *value = conf->flags; + break; + case 2: + ret = conf->feature->get(vcpu, conf, true, value); + break; + default: + return -ENOENT; + } + + return sbi_err_map_linux_errno(ret); +} + +static int kvm_sbi_ext_fwft_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_size, const void *reg_val) +{ + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + unsigned long value; + int ret = 0; + + if (reg_size != sizeof(unsigned long)) + return -EINVAL; + value = *(const unsigned long *)reg_val; + + feature = kvm_sbi_fwft_regnum_to_feature(reg_num); + if (!feature) + return -ENOENT; + + conf = kvm_sbi_fwft_get_config(vcpu, feature->id); + if (!conf || !conf->supported) + return -ENOENT; + + switch (reg_num - feature->first_reg_num) { + case 0: + switch (value) { + case 0: + conf->enabled = false; + break; + case 1: + conf->enabled = true; + break; + default: + return -EINVAL; + } + break; + case 1: + conf->flags = value & SBI_FWFT_SET_FLAG_LOCK; + break; + case 2: + ret = conf->feature->set(vcpu, conf, true, value); + break; + default: + return -ENOENT; + } + + return sbi_err_map_linux_errno(ret); +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft = { + .extid_start = SBI_EXT_FWFT, + .extid_end = SBI_EXT_FWFT, + .handler = kvm_sbi_ext_fwft_handler, + .init = kvm_sbi_ext_fwft_init, + .deinit = kvm_sbi_ext_fwft_deinit, + .reset = kvm_sbi_ext_fwft_reset, + .state_reg_subtype = KVM_REG_RISCV_SBI_FWFT, + .get_state_reg_count = kvm_sbi_ext_fwft_get_reg_count, + .get_state_reg_id = kvm_sbi_ext_fwft_get_reg_id, + .get_state_reg = kvm_sbi_ext_fwft_get_reg, + .set_state_reg = kvm_sbi_ext_fwft_set_reg, +}; diff --git a/arch/riscv/kvm/vcpu_sbi_pmu.c b/arch/riscv/kvm/vcpu_sbi_pmu.c index e4be34e03e83..a020d979d179 100644 --- a/arch/riscv/kvm/vcpu_sbi_pmu.c +++ b/arch/riscv/kvm/vcpu_sbi_pmu.c @@ -73,6 +73,9 @@ static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM: ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata); break; + case SBI_EXT_PMU_EVENT_GET_INFO: + ret = kvm_riscv_vcpu_pmu_event_info(vcpu, cp->a0, cp->a1, cp->a2, cp->a3, retdata); + break; default: retdata->err_val = SBI_ERR_NOT_SUPPORTED; } diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c index cc6cb7c8f0e4..afa0545c3bcf 100644 --- a/arch/riscv/kvm/vcpu_sbi_sta.c +++ b/arch/riscv/kvm/vcpu_sbi_sta.c @@ -85,8 +85,6 @@ static int 
kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) unsigned long shmem_phys_hi = cp->a1; u32 flags = cp->a2; struct sbi_sta_struct zero_sta = {0}; - unsigned long hva; - bool writable; gpa_t shmem; int ret; @@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) return SBI_ERR_INVALID_ADDRESS; } - hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable); - if (kvm_is_error_hva(hva) || !writable) - return SBI_ERR_INVALID_ADDRESS; - + /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */ ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); if (ret) - return SBI_ERR_FAILURE; + return SBI_ERR_INVALID_ADDRESS; vcpu->arch.sta.shmem = shmem; vcpu->arch.sta.last_steal = current->sched_info.run_delay; @@ -151,63 +146,82 @@ static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu) return !!sched_info_on(); } -const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = { - .extid_start = SBI_EXT_STA, - .extid_end = SBI_EXT_STA, - .handler = kvm_sbi_ext_sta_handler, - .probe = kvm_sbi_ext_sta_probe, - .reset = kvm_riscv_vcpu_sbi_sta_reset, -}; +static unsigned long kvm_sbi_ext_sta_get_state_reg_count(struct kvm_vcpu *vcpu) +{ + return sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long); +} -int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, - unsigned long reg_num, - unsigned long *reg_val) +static int kvm_sbi_ext_sta_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_size, void *reg_val) { + unsigned long *value; + + if (reg_size != sizeof(unsigned long)) + return -EINVAL; + value = reg_val; + switch (reg_num) { case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): - *reg_val = (unsigned long)vcpu->arch.sta.shmem; + *value = (unsigned long)vcpu->arch.sta.shmem; break; case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): if (IS_ENABLED(CONFIG_32BIT)) - *reg_val = upper_32_bits(vcpu->arch.sta.shmem); + *value = upper_32_bits(vcpu->arch.sta.shmem); else - *reg_val = 0; + *value = 0; break; default: - return -EINVAL; + return -ENOENT; } return 0; } -int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, - unsigned long reg_num, - unsigned long reg_val) +static int kvm_sbi_ext_sta_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_size, const void *reg_val) { + unsigned long value; + + if (reg_size != sizeof(unsigned long)) + return -EINVAL; + value = *(const unsigned long *)reg_val; + switch (reg_num) { case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): if (IS_ENABLED(CONFIG_32BIT)) { gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem); - vcpu->arch.sta.shmem = reg_val; + vcpu->arch.sta.shmem = value; vcpu->arch.sta.shmem |= hi << 32; } else { - vcpu->arch.sta.shmem = reg_val; + vcpu->arch.sta.shmem = value; } break; case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): if (IS_ENABLED(CONFIG_32BIT)) { gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem); - vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32); + vcpu->arch.sta.shmem = ((gpa_t)value << 32); vcpu->arch.sta.shmem |= lo; - } else if (reg_val != 0) { + } else if (value != 0) { return -EINVAL; } break; default: - return -EINVAL; + return -ENOENT; } return 0; } + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = { + .extid_start = SBI_EXT_STA, + .extid_end = SBI_EXT_STA, + .handler = kvm_sbi_ext_sta_handler, + .probe = kvm_sbi_ext_sta_probe, + .reset = kvm_riscv_vcpu_sbi_sta_reset, + .state_reg_subtype = KVM_REG_RISCV_SBI_STA, + .get_state_reg_count = kvm_sbi_ext_sta_get_state_reg_count, + .get_state_reg = kvm_sbi_ext_sta_get_reg, + 
.set_state_reg = kvm_sbi_ext_sta_set_reg, +}; diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c index 3b426c800480..abb1c2bf2542 100644 --- a/arch/riscv/kvm/vmid.c +++ b/arch/riscv/kvm/vmid.c @@ -14,6 +14,7 @@ #include <linux/smp.h> #include <linux/kvm_host.h> #include <asm/csr.h> +#include <asm/kvm_mmu.h> #include <asm/kvm_tlb.h> #include <asm/kvm_vmid.h> @@ -24,15 +25,12 @@ static DEFINE_SPINLOCK(vmid_lock); void __init kvm_riscv_gstage_vmid_detect(void) { - unsigned long old; - /* Figure-out number of VMID bits in HW */ - old = csr_read(CSR_HGATP); - csr_write(CSR_HGATP, old | HGATP_VMID); + csr_write(CSR_HGATP, (kvm_riscv_gstage_mode << HGATP_MODE_SHIFT) | HGATP_VMID); vmid_bits = csr_read(CSR_HGATP); vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT; vmid_bits = fls_long(vmid_bits); - csr_write(CSR_HGATP, old); + csr_write(CSR_HGATP, 0); /* We polluted local TLB so flush all guest TLB */ kvm_riscv_local_hfence_gvma_all(); |
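
The FWFT handler added above dispatches on the SBI function id in a6 (SBI_EXT_FWFT_SET / SBI_EXT_FWFT_GET), with the feature id in a0, the value in a1 and the flags in a2. Below is a minimal guest-side sketch of that calling convention, assuming the guest kernel's sbi_ecall() helper and the SBI_EXT_FWFT / SBI_FWFT_* constants that the rest of this series adds to <asm/sbi.h>; it is illustrative only, not part of this patch.

  /*
   * Hedged sketch: toggle misaligned load/store exception delegation from
   * inside the guest via the FWFT extension emulated above, then read it
   * back. KVM serves the GET from vcpu->arch.cfg.hedeleg.
   */
  #include <linux/errno.h>
  #include <linux/types.h>
  #include <asm/sbi.h>

  static int guest_set_misaligned_deleg(bool enable)
  {
  	struct sbiret ret;

  	/* a6 = SBI_EXT_FWFT_SET, a0 = feature, a1 = value, a2 = flags */
  	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
  			SBI_FWFT_MISALIGNED_EXC_DELEG, enable ? 1 : 0, 0,
  			0, 0, 0);
  	if (ret.error)
  		return sbi_err_map_linux_errno(ret.error);

  	/* a6 = SBI_EXT_FWFT_GET, a0 = feature; value comes back in a1 */
  	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_GET,
  			SBI_FWFT_MISALIGNED_EXC_DELEG, 0, 0, 0, 0, 0);
  	if (ret.error)
  		return sbi_err_map_linux_errno(ret.error);

  	return ret.value == (enable ? 1 : 0) ? 0 : -EIO;
  }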
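
On the host-userspace side, the new state_reg_subtype / get_state_reg / set_state_reg hooks expose each supported FWFT feature as three ONE_REG registers (enable, flags, value) under KVM_REG_RISCV_SBI_STATE with the KVM_REG_RISCV_SBI_FWFT subtype. Below is a rough VMM-side sketch for a 64-bit host, assuming the uapi definitions added elsewhere in this series and a vcpu fd obtained via KVM_CREATE_VCPU; register numbers mirror the layout of struct kvm_riscv_sbi_fwft.

  /* Hedged sketch: read and write one FWFT state register from a VMM. */
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define FWFT_REG_ID(n)	(KVM_REG_RISCV | KVM_REG_SIZE_U64 |	\
  			 KVM_REG_RISCV_SBI_STATE |		\
  			 KVM_REG_RISCV_SBI_FWFT | (uint64_t)(n))

  static int vmm_get_fwft_reg(int vcpu_fd, uint64_t reg_num, uint64_t *val)
  {
  	struct kvm_one_reg reg = {
  		.id = FWFT_REG_ID(reg_num),
  		.addr = (uintptr_t)val,
  	};

  	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }

  static int vmm_set_fwft_reg(int vcpu_fd, uint64_t reg_num, uint64_t val)
  {
  	struct kvm_one_reg reg = {
  		.id = FWFT_REG_ID(reg_num),
  		.addr = (uintptr_t)&val,
  	};

  	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
  }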