Diffstat (limited to 'arch/riscv/kvm/vcpu_sbi_sta.c')
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_sta.c  72
1 file changed, 43 insertions(+), 29 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index cc6cb7c8f0e4..afa0545c3bcf 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
unsigned long shmem_phys_hi = cp->a1;
u32 flags = cp->a2;
struct sbi_sta_struct zero_sta = {0};
- unsigned long hva;
- bool writable;
gpa_t shmem;
int ret;
@@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
return SBI_ERR_INVALID_ADDRESS;
}
- hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
- if (kvm_is_error_hva(hva) || !writable)
- return SBI_ERR_INVALID_ADDRESS;
-
+ /* No explicit writable-slot check is needed: kvm_vcpu_write_guest() does it internally */
ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
if (ret)
- return SBI_ERR_FAILURE;
+ return SBI_ERR_INVALID_ADDRESS;
vcpu->arch.sta.shmem = shmem;
vcpu->arch.sta.last_steal = current->sched_info.run_delay;
@@ -151,63 +146,82 @@ static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
return !!sched_info_on();
}
-const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
- .extid_start = SBI_EXT_STA,
- .extid_end = SBI_EXT_STA,
- .handler = kvm_sbi_ext_sta_handler,
- .probe = kvm_sbi_ext_sta_probe,
- .reset = kvm_riscv_vcpu_sbi_sta_reset,
-};
+static unsigned long kvm_sbi_ext_sta_get_state_reg_count(struct kvm_vcpu *vcpu)
+{
+ return sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
+}
-int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long *reg_val)
+static int kvm_sbi_ext_sta_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, void *reg_val)
{
+ unsigned long *value;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = reg_val;
+
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
- *reg_val = (unsigned long)vcpu->arch.sta.shmem;
+ *value = (unsigned long)vcpu->arch.sta.shmem;
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT))
- *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
+ *value = upper_32_bits(vcpu->arch.sta.shmem);
else
- *reg_val = 0;
+ *value = 0;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
}
-int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long reg_val)
+static int kvm_sbi_ext_sta_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, const void *reg_val)
{
+ unsigned long value;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = *(const unsigned long *)reg_val;
+
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
- vcpu->arch.sta.shmem = reg_val;
+ vcpu->arch.sta.shmem = value;
vcpu->arch.sta.shmem |= hi << 32;
} else {
- vcpu->arch.sta.shmem = reg_val;
+ vcpu->arch.sta.shmem = value;
}
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
- vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
+ vcpu->arch.sta.shmem = ((gpa_t)value << 32);
vcpu->arch.sta.shmem |= lo;
- } else if (reg_val != 0) {
+ } else if (value != 0) {
return -EINVAL;
}
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
+ .extid_start = SBI_EXT_STA,
+ .extid_end = SBI_EXT_STA,
+ .handler = kvm_sbi_ext_sta_handler,
+ .probe = kvm_sbi_ext_sta_probe,
+ .reset = kvm_riscv_vcpu_sbi_sta_reset,
+ .state_reg_subtype = KVM_REG_RISCV_SBI_STA,
+ .get_state_reg_count = kvm_sbi_ext_sta_get_state_reg_count,
+ .get_state_reg = kvm_sbi_ext_sta_get_reg,
+ .set_state_reg = kvm_sbi_ext_sta_set_reg,
+};
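The get_state_reg()/set_state_reg() callbacks registered above are reached from userspace through KVM's generic ONE_REG interface. Below is a minimal, hypothetical userspace sketch (not part of this patch) showing how the STA shmem registers handled in this file could be read and written. It assumes a RISC-V host, an already-created vCPU fd, and the KVM_REG_RISCV_SBI_STATE / KVM_REG_RISCV_SBI_STA / KVM_REG_RISCV_SBI_STA_REG() macros from the uapi header <asm/kvm.h>; the helper names sta_reg_id(), sta_get_shmem_lo() and sta_set_shmem_lo() are invented for illustration, and the KVM_REG_SIZE_ULONG fallback assumes RV64.

/*
 * Hypothetical userspace sketch: accessing the SBI STA state registers
 * via KVM_GET_ONE_REG / KVM_SET_ONE_REG. Helper names are made up.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

#ifndef KVM_REG_SIZE_ULONG
#define KVM_REG_SIZE_ULONG KVM_REG_SIZE_U64	/* assumption: RV64 host */
#endif

/* Compose the ONE_REG ID for one unsigned-long STA state register. */
static uint64_t sta_reg_id(uint64_t reg_num)
{
	return KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
	       KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | reg_num;
}

static int sta_get_shmem_lo(int vcpu_fd, unsigned long *val)
{
	struct kvm_one_reg reg = {
		.id   = sta_reg_id(KVM_REG_RISCV_SBI_STA_REG(shmem_lo)),
		.addr = (uint64_t)(unsigned long)val,
	};

	/* Ends up in kvm_sbi_ext_sta_get_reg() above. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int sta_set_shmem_lo(int vcpu_fd, unsigned long val)
{
	struct kvm_one_reg reg = {
		.id   = sta_reg_id(KVM_REG_RISCV_SBI_STA_REG(shmem_lo)),
		.addr = (uint64_t)(unsigned long)&val,
	};

	/* Ends up in kvm_sbi_ext_sta_set_reg() above. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

With this shape, the -ENOENT returned by the default: branches above should surface as an ENOENT errno from the ioctl, letting callers distinguish an unknown register number from a bad register size (-EINVAL).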