Diffstat (limited to 'tools/testing')
71 files changed, 3746 insertions, 319 deletions
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 36d03860d9d8..c46ebdb9b8ef 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -125,6 +125,7 @@ TARGETS += uevent TARGETS += user_events TARGETS += vDSO TARGETS += mm +TARGETS += vfio TARGETS += x86 TARGETS += x86/bugs TARGETS += zram diff --git a/tools/testing/selftests/bpf/prog_tests/sha256.c b/tools/testing/selftests/bpf/prog_tests/sha256.c new file mode 100644 index 000000000000..604a0b1423d5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sha256.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright 2025 Google LLC */ + +#include <test_progs.h> +#include "bpf/libbpf_internal.h" + +#define MAX_LEN 4096 + +/* Test libbpf_sha256() for all lengths from 0 to MAX_LEN inclusively. */ +void test_sha256(void) +{ + /* + * The correctness of this value was verified by running this test with + * libbpf_sha256() replaced by OpenSSL's SHA256(). + */ + static const __u8 expected_digest_of_digests[SHA256_DIGEST_LENGTH] = { + 0x62, 0x30, 0x0e, 0x1d, 0xea, 0x7f, 0xc4, 0x74, + 0xfd, 0x8e, 0x64, 0x0b, 0xd8, 0x5f, 0xea, 0x04, + 0xf3, 0xef, 0x77, 0x42, 0xc2, 0x01, 0xb8, 0x90, + 0x6e, 0x19, 0x91, 0x1b, 0xca, 0xb3, 0x28, 0x42, + }; + __u64 seed = 0; + __u8 *data = NULL, *digests = NULL; + __u8 digest_of_digests[SHA256_DIGEST_LENGTH]; + size_t i; + + data = malloc(MAX_LEN); + if (!ASSERT_OK_PTR(data, "malloc")) + goto out; + digests = malloc((MAX_LEN + 1) * SHA256_DIGEST_LENGTH); + if (!ASSERT_OK_PTR(digests, "malloc")) + goto out; + + /* Generate MAX_LEN bytes of "random" data deterministically. */ + for (i = 0; i < MAX_LEN; i++) { + seed = (seed * 25214903917 + 11) & ((1ULL << 48) - 1); + data[i] = (__u8)(seed >> 16); + } + + /* Calculate a digest for each length 0 through MAX_LEN inclusively. */ + for (i = 0; i <= MAX_LEN; i++) + libbpf_sha256(data, i, &digests[i * SHA256_DIGEST_LENGTH]); + + /* Calculate and verify the digest of all the digests. 
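The expected_digest_of_digests constant above can be re-derived out of tree by replaying the same construction against OpenSSL, the oracle the test's comment mentions. A minimal sketch, assuming libcrypto is installed (build with -lcrypto); the data generator is the same 48-bit LCG as the test (java.util.Random's multiplier with an addend of 11):

#include <openssl/sha.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LEN 4096

int main(void)
{
	static uint8_t data[MAX_LEN];
	uint8_t *digests = malloc((MAX_LEN + 1) * SHA256_DIGEST_LENGTH);
	uint8_t out[SHA256_DIGEST_LENGTH];
	uint64_t seed = 0;
	size_t i;

	if (!digests)
		return 1;

	/* Same deterministic "random" bytes as the selftest. */
	for (i = 0; i < MAX_LEN; i++) {
		seed = (seed * 25214903917ULL + 11) & ((1ULL << 48) - 1);
		data[i] = (uint8_t)(seed >> 16);
	}

	/* One digest per length 0..MAX_LEN, then the digest of the digests. */
	for (i = 0; i <= MAX_LEN; i++)
		SHA256(data, i, &digests[i * SHA256_DIGEST_LENGTH]);
	SHA256(digests, (MAX_LEN + 1) * SHA256_DIGEST_LENGTH, out);

	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("0x%02x,%c", out[i], (i + 1) % 8 ? ' ' : '\n');

	free(digests);
	return 0;
}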
*/ + libbpf_sha256(digests, (MAX_LEN + 1) * SHA256_DIGEST_LENGTH, + digest_of_digests); + ASSERT_MEMEQ(digest_of_digests, expected_digest_of_digests, + SHA256_DIGEST_LENGTH, "digest_of_digests"); +out: + free(data); + free(digests); +} diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index 6d75ede16e7c..955a37751b52 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -661,7 +661,7 @@ static void *worker_trigger(void *arg) rounds++; } - printf("tid %d trigger rounds: %lu\n", gettid(), rounds); + printf("tid %ld trigger rounds: %lu\n", sys_gettid(), rounds); return NULL; } @@ -704,7 +704,7 @@ static void *worker_attach(void *arg) rounds++; } - printf("tid %d attach rounds: %lu hits: %d\n", gettid(), rounds, skel->bss->executed); + printf("tid %ld attach rounds: %lu hits: %d\n", sys_gettid(), rounds, skel->bss->executed); uprobe_syscall_executed__destroy(skel); free(ref); return NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c index 4f7f45e69315..f4be5269fa90 100644 --- a/tools/testing/selftests/bpf/prog_tests/usdt.c +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -142,7 +142,7 @@ static void subtest_basic_usdt(bool optimized) goto cleanup; #endif - alled = TRIGGER(1); + called = TRIGGER(1); ASSERT_EQ(bss->usdt0_called, called, "usdt0_called"); ASSERT_EQ(bss->usdt3_called, called, "usdt3_called"); diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c index a9ab37d3b9e2..2129e4353fd9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c +++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c @@ -3,6 +3,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" #include "bpf_misc.h" #define MAX_ENTRIES 11 @@ -146,6 +147,24 @@ l0_%=: exit; \ : __clobber_all); } +SEC("socket") +__description("map_ptr illegal alu op, map_ptr = -map_ptr") +__failure __msg("R0 invalid mem access 'scalar'") +__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void map_ptr_illegal_alu_op(void) +{ + asm volatile (" \ + r0 = %[map_hash_48b] ll; \ + r0 = -r0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm_addr(map_hash_48b) + : __clobber_all); +} + SEC("flow_dissector") __description("flow_keys illegal alu op with variable offset") __failure __msg("R7 pointer arithmetic on flow_keys prohibited") @@ -165,4 +184,32 @@ __naked void flow_keys_illegal_variable_offset_alu(void) : __clobber_all); } +#define DEFINE_BAD_OFFSET_TEST(name, op, off, imm) \ + SEC("socket") \ + __failure __msg("BPF_ALU uses reserved fields") \ + __naked void name(void) \ + { \ + asm volatile( \ + "r0 = 1;" \ + ".8byte %[insn];" \ + "r0 = 0;" \ + "exit;" \ + : \ + : __imm_insn(insn, BPF_RAW_INSN((op), 0, 0, (off), (imm))) \ + : __clobber_all); \ + } + +/* + * Offset fields of 0 and 1 are legal for BPF_{DIV,MOD} instructions. + * Offset fields of 0 are legal for the rest of ALU instructions. + * Test that error is reported for illegal offsets, assuming that tests + * for legal offsets exist. 
+ */ +DEFINE_BAD_OFFSET_TEST(bad_offset_divx, BPF_ALU64 | BPF_DIV | BPF_X, -1, 0) +DEFINE_BAD_OFFSET_TEST(bad_offset_modk, BPF_ALU64 | BPF_MOD | BPF_K, -1, 1) +DEFINE_BAD_OFFSET_TEST(bad_offset_addx, BPF_ALU64 | BPF_ADD | BPF_X, -1, 0) +DEFINE_BAD_OFFSET_TEST(bad_offset_divx2, BPF_ALU64 | BPF_DIV | BPF_X, 2, 0) +DEFINE_BAD_OFFSET_TEST(bad_offset_modk2, BPF_ALU64 | BPF_MOD | BPF_K, 2, 1) +DEFINE_BAD_OFFSET_TEST(bad_offset_addx2, BPF_ALU64 | BPF_ADD | BPF_X, 1, 0) + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 171987627f3a..eeaab7013ca2 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -732,7 +732,7 @@ int bpf_get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel) if (cnt == max_cnt) { max_cnt += inc_cnt; - tmp_addrs = realloc(addrs, max_cnt); + tmp_addrs = realloc(addrs, max_cnt * sizeof(long)); if (!tmp_addrs) { err = -ENOMEM; goto error; diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm index 41b40c676d7f..8926ff6808cf 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm @@ -156,6 +156,7 @@ TEST_GEN_PROGS_arm64 = $(TEST_GEN_PROGS_COMMON) TEST_GEN_PROGS_arm64 += arm64/aarch32_id_regs TEST_GEN_PROGS_arm64 += arm64/arch_timer_edge_cases TEST_GEN_PROGS_arm64 += arm64/debug-exceptions +TEST_GEN_PROGS_arm64 += arm64/hello_el2 TEST_GEN_PROGS_arm64 += arm64/host_sve TEST_GEN_PROGS_arm64 += arm64/hypercalls TEST_GEN_PROGS_arm64 += arm64/external_aborts @@ -175,6 +176,7 @@ TEST_GEN_PROGS_arm64 += arch_timer TEST_GEN_PROGS_arm64 += coalesced_io_test TEST_GEN_PROGS_arm64 += dirty_log_perf_test TEST_GEN_PROGS_arm64 += get-reg-list +TEST_GEN_PROGS_arm64 += guest_memfd_test TEST_GEN_PROGS_arm64 += memslot_modification_stress_test TEST_GEN_PROGS_arm64 += memslot_perf_test TEST_GEN_PROGS_arm64 += mmu_stress_test @@ -196,9 +198,15 @@ TEST_GEN_PROGS_s390 += rseq_test TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON) TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test TEST_GEN_PROGS_riscv += riscv/ebreak_test +TEST_GEN_PROGS_riscv += access_tracking_perf_test TEST_GEN_PROGS_riscv += arch_timer TEST_GEN_PROGS_riscv += coalesced_io_test +TEST_GEN_PROGS_riscv += dirty_log_perf_test TEST_GEN_PROGS_riscv += get-reg-list +TEST_GEN_PROGS_riscv += memslot_modification_stress_test +TEST_GEN_PROGS_riscv += memslot_perf_test +TEST_GEN_PROGS_riscv += mmu_stress_test +TEST_GEN_PROGS_riscv += rseq_test TEST_GEN_PROGS_riscv += steal_time TEST_GEN_PROGS_loongarch += coalesced_io_test diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c index c9de66537ec3..b058f27b2141 100644 --- a/tools/testing/selftests/kvm/access_tracking_perf_test.c +++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c @@ -50,6 +50,7 @@ #include "memstress.h" #include "guest_modes.h" #include "processor.h" +#include "ucall_common.h" #include "cgroup_util.h" #include "lru_gen_util.h" diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c index eeba1cc87ff8..d592a4515399 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer.c @@ -165,10 +165,8 @@ static void guest_code(void) static void test_init_timer_irq(struct kvm_vm *vm) { /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ - 
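For reference, a DEFINE_BAD_OFFSET_TEST program can be reproduced outside the test_progs harness with plain libbpf. A sketch assuming libbpf 1.x (bpf_prog_load() lives in <bpf/bpf.h>); the verifier is expected to refuse the load with "BPF_ALU uses reserved fields", since only offsets 0 and 1 are defined for BPF_DIV/BPF_MOD and only 0 for the other ALU ops:

#include <errno.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
	/* r0 = 1; r1 = 2; r0 /= r1 with reserved off = -1; r0 = 0; exit */
	const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 1 },
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 1, .imm = 2 },
		{ .code = BPF_ALU64 | BPF_DIV | BPF_X, .dst_reg = 0,
		  .src_reg = 1, .off = -1 },
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
			       insns, sizeof(insns) / sizeof(insns[0]), NULL);

	if (fd < 0)
		printf("load rejected, as expected (fd = %d)\n", fd);
	return 0;
}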
vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq); - vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq); + ptimer_irq = vcpu_get_ptimer_irq(vcpus[0]); + vtimer_irq = vcpu_get_vtimer_irq(vcpus[0]); sync_global_to_guest(vm, ptimer_irq); sync_global_to_guest(vm, vtimer_irq); @@ -176,14 +174,14 @@ static void test_init_timer_irq(struct kvm_vm *vm) pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq); } -static int gic_fd; - struct kvm_vm *test_vm_create(void) { struct kvm_vm *vm; unsigned int i; int nr_vcpus = test_args.nr_vcpus; + TEST_REQUIRE(kvm_supports_vgic_v3()); + vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); vm_init_descriptor_tables(vm); @@ -204,8 +202,6 @@ struct kvm_vm *test_vm_create(void) vcpu_init_descriptor_tables(vcpus[i]); test_init_timer_irq(vm); - gic_fd = vgic_v3_setup(vm, nr_vcpus, 64); - __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3"); /* Make all the test's cmdline args visible to the guest */ sync_global_to_guest(vm, test_args); @@ -215,6 +211,5 @@ struct kvm_vm *test_vm_create(void) void test_vm_cleanup(struct kvm_vm *vm) { - close(gic_fd); kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c index ce74d069cb7b..91906414a474 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c @@ -924,10 +924,8 @@ static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu) static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu) { - vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq); - vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq); + ptimer_irq = vcpu_get_ptimer_irq(vcpu); + vtimer_irq = vcpu_get_vtimer_irq(vcpu); sync_global_to_guest(vm, ptimer_irq); sync_global_to_guest(vm, vtimer_irq); @@ -935,8 +933,6 @@ static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu) pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq); } -static int gic_fd; - static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu, enum arch_timer timer) { @@ -951,8 +947,6 @@ static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu, vcpu_args_set(*vcpu, 1, timer); test_init_timer_irq(*vm, *vcpu); - gic_fd = vgic_v3_setup(*vm, 1, 64); - __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3"); sync_global_to_guest(*vm, test_args); sync_global_to_guest(*vm, CVAL_MAX); @@ -961,7 +955,6 @@ static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu, static void test_vm_cleanup(struct kvm_vm *vm) { - close(gic_fd); kvm_vm_free(vm); } @@ -1042,6 +1035,8 @@ int main(int argc, char *argv[]) /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); + TEST_REQUIRE(kvm_supports_vgic_v3()); + if (!parse_args(argc, argv)) exit(KSFT_SKIP); diff --git a/tools/testing/selftests/kvm/arm64/external_aborts.c b/tools/testing/selftests/kvm/arm64/external_aborts.c index 062bf84cced1..592b26ded779 100644 --- a/tools/testing/selftests/kvm/arm64/external_aborts.c +++ b/tools/testing/selftests/kvm/arm64/external_aborts.c @@ -250,6 +250,47 @@ static void test_serror(void) kvm_vm_free(vm); } +static void expect_sea_s1ptw_handler(struct ex_regs *regs) +{ + u64 esr = read_sysreg(esr_el1); + + GUEST_ASSERT_EQ(regs->pc, expected_abort_pc); + 
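A note on the vgic rework visible here and in several later hunks: tests now declare the dependency up front with TEST_REQUIRE(kvm_supports_vgic_v3()) instead of creating the distributor and bailing out, and the gic_fd bookkeeping moves into the per-VM arch state (see kvm_vm_arch further down). The helper's implementation is not part of this section; presumably it probes non-destructively along these lines, where KVM_CREATE_DEVICE_TEST asks whether the device could be created without actually creating it:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static bool vm_fd_supports_vgic_v3(int vm_fd)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V3,
		.flags = KVM_CREATE_DEVICE_TEST,
	};

	return ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0;
}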
GUEST_ASSERT_EQ(ESR_ELx_EC(esr), ESR_ELx_EC_DABT_CUR); + GUEST_ASSERT_EQ((esr & ESR_ELx_FSC), ESR_ELx_FSC_SEA_TTW(3)); + + GUEST_DONE(); +} + +static noinline void test_s1ptw_abort_guest(void) +{ + extern char test_s1ptw_abort_insn; + + WRITE_ONCE(expected_abort_pc, (u64)&test_s1ptw_abort_insn); + + asm volatile("test_s1ptw_abort_insn:\n\t" + "ldr x0, [%0]\n\t" + : : "r" (MMIO_ADDR) : "x0", "memory"); + + GUEST_FAIL("Load on S1PTW abort should not retire"); +} + +static void test_s1ptw_abort(void) +{ + struct kvm_vcpu *vcpu; + u64 *ptep, bad_pa; + struct kvm_vm *vm = vm_create_with_dabt_handler(&vcpu, test_s1ptw_abort_guest, + expect_sea_s1ptw_handler); + + ptep = virt_get_pte_hva_at_level(vm, MMIO_ADDR, 2); + bad_pa = BIT(vm->pa_bits) - vm->page_size; + + *ptep &= ~GENMASK(47, 12); + *ptep |= bad_pa; + + vcpu_run_expect_done(vcpu); + kvm_vm_free(vm); +} + static void test_serror_emulated_guest(void) { GUEST_ASSERT(!(read_sysreg(isr_el1) & ISR_EL1_A)); @@ -327,4 +368,5 @@ int main(void) test_serror_masked(); test_serror_emulated(); test_mmio_ease(); + test_s1ptw_abort(); } diff --git a/tools/testing/selftests/kvm/arm64/hello_el2.c b/tools/testing/selftests/kvm/arm64/hello_el2.c new file mode 100644 index 000000000000..bbe6862c6ab1 --- /dev/null +++ b/tools/testing/selftests/kvm/arm64/hello_el2.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * hello_el2 - Basic KVM selftest for VM running at EL2 with E2H=RES1 + * + * Copyright 2025 Google LLC + */ +#include "kvm_util.h" +#include "processor.h" +#include "test_util.h" +#include "ucall.h" + +#include <asm/sysreg.h> + +static void guest_code(void) +{ + u64 mmfr0 = read_sysreg_s(SYS_ID_AA64MMFR0_EL1); + u64 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); + u64 mmfr4 = read_sysreg_s(SYS_ID_AA64MMFR4_EL1); + u8 e2h0 = SYS_FIELD_GET(ID_AA64MMFR4_EL1, E2H0, mmfr4); + + GUEST_ASSERT_EQ(get_current_el(), 2); + GUEST_ASSERT(read_sysreg(hcr_el2) & HCR_EL2_E2H); + GUEST_ASSERT_EQ(SYS_FIELD_GET(ID_AA64MMFR1_EL1, VH, mmfr1), + ID_AA64MMFR1_EL1_VH_IMP); + + /* + * Traps of the complete ID register space are IMPDEF without FEAT_FGT, + * which is really annoying to deal with in KVM describing E2H as RES1. + * + * If the implementation doesn't honor the trap then expect the register + * to return all zeros. 
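The two GUEST_ASSERT_EQs at the top of expect_sea_s1ptw_handler() above check the exception class and the fault status code of ESR_EL1. Unpacked, with the constant values the kernel's asm/esr.h assigns (an illustrative helper, not selftest code):

#include <stdbool.h>
#include <stdint.h>

#define ESR_EC_SHIFT		26
#define ESR_EC_MASK		0x3f
#define ESR_EC_DABT_CUR		0x25		/* data abort without EL change */
#define ESR_FSC_MASK		0x3f
#define ESR_FSC_SEA_TTW(n)	(0x14 + (n))	/* sync external abort on table walk, level n */

static bool is_sea_on_s1_walk(uint64_t esr, int level)
{
	return ((esr >> ESR_EC_SHIFT) & ESR_EC_MASK) == ESR_EC_DABT_CUR &&
	       (esr & ESR_FSC_MASK) == ESR_FSC_SEA_TTW(level);
}

The test provokes that fault by pointing the level-2 table descriptor for MMIO_ADDR at an unbacked physical address, so fetching the level-3 descriptor during the stage-1 walk takes a synchronous external abort, matching ESR_ELx_FSC_SEA_TTW(3).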
+ */ + if (e2h0 == ID_AA64MMFR4_EL1_E2H0_IMP) + GUEST_ASSERT_EQ(SYS_FIELD_GET(ID_AA64MMFR0_EL1, FGT, mmfr0), + ID_AA64MMFR0_EL1_FGT_NI); + else + GUEST_ASSERT_EQ(e2h0, ID_AA64MMFR4_EL1_E2H0_NI_NV1); + + GUEST_DONE(); +} + +int main(void) +{ + struct kvm_vcpu_init init; + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct ucall uc; + + TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2)); + + vm = vm_create(1); + + kvm_get_default_vcpu_target(vm, &init); + init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2); + vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code); + kvm_arch_vm_finalize_vcpus(vm); + + vcpu_run(vcpu); + switch (get_ucall(vcpu, &uc)) { + case UCALL_DONE: + break; + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + default: + TEST_FAIL("Unhandled ucall: %ld\n", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c index 44cfcf8a7f46..bf038a0371f4 100644 --- a/tools/testing/selftests/kvm/arm64/hypercalls.c +++ b/tools/testing/selftests/kvm/arm64/hypercalls.c @@ -108,7 +108,7 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info) for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) { memset(&res, 0, sizeof(res)); - smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res); + do_smccc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res); switch (stage) { case TEST_STAGE_HVC_IFACE_FEAT_DISABLED: diff --git a/tools/testing/selftests/kvm/arm64/kvm-uuid.c b/tools/testing/selftests/kvm/arm64/kvm-uuid.c index af9581b860f1..b5be9133535a 100644 --- a/tools/testing/selftests/kvm/arm64/kvm-uuid.c +++ b/tools/testing/selftests/kvm/arm64/kvm-uuid.c @@ -25,7 +25,7 @@ static void guest_code(void) { struct arm_smccc_res res = {}; - smccc_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res); + do_smccc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res); __GUEST_ASSERT(res.a0 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 && res.a1 == ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 && diff --git a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c index f222538e6084..152c34776981 100644 --- a/tools/testing/selftests/kvm/arm64/no-vgic-v3.c +++ b/tools/testing/selftests/kvm/arm64/no-vgic-v3.c @@ -163,6 +163,8 @@ int main(int argc, char *argv[]) struct kvm_vm *vm; uint64_t pfr0; + test_disable_default_vgic(); + vm = vm_create_with_one_vcpu(&vcpu, NULL); pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0), diff --git a/tools/testing/selftests/kvm/arm64/psci_test.c b/tools/testing/selftests/kvm/arm64/psci_test.c index ab491ee9e5f7..98e49f710aef 100644 --- a/tools/testing/selftests/kvm/arm64/psci_test.c +++ b/tools/testing/selftests/kvm/arm64/psci_test.c @@ -27,7 +27,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr, { struct arm_smccc_res res; - smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id, + do_smccc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id, 0, 0, 0, 0, &res); return res.a0; @@ -38,7 +38,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity, { struct arm_smccc_res res; - smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level, + do_smccc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level, 0, 0, 0, 0, 0, &res); return res.a0; @@ -48,7 +48,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id) { struct arm_smccc_res res; - 
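The smccc_hvc() -> do_smccc() conversions in hypercalls.c, kvm-uuid.c and the PSCI test below fall out of the EL2 support: a guest running at EL2 takes its own HVC exceptions, so it has to use SMC as the SMCCC conduit to reach KVM. do_smccc(), added to processor.h later in this series, picks the conduit from CurrentEL; a function-style rendering of the macro's logic (smccc_hvc/smccc_smc and struct arm_smccc_res come from the selftest library):

static inline void smccc_call(uint32_t function_id, uint64_t arg0,
			      struct arm_smccc_res *res)
{
	/* An EL2 guest would trap its own HVC; SMC reaches the host. */
	if (get_current_el() == 2)
		smccc_smc(function_id, arg0, 0, 0, 0, 0, 0, 0, res);
	else
		smccc_hvc(function_id, arg0, 0, 0, 0, 0, 0, 0, res);
}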
smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id, + do_smccc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id, 0, 0, 0, 0, 0, &res); return res.a0; @@ -58,7 +58,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie) { struct arm_smccc_res res; - smccc_hvc(PSCI_1_3_FN64_SYSTEM_OFF2, type, cookie, 0, 0, 0, 0, 0, &res); + do_smccc(PSCI_1_3_FN64_SYSTEM_OFF2, type, cookie, 0, 0, 0, 0, 0, &res); return res.a0; } @@ -67,7 +67,7 @@ static uint64_t psci_features(uint32_t func_id) { struct arm_smccc_res res; - smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res); + do_smccc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res); return res.a0; } @@ -89,12 +89,13 @@ static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source, vm = vm_create(2); - vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); + kvm_get_default_vcpu_target(vm, &init); init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2); *source = aarch64_vcpu_add(vm, 0, &init, guest_code); *target = aarch64_vcpu_add(vm, 1, &init, guest_code); + kvm_arch_vm_finalize_vcpus(vm); return vm; } diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 189321e96925..8ff1e853f7f8 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -15,8 +15,6 @@ #include "test_util.h" #include <linux/bitfield.h> -bool have_cap_arm_mte; - enum ftr_type { FTR_EXACT, /* Use a predefined safe value */ FTR_LOWER_SAFE, /* Smaller value is safe */ @@ -125,6 +123,13 @@ static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = { REG_FTR_END, }; +static const struct reg_ftr_bits ftr_id_aa64isar3_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FPRCVT, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, LSFE, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR3_EL1, FAMINMAX, 0), + REG_FTR_END, +}; + static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = { REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0), @@ -165,7 +170,9 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = { static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = { REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HCX, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TWED, 0), REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0), @@ -221,6 +228,7 @@ static struct test_feature_reg test_regs[] = { TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1), TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1), TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1), + TEST_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3_el1), TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1), TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1), TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1), @@ -239,6 +247,7 @@ static void guest_code(void) GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1); GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1); GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1); + GUEST_REG_SYNC(SYS_ID_AA64ISAR3_EL1); GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1); GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1); GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1); @@ -568,7 +577,9 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) uint64_t 
mte_frac; int idx, err; - if (!have_cap_arm_mte) { + val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); + mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val); + if (!mte) { ksft_test_result_skip("MTE capability not supported, nothing to test\n"); return; } @@ -593,9 +604,6 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu) * from unsupported (0xF) to supported (0). * */ - val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); - - mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val); mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val); if (mte != ID_AA64PFR1_EL1_MTE_MTE2 || mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) { @@ -750,28 +758,23 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu) ksft_test_result_pass("%s\n", __func__); } -void kvm_arch_vm_post_create(struct kvm_vm *vm) -{ - if (vm_check_cap(vm, KVM_CAP_ARM_MTE)) { - vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0); - have_cap_arm_mte = true; - } -} - int main(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool aarch64_only; uint64_t val, el0; - int test_cnt; + int test_cnt, i, j; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS)); + test_wants_mte(); + vm = vm_create(1); vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0); vcpu = vm_vcpu_add(vm, 0, guest_code); + kvm_arch_vm_finalize_vcpus(vm); /* Check for AARCH64 only system */ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); @@ -780,13 +783,10 @@ int main(void) ksft_print_header(); - test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) + - ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) + - ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) + - ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + - ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + - ARRAY_SIZE(ftr_id_aa64mmfr3_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) - - ARRAY_SIZE(test_regs) + 3 + MPAM_IDREG_TEST + MTE_IDREG_TEST; + test_cnt = 3 + MPAM_IDREG_TEST + MTE_IDREG_TEST; + for (i = 0; i < ARRAY_SIZE(test_regs); i++) + for (j = 0; test_regs[i].ftr_bits[j].type != FTR_END; j++) + test_cnt++; ksft_set_plan(test_cnt); diff --git a/tools/testing/selftests/kvm/arm64/smccc_filter.c b/tools/testing/selftests/kvm/arm64/smccc_filter.c index 2d189f3da228..1763b9d45400 100644 --- a/tools/testing/selftests/kvm/arm64/smccc_filter.c +++ b/tools/testing/selftests/kvm/arm64/smccc_filter.c @@ -22,8 +22,20 @@ enum smccc_conduit { SMC_INSN, }; +static bool test_runs_at_el2(void) +{ + struct kvm_vm *vm = vm_create(1); + struct kvm_vcpu_init init; + + kvm_get_default_vcpu_target(vm, &init); + kvm_vm_free(vm); + + return init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2); +} + #define for_each_conduit(conduit) \ - for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++) + for (conduit = test_runs_at_el2() ? 
SMC_INSN : HVC_INSN; \ + conduit <= SMC_INSN; conduit++) static void guest_main(uint32_t func_id, enum smccc_conduit conduit) { @@ -64,7 +76,7 @@ static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu) struct kvm_vm *vm; vm = vm_create(1); - vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); + kvm_get_default_vcpu_target(vm, &init); /* * Enable in-kernel emulation of PSCI to ensure that calls are denied @@ -73,6 +85,7 @@ static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu) init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2); *vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main); + kvm_arch_vm_finalize_vcpus(vm); return vm; } diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c index a8e0f46bc0ab..8d6d3a4ae4db 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_init.c +++ b/tools/testing/selftests/kvm/arm64/vgic_init.c @@ -994,6 +994,8 @@ int main(int ac, char **av) int pa_bits; int cnt_impl = 0; + test_disable_default_vgic(); + pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits; max_phys_size = 1ULL << pa_bits; diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c index a09dd423c2d7..6338f5bbdb70 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_irq.c +++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c @@ -752,7 +752,6 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) vcpu_args_set(vcpu, 1, args_gva); gic_fd = vgic_v3_setup(vm, 1, nr_irqs); - __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping"); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handlers[args.eoi_split][args.level_sensitive]); @@ -802,6 +801,9 @@ int main(int argc, char **argv) int opt; bool eoi_split = false; + TEST_REQUIRE(kvm_supports_vgic_v3()); + test_disable_default_vgic(); + while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) { switch (opt) { case 'n': diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c index fc4fe52fb6f8..87922a89b134 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c +++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c @@ -27,7 +27,7 @@ static vm_paddr_t gpa_base; static struct kvm_vm *vm; static struct kvm_vcpu **vcpus; -static int gic_fd, its_fd; +static int its_fd; static struct test_data { bool request_vcpus_stop; @@ -214,9 +214,6 @@ static void setup_test_data(void) static void setup_gic(void) { - gic_fd = vgic_v3_setup(vm, test_data.nr_cpus, 64); - __TEST_REQUIRE(gic_fd >= 0, "Failed to create GICv3"); - its_fd = vgic_its_setup(vm); } @@ -355,7 +352,6 @@ static void setup_vm(void) static void destroy_vm(void) { close(its_fd); - close(gic_fd); kvm_vm_free(vm); free(vcpus); } @@ -374,6 +370,8 @@ int main(int argc, char **argv) u32 nr_threads; int c; + TEST_REQUIRE(kvm_supports_vgic_v3()); + while ((c = getopt(argc, argv, "hv:d:e:i:")) != -1) { switch (c) { case 'v': diff --git a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c index a0c4ab839155..ae36325c022f 100644 --- a/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c @@ -28,7 +28,6 @@ struct vpmu_vm { struct kvm_vm *vm; struct kvm_vcpu *vcpu; - int gic_fd; }; static struct vpmu_vm vpmu_vm; @@ -45,11 +44,6 @@ static uint64_t get_pmcr_n(uint64_t pmcr) return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr); } -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) -{ - 
u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N); -} - static uint64_t get_counters_mask(uint64_t n) { uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); @@ -415,10 +409,6 @@ static void create_vpmu_vm(void *guest_code) .attr = KVM_ARM_VCPU_PMU_V3_IRQ, .addr = (uint64_t)&irq, }; - struct kvm_device_attr init_attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_INIT, - }; /* The test creates the vpmu_vm multiple times. Ensure a clean state */ memset(&vpmu_vm, 0, sizeof(vpmu_vm)); @@ -431,13 +421,12 @@ static void create_vpmu_vm(void *guest_code) } /* Create vCPU with PMUv3 */ - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + kvm_get_default_vcpu_target(vpmu_vm.vm, &init); init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code); vcpu_init_descriptor_tables(vpmu_vm.vcpu); - vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64); - __TEST_REQUIRE(vpmu_vm.gic_fd >= 0, - "Failed to create vgic-v3, skipping"); + + kvm_arch_vm_finalize_vcpus(vpmu_vm.vm); /* Make sure that PMUv3 support is indicated in the ID register */ dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); @@ -446,14 +435,11 @@ static void create_vpmu_vm(void *guest_code) pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); - /* Initialize vPMU */ vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr); } static void destroy_vpmu_vm(void) { - close(vpmu_vm.gic_fd); kvm_vm_free(vpmu_vm.vm); } @@ -475,33 +461,28 @@ static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) } } -static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) +static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool expect_fail) { struct kvm_vcpu *vcpu; - uint64_t pmcr, pmcr_orig; + unsigned int prev; + int ret; create_vpmu_vm(guest_code); vcpu = vpmu_vm.vcpu; - pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); - pmcr = pmcr_orig; + prev = get_pmcr_n(vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0))); - /* - * Setting a larger value of PMCR.N should not modify the field, and - * return a success. 
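The rewritten test also documents the intended UAPI flow: the number of implemented counters is now chosen with a vCPU device attribute before PMU init, instead of poking PMCR_EL0.N. Stripped of the selftest wrappers, the sequence is roughly the raw-ioctl sketch below (arm64 UAPI headers assumed; error handling elided; KVM returns EINVAL when nr_counters exceeds what the host PMU implements, which is exactly what run_error_test() checks):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void init_pmu_with_nr_counters(int vcpu_fd, unsigned int nr_counters)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS,
		.addr  = (uint64_t)&nr_counters,
	};

	/* Must precede KVM_ARM_VCPU_PMU_V3_INIT. */
	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);

	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}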
- */ - set_pmcr_n(&pmcr, pmcr_n); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr); - pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)); + ret = __vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL, + KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS, &nr_counters); if (expect_fail) - TEST_ASSERT(pmcr_orig == pmcr, - "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx", - pmcr, pmcr_n); + TEST_ASSERT(ret && errno == EINVAL, + "Setting more PMU counters (%u) than available (%u) unexpectedly succeeded", + nr_counters, prev); else - TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr), - "Failed to update PMCR.N to %lu (received: %lu)", - pmcr_n, get_pmcr_n(pmcr)); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret)); + + vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL, KVM_ARM_VCPU_PMU_V3_INIT, NULL); } /* @@ -516,11 +497,11 @@ static void run_access_test(uint64_t pmcr_n) pr_debug("Test with pmcr_n %lu\n", pmcr_n); - test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); + test_create_vpmu_vm_with_nr_counters(pmcr_n, false); vcpu = vpmu_vm.vcpu; /* Save the initial sp to restore them later to run the guest again */ - sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1)); + sp = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1)); run_vcpu(vcpu, pmcr_n); @@ -528,11 +509,11 @@ static void run_access_test(uint64_t pmcr_n) * Reset and re-initialize the vCPU, and run the guest code again to * check if PMCR_EL0.N is preserved. */ - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + kvm_get_default_vcpu_target(vpmu_vm.vm, &init); init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); aarch64_vcpu_setup(vcpu, &init); vcpu_init_descriptor_tables(vcpu); - vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp); vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); run_vcpu(vcpu, pmcr_n); @@ -557,7 +538,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n) uint64_t set_reg_id, clr_reg_id, reg_val; uint64_t valid_counters_mask, max_counters_mask; - test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); + test_create_vpmu_vm_with_nr_counters(pmcr_n, false); vcpu = vpmu_vm.vcpu; valid_counters_mask = get_counters_mask(pmcr_n); @@ -611,7 +592,7 @@ static void run_error_test(uint64_t pmcr_n) { pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); - test_create_vpmu_vm_with_pmcr_n(pmcr_n, true); + test_create_vpmu_vm_with_nr_counters(pmcr_n, true); destroy_vpmu_vm(); } @@ -629,11 +610,25 @@ static uint64_t get_pmcr_n_limit(void) return get_pmcr_n(pmcr); } +static bool kvm_supports_nr_counters_attr(void) +{ + bool supported; + + create_vpmu_vm(NULL); + supported = !__vcpu_has_device_attr(vpmu_vm.vcpu, KVM_ARM_VCPU_PMU_V3_CTRL, + KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS); + destroy_vpmu_vm(); + + return supported; +} + int main(void) { uint64_t i, pmcr_n; TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); + TEST_REQUIRE(kvm_supports_vgic_v3()); + TEST_REQUIRE(kvm_supports_nr_counters_attr()); pmcr_n = get_pmcr_n_limit(); for (i = 0; i <= pmcr_n; i++) { diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index e79817bd0e29..0a1ea1d1e2d8 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -20,38 +20,6 @@ #include "guest_modes.h" #include "ucall_common.h" -#ifdef __aarch64__ -#include "arm64/vgic.h" - -static int gic_fd; - -static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus) -{ - 
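Back in set_id_regs.c's main(), a few hunks up: the ksft plan is now computed by walking each register's table to its FTR_END sentinel rather than hand-summing a dozen ARRAY_SIZEs, so adding a feature bit can no longer skew the plan. The pattern, reduced to its essentials (illustrative types, not the selftest's exact definitions):

#include <stddef.h>

enum ftr_type { FTR_LOWER_SAFE, FTR_HIGHER_SAFE, FTR_EXACT, FTR_END };

struct reg_ftr_bits { enum ftr_type type; /* safe-value policy, shift, width, ... */ };
struct test_feature_reg { const struct reg_ftr_bits *ftr_bits; };

static size_t count_planned_tests(const struct test_feature_reg *regs,
				  size_t nr_regs)
{
	size_t i, j, cnt = 0;

	for (i = 0; i < nr_regs; i++)
		for (j = 0; regs[i].ftr_bits[j].type != FTR_END; j++)
			cnt++;
	return cnt;
}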
/* - * The test can still run even if hardware does not support GICv3, as it - * is only an optimization to reduce guest exits. - */ - gic_fd = vgic_v3_setup(vm, nr_vcpus, 64); -} - -static void arch_cleanup_vm(struct kvm_vm *vm) -{ - if (gic_fd > 0) - close(gic_fd); -} - -#else /* __aarch64__ */ - -static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus) -{ -} - -static void arch_cleanup_vm(struct kvm_vm *vm) -{ -} - -#endif - /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/ #define TEST_HOST_LOOP_N 2UL @@ -166,8 +134,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, dirty_log_manual_caps); - arch_setup_vm(vm, nr_vcpus); - /* Start the iterations */ iteration = 0; host_quit = false; @@ -285,7 +251,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) } memstress_free_bitmaps(bitmaps, p->slots); - arch_cleanup_vm(vm); memstress_destroy_vm(vm); } diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 23593d9eeba9..d58a641b0e6a 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -585,6 +585,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu, log_mode_create_vm_done(vm); *vcpu = vm_vcpu_add(vm, 0, guest_code); + kvm_arch_vm_finalize_vcpus(vm); return vm; } diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c index 91f05f78e824..f4644c9d2d3b 100644 --- a/tools/testing/selftests/kvm/get-reg-list.c +++ b/tools/testing/selftests/kvm/get-reg-list.c @@ -116,10 +116,13 @@ void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) } #ifdef __aarch64__ -static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init) +static void prepare_vcpu_init(struct kvm_vm *vm, struct vcpu_reg_list *c, + struct kvm_vcpu_init *init) { struct vcpu_reg_sublist *s; + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, init); + for_each_sublist(c, s) if (s->capability) init->features[s->feature / 32] |= 1 << (s->feature % 32); @@ -127,10 +130,10 @@ static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *ini static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm) { - struct kvm_vcpu_init init = { .target = -1, }; + struct kvm_vcpu_init init; struct kvm_vcpu *vcpu; - prepare_vcpu_init(c, &init); + prepare_vcpu_init(vm, c, &init); vcpu = __vm_vcpu_add(vm, 0); aarch64_vcpu_setup(vcpu, &init); diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c index ce687f8d248f..b3ca6737f304 100644 --- a/tools/testing/selftests/kvm/guest_memfd_test.c +++ b/tools/testing/selftests/kvm/guest_memfd_test.c @@ -13,12 +13,16 @@ #include <linux/bitmap.h> #include <linux/falloc.h> +#include <linux/sizes.h> +#include <setjmp.h> +#include <signal.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/stat.h> #include "kvm_util.h" #include "test_util.h" +#include "ucall_common.h" static void test_file_read_write(int fd) { @@ -34,12 +38,83 @@ static void test_file_read_write(int fd) "pwrite on a guest_mem fd should fail"); } -static void test_mmap(int fd, size_t page_size) +static void test_mmap_supported(int fd, size_t page_size, size_t total_size) +{ + const char val = 0xaa; + char *mem; + size_t i; + int ret; + + mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + TEST_ASSERT(mem 
== MAP_FAILED, "Copy-on-write not allowed by guest_memfd."); + + mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed."); + + memset(mem, val, total_size); + for (i = 0; i < total_size; i++) + TEST_ASSERT_EQ(READ_ONCE(mem[i]), val); + + ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, + page_size); + TEST_ASSERT(!ret, "fallocate the first page should succeed."); + + for (i = 0; i < page_size; i++) + TEST_ASSERT_EQ(READ_ONCE(mem[i]), 0x00); + for (; i < total_size; i++) + TEST_ASSERT_EQ(READ_ONCE(mem[i]), val); + + memset(mem, val, page_size); + for (i = 0; i < total_size; i++) + TEST_ASSERT_EQ(READ_ONCE(mem[i]), val); + + ret = munmap(mem, total_size); + TEST_ASSERT(!ret, "munmap() should succeed."); +} + +static sigjmp_buf jmpbuf; +void fault_sigbus_handler(int signum) +{ + siglongjmp(jmpbuf, 1); +} + +static void test_fault_overflow(int fd, size_t page_size, size_t total_size) +{ + struct sigaction sa_old, sa_new = { + .sa_handler = fault_sigbus_handler, + }; + size_t map_size = total_size * 4; + const char val = 0xaa; + char *mem; + size_t i; + int ret; + + mem = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed."); + + sigaction(SIGBUS, &sa_new, &sa_old); + if (sigsetjmp(jmpbuf, 1) == 0) { + memset(mem, 0xaa, map_size); + TEST_ASSERT(false, "memset() should have triggered SIGBUS."); + } + sigaction(SIGBUS, &sa_old, NULL); + + for (i = 0; i < total_size; i++) + TEST_ASSERT_EQ(READ_ONCE(mem[i]), val); + + ret = munmap(mem, map_size); + TEST_ASSERT(!ret, "munmap() should succeed."); +} + +static void test_mmap_not_supported(int fd, size_t page_size, size_t total_size) { char *mem; mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); TEST_ASSERT_EQ(mem, MAP_FAILED); + + mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + TEST_ASSERT_EQ(mem, MAP_FAILED); } static void test_file_size(int fd, size_t page_size, size_t total_size) @@ -120,80 +195,187 @@ static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size) } } -static void test_create_guest_memfd_invalid(struct kvm_vm *vm) +static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm, + uint64_t guest_memfd_flags, + size_t page_size) { - size_t page_size = getpagesize(); - uint64_t flag; size_t size; int fd; for (size = 1; size < page_size; size++) { - fd = __vm_create_guest_memfd(vm, size, 0); - TEST_ASSERT(fd == -1 && errno == EINVAL, + fd = __vm_create_guest_memfd(vm, size, guest_memfd_flags); + TEST_ASSERT(fd < 0 && errno == EINVAL, "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL", size); } - - for (flag = BIT(0); flag; flag <<= 1) { - fd = __vm_create_guest_memfd(vm, page_size, flag); - TEST_ASSERT(fd == -1 && errno == EINVAL, - "guest_memfd() with flag '0x%lx' should fail with EINVAL", - flag); - } } static void test_create_guest_memfd_multiple(struct kvm_vm *vm) { int fd1, fd2, ret; struct stat st1, st2; + size_t page_size = getpagesize(); - fd1 = __vm_create_guest_memfd(vm, 4096, 0); + fd1 = __vm_create_guest_memfd(vm, page_size, 0); TEST_ASSERT(fd1 != -1, "memfd creation should succeed"); ret = fstat(fd1, &st1); TEST_ASSERT(ret != -1, "memfd fstat should succeed"); - TEST_ASSERT(st1.st_size == 4096, "memfd st_size should match requested size"); + TEST_ASSERT(st1.st_size == page_size, "memfd st_size should match requested size"); - fd2 = 
__vm_create_guest_memfd(vm, 8192, 0); + fd2 = __vm_create_guest_memfd(vm, page_size * 2, 0); TEST_ASSERT(fd2 != -1, "memfd creation should succeed"); ret = fstat(fd2, &st2); TEST_ASSERT(ret != -1, "memfd fstat should succeed"); - TEST_ASSERT(st2.st_size == 8192, "second memfd st_size should match requested size"); + TEST_ASSERT(st2.st_size == page_size * 2, "second memfd st_size should match requested size"); ret = fstat(fd1, &st1); TEST_ASSERT(ret != -1, "memfd fstat should succeed"); - TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size"); + TEST_ASSERT(st1.st_size == page_size, "first memfd st_size should still match requested size"); TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers"); close(fd2); close(fd1); } -int main(int argc, char *argv[]) +static void test_guest_memfd_flags(struct kvm_vm *vm, uint64_t valid_flags) { - size_t page_size; - size_t total_size; + size_t page_size = getpagesize(); + uint64_t flag; int fd; - struct kvm_vm *vm; - TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); + for (flag = BIT(0); flag; flag <<= 1) { + fd = __vm_create_guest_memfd(vm, page_size, flag); + if (flag & valid_flags) { + TEST_ASSERT(fd >= 0, + "guest_memfd() with flag '0x%lx' should succeed", + flag); + close(fd); + } else { + TEST_ASSERT(fd < 0 && errno == EINVAL, + "guest_memfd() with flag '0x%lx' should fail with EINVAL", + flag); + } + } +} + +static void test_guest_memfd(unsigned long vm_type) +{ + uint64_t flags = 0; + struct kvm_vm *vm; + size_t total_size; + size_t page_size; + int fd; page_size = getpagesize(); total_size = page_size * 4; - vm = vm_create_barebones(); + vm = vm_create_barebones_type(vm_type); + + if (vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP)) + flags |= GUEST_MEMFD_FLAG_MMAP; - test_create_guest_memfd_invalid(vm); test_create_guest_memfd_multiple(vm); + test_create_guest_memfd_invalid_sizes(vm, flags, page_size); - fd = vm_create_guest_memfd(vm, total_size, 0); + fd = vm_create_guest_memfd(vm, total_size, flags); test_file_read_write(fd); - test_mmap(fd, page_size); + + if (flags & GUEST_MEMFD_FLAG_MMAP) { + test_mmap_supported(fd, page_size, total_size); + test_fault_overflow(fd, page_size, total_size); + } else { + test_mmap_not_supported(fd, page_size, total_size); + } + test_file_size(fd, page_size, total_size); test_fallocate(fd, page_size, total_size); test_invalid_punch_hole(fd, page_size, total_size); + test_guest_memfd_flags(vm, flags); + close(fd); + kvm_vm_free(vm); +} + +static void guest_code(uint8_t *mem, uint64_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + __GUEST_ASSERT(mem[i] == 0xaa, + "Guest expected 0xaa at offset %lu, got 0x%x", i, mem[i]); + + memset(mem, 0xff, size); + GUEST_DONE(); +} + +static void test_guest_memfd_guest(void) +{ + /* + * Skip the first 4gb and slot0. slot0 maps <1gb and is used to back + * the guest's code, stack, and page tables, and low memory contains + * the PCI hole and other MMIO regions that need to be avoided. 
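Everything test_guest_memfd() exercises rests on two pieces of UAPI: the KVM_CREATE_GUEST_MEMFD vm ioctl and the new GUEST_MEMFD_FLAG_MMAP flag that makes the returned fd mappable. The bare sequence without the selftest plumbing, as a sketch (error handling elided; the flag is only accepted where KVM_CAP_GUEST_MEMFD_MMAP reports support, and MAP_SHARED is mandatory, as the failing MAP_PRIVATE case above shows):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *map_guest_memfd(int vm_fd, uint64_t size, int *fd_out)
{
	struct kvm_create_guest_memfd gmem = {
		.size  = size,
		.flags = GUEST_MEMFD_FLAG_MMAP,
	};
	int fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	memset(mem, 0xaa, size);	/* host-side initialization, as in the test */
	*fd_out = fd;			/* still needed for KVM_SET_USER_MEMORY_REGION2 */
	return mem;
}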
+ */ + const uint64_t gpa = SZ_4G; + const int slot = 1; + + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + uint8_t *mem; + size_t size; + int fd, i; + + if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP)) + return; + + vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, guest_code); + + TEST_ASSERT(vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_MMAP), + "Default VM type should always support guest_memfd mmap()"); + + size = vm->page_size; + fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP); + vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0); + + mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed"); + memset(mem, 0xaa, size); + munmap(mem, size); + + virt_pg_map(vm, gpa, gpa); + vcpu_args_set(vcpu, 2, gpa, size); + vcpu_run(vcpu); + + TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); + + mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + TEST_ASSERT(mem != MAP_FAILED, "mmap() on guest_memfd failed"); + for (i = 0; i < size; i++) + TEST_ASSERT_EQ(mem[i], 0xff); + + close(fd); + kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + unsigned long vm_types, vm_type; + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD)); + + /* + * Not all architectures support KVM_CAP_VM_TYPES. However, those that + * support guest_memfd have that support for the default VM type. + */ + vm_types = kvm_check_cap(KVM_CAP_VM_TYPES); + if (!vm_types) + vm_types = BIT(VM_TYPE_DEFAULT); + + for_each_set_bit(vm_type, &vm_types, BITS_PER_TYPE(vm_types)) + test_guest_memfd(vm_type); + + test_guest_memfd_guest(); } diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h index bf461de34785..e2c4e9f0010f 100644 --- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h +++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h @@ -155,4 +155,28 @@ static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec) timer_set_tval(timer, msec_to_cycles(msec)); } +static inline u32 vcpu_get_vtimer_irq(struct kvm_vcpu *vcpu) +{ + u32 intid; + u64 attr; + + attr = vcpu_has_el2(vcpu) ? KVM_ARM_VCPU_TIMER_IRQ_HVTIMER : + KVM_ARM_VCPU_TIMER_IRQ_VTIMER; + vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid); + + return intid; +} + +static inline u32 vcpu_get_ptimer_irq(struct kvm_vcpu *vcpu) +{ + u32 intid; + u64 attr; + + attr = vcpu_has_el2(vcpu) ? 
KVM_ARM_VCPU_TIMER_IRQ_HPTIMER : + KVM_ARM_VCPU_TIMER_IRQ_PTIMER; + vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL, attr, &intid); + + return intid; +} + #endif /* SELFTEST_KVM_ARCH_TIMER_H */ diff --git a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h index e43a57d99b56..b973bb2c64a6 100644 --- a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h +++ b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h @@ -2,6 +2,9 @@ #ifndef SELFTEST_KVM_UTIL_ARCH_H #define SELFTEST_KVM_UTIL_ARCH_H -struct kvm_vm_arch {}; +struct kvm_vm_arch { + bool has_gic; + int gic_fd; +}; #endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/include/arm64/processor.h b/tools/testing/selftests/kvm/include/arm64/processor.h index 255fed769a8a..6f481475c135 100644 --- a/tools/testing/selftests/kvm/include/arm64/processor.h +++ b/tools/testing/selftests/kvm/include/arm64/processor.h @@ -175,6 +175,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, handler_fn handler); +uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level); uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva); static inline void cpu_relax(void) @@ -300,4 +301,77 @@ void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, /* Execute a Wait For Interrupt instruction. */ void wfi(void); +void test_wants_mte(void); +void test_disable_default_vgic(void); + +bool vm_supports_el2(struct kvm_vm *vm); +static bool vcpu_has_el2(struct kvm_vcpu *vcpu) +{ + return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2); +} + +#define MAPPED_EL2_SYSREG(el2, el1) \ + case SYS_##el1: \ + if (vcpu_has_el2(vcpu)) \ + alias = SYS_##el2; \ + break + + +static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding) +{ + u32 alias = encoding; + + BUILD_BUG_ON(!__builtin_constant_p(encoding)); + + switch (encoding) { + MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1); + MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1); + MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1); + MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1); + MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1); + MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1); + MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1); + MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1); + MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1); + MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1); + MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1); + MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1); + MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1); + MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1); + MAPPED_EL2_SYSREG(POR_EL2, POR_EL1); + MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1); + MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1); + MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1); + MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1); + MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1); + MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1); + MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1); + case SYS_SP_EL1: + if (!vcpu_has_el2(vcpu)) + return ARM64_CORE_REG(sp_el1); + + alias = SYS_SP_EL2; + break; + default: + BUILD_BUG(); + } + + return KVM_ARM64_SYS_REG(alias); +} + +void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init); + +static inline unsigned int get_current_el(void) +{ + return (read_sysreg(CurrentEL) >> 2) & 0x3; +} + +#define do_smccc(...) 
\ +do { \ + if (get_current_el() == 2) \ + smccc_smc(__VA_ARGS__); \ + else \ + smccc_hvc(__VA_ARGS__); \ +} while (0) + #endif /* SELFTEST_KVM_PROCESSOR_H */ diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h index c481d0c00a5d..688beccc9436 100644 --- a/tools/testing/selftests/kvm/include/arm64/vgic.h +++ b/tools/testing/selftests/kvm/include/arm64/vgic.h @@ -16,6 +16,9 @@ ((uint64_t)(flags) << 12) | \ index) +bool kvm_supports_vgic_v3(void); +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +void __vgic_v3_init(int fd); int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); #define VGIC_MAX_RESERVED 1023 diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 23a506d7eca3..26cc30290e76 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -64,6 +64,9 @@ struct kvm_vcpu { #ifdef __x86_64__ struct kvm_cpuid2 *cpuid; #endif +#ifdef __aarch64__ + struct kvm_vcpu_init init; +#endif struct kvm_binary_stats stats; struct kvm_dirty_gfn *dirty_gfns; uint32_t fetch_index; @@ -260,13 +263,18 @@ int __open_path_or_exit(const char *path, int flags, const char *enoent_help); int open_path_or_exit(const char *path, int flags); int open_kvm_dev_path_or_exit(void); -bool get_kvm_param_bool(const char *param); -bool get_kvm_intel_param_bool(const char *param); -bool get_kvm_amd_param_bool(const char *param); +int kvm_get_module_param_integer(const char *module_name, const char *param); +bool kvm_get_module_param_bool(const char *module_name, const char *param); + +static inline bool get_kvm_param_bool(const char *param) +{ + return kvm_get_module_param_bool("kvm", param); +} -int get_kvm_param_integer(const char *param); -int get_kvm_intel_param_integer(const char *param); -int get_kvm_amd_param_integer(const char *param); +static inline int get_kvm_param_integer(const char *param) +{ + return kvm_get_module_param_integer("kvm", param); +} unsigned int kvm_check_cap(long cap); @@ -1257,7 +1265,9 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) */ void kvm_selftest_arch_init(void); -void kvm_arch_vm_post_create(struct kvm_vm *vm); +void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus); +void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm); +void kvm_arch_vm_release(struct kvm_vm *vm); bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr); diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h index 162f303d9daa..e58282488beb 100644 --- a/tools/testing/selftests/kvm/include/riscv/processor.h +++ b/tools/testing/selftests/kvm/include/riscv/processor.h @@ -9,6 +9,7 @@ #include <linux/stringify.h> #include <asm/csr.h> +#include <asm/vdso/processor.h> #include "kvm_util.h" #define INSN_OPCODE_MASK 0x007c diff --git a/tools/testing/selftests/kvm/include/x86/pmu.h b/tools/testing/selftests/kvm/include/x86/pmu.h index 3c10c4dc0ae8..72575eadb63a 100644 --- a/tools/testing/selftests/kvm/include/x86/pmu.h +++ b/tools/testing/selftests/kvm/include/x86/pmu.h @@ -5,8 +5,11 @@ #ifndef SELFTEST_KVM_PMU_H #define SELFTEST_KVM_PMU_H +#include <stdbool.h> #include <stdint.h> +#include <linux/bits.h> + #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300 /* @@ -61,6 +64,11 @@ #define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00) #define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 
0x00) #define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01) +#define INTEL_ARCH_TOPDOWN_BE_BOUND RAW_EVENT(0xa4, 0x02) +#define INTEL_ARCH_TOPDOWN_BAD_SPEC RAW_EVENT(0x73, 0x00) +#define INTEL_ARCH_TOPDOWN_FE_BOUND RAW_EVENT(0x9c, 0x01) +#define INTEL_ARCH_TOPDOWN_RETIRING RAW_EVENT(0xc2, 0x02) +#define INTEL_ARCH_LBR_INSERTS RAW_EVENT(0xe4, 0x01) #define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00) #define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00) @@ -80,6 +88,11 @@ enum intel_pmu_architectural_events { INTEL_ARCH_BRANCHES_RETIRED_INDEX, INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX, INTEL_ARCH_TOPDOWN_SLOTS_INDEX, + INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX, + INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX, + INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX, + INTEL_ARCH_TOPDOWN_RETIRING_INDEX, + INTEL_ARCH_LBR_INSERTS_INDEX, NR_INTEL_ARCH_EVENTS, }; @@ -94,4 +107,17 @@ enum amd_pmu_zen_events { extern const uint64_t intel_pmu_arch_events[]; extern const uint64_t amd_pmu_zen_events[]; +enum pmu_errata { + INSTRUCTIONS_RETIRED_OVERCOUNT, + BRANCHES_RETIRED_OVERCOUNT, +}; +extern uint64_t pmu_errata_mask; + +void kvm_init_pmu_errata(void); + +static inline bool this_pmu_has_errata(enum pmu_errata errata) +{ + return pmu_errata_mask & BIT_ULL(errata); +} + #endif /* SELFTEST_KVM_PMU_H */ diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h index 2efb05c2f2fb..fbe875eafca5 100644 --- a/tools/testing/selftests/kvm/include/x86/processor.h +++ b/tools/testing/selftests/kvm/include/x86/processor.h @@ -34,6 +34,8 @@ extern uint64_t guest_tsc_khz; #define NMI_VECTOR 0x02 +const char *ex_str(int vector); + #define X86_EFLAGS_FIXED (1u << 1) #define X86_CR4_VME (1ul << 0) @@ -265,7 +267,7 @@ struct kvm_x86_cpu_property { #define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15) #define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23) #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31) -#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7) +#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12) #define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31) #define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4) #define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12) @@ -332,6 +334,11 @@ struct kvm_x86_pmu_feature { #define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED KVM_X86_PMU_FEATURE(EBX, 5) #define X86_PMU_FEATURE_BRANCHES_MISPREDICTED KVM_X86_PMU_FEATURE(EBX, 6) #define X86_PMU_FEATURE_TOPDOWN_SLOTS KVM_X86_PMU_FEATURE(EBX, 7) +#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND KVM_X86_PMU_FEATURE(EBX, 8) +#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC KVM_X86_PMU_FEATURE(EBX, 9) +#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND KVM_X86_PMU_FEATURE(EBX, 10) +#define X86_PMU_FEATURE_TOPDOWN_RETIRING KVM_X86_PMU_FEATURE(EBX, 11) +#define X86_PMU_FEATURE_LBR_INSERTS KVM_X86_PMU_FEATURE(EBX, 12) #define X86_PMU_FEATURE_INSNS_RETIRED_FIXED KVM_X86_PMU_FEATURE(ECX, 0) #define X86_PMU_FEATURE_CPU_CYCLES_FIXED KVM_X86_PMU_FEATURE(ECX, 1) @@ -1179,6 +1186,12 @@ struct idt_entry { void vm_install_exception_handler(struct kvm_vm *vm, int vector, void (*handler)(struct ex_regs *)); +/* + * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be + * used to signal "no exception". + */ +#define KVM_MAGIC_DE_VECTOR 0xff + /* If a toddler were to say "abracadabra". 
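The pmu_errata_mask machinery added to pmu.h above lets tests tolerate known-broken hosts without giving up exact assertions everywhere. A hypothetical use (this assertion is illustrative, not lifted from the series): where the host PMU is known to overcount retired instructions, an exact-match check degrades to a lower bound:

	if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT))
		TEST_ASSERT(count >= expected,
			    "Expected at least %lu instructions, got %lu",
			    expected, count);
	else
		TEST_ASSERT_EQ(count, expected);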
*/ #define KVM_EXCEPTION_MAGIC 0xabacadabaULL @@ -1314,6 +1327,26 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value) bool kvm_is_tdp_enabled(void); +static inline bool get_kvm_intel_param_bool(const char *param) +{ + return kvm_get_module_param_bool("kvm_intel", param); +} + +static inline bool get_kvm_amd_param_bool(const char *param) +{ + return kvm_get_module_param_bool("kvm_amd", param); +} + +static inline int get_kvm_intel_param_integer(const char *param) +{ + return kvm_get_module_param_integer("kvm_intel", param); +} + +static inline int get_kvm_amd_param_integer(const char *param) +{ + return kvm_get_module_param_integer("kvm_amd", param); +} + static inline bool kvm_is_pmu_enabled(void) { return get_kvm_param_bool("enable_pmu"); diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c index eb115123d741..369a4c87dd8f 100644 --- a/tools/testing/selftests/kvm/lib/arm64/processor.c +++ b/tools/testing/selftests/kvm/lib/arm64/processor.c @@ -12,6 +12,7 @@ #include "kvm_util.h" #include "processor.h" #include "ucall_common.h" +#include "vgic.h" #include <linux/bitfield.h> #include <linux/sizes.h> @@ -185,7 +186,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) _virt_pg_map(vm, vaddr, paddr, attr_idx); } -uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) +uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level) { uint64_t *ptep; @@ -195,17 +196,23 @@ uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; if (!ptep) goto unmapped_gva; + if (level == 0) + return ptep; switch (vm->pgtable_levels) { case 4: ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; if (!ptep) goto unmapped_gva; + if (level == 1) + break; /* fall through */ case 3: ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; if (!ptep) goto unmapped_gva; + if (level == 2) + break; /* fall through */ case 2: ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; @@ -223,6 +230,11 @@ unmapped_gva: exit(EXIT_FAILURE); } +uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) +{ + return virt_get_pte_hva_at_level(vm, gva, 3); +} + vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) { uint64_t *ptep = virt_get_pte_hva(vm, gva); @@ -266,31 +278,49 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) } } +bool vm_supports_el2(struct kvm_vm *vm) +{ + const char *value = getenv("NV"); + + if (value && *value == '0') + return false; + + return vm_check_cap(vm, KVM_CAP_ARM_EL2) && vm->arch.has_gic; +} + +void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init) +{ + struct kvm_vcpu_init preferred = {}; + + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred); + if (vm_supports_el2(vm)) + preferred.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2); + + *init = preferred; +} + void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) { struct kvm_vcpu_init default_init = { .target = -1, }; struct kvm_vm *vm = vcpu->vm; uint64_t sctlr_el1, tcr_el1, ttbr0_el1; - if (!init) + if (!init) { + kvm_get_default_vcpu_target(vm, &default_init); init = &default_init; - - if (init->target == -1) { - struct kvm_vcpu_init preferred; - vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred); - init->target = preferred.target; } vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init); + vcpu->init = *init; /* * Enable FP/ASIMD to avoid trapping when 
accessing Q0-Q15 * registers, which the variable argument list macros do. */ - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_CPACR_EL1), 3 << 20); - sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1)); - tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1)); + sctlr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1)); + tcr_el1 = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1)); /* Configure base granule size */ switch (vm->mode) { @@ -357,11 +387,17 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) if (use_lpa2_pte_format(vm)) tcr_el1 |= TCR_DS; - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1), sctlr_el1); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TCR_EL1), tcr_el1); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_MAIR_EL1), DEFAULT_MAIR_EL1); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_TTBR0_EL1), ttbr0_el1); vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); + + if (!vcpu_has_el2(vcpu)) + return; + + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_HCR_EL2), + HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H); } void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) @@ -395,7 +431,7 @@ static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, aarch64_vcpu_setup(vcpu, init); - vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size); return vcpu; } @@ -465,7 +501,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) { extern char vectors; - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); + vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors); } void route_exception(struct ex_regs *regs, int vector) @@ -653,3 +689,39 @@ void wfi(void) { asm volatile("wfi"); } + +static bool request_mte; +static bool request_vgic = true; + +void test_wants_mte(void) +{ + request_mte = true; +} + +void test_disable_default_vgic(void) +{ + request_vgic = false; +} + +void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus) +{ + if (request_mte && vm_check_cap(vm, KVM_CAP_ARM_MTE)) + vm_enable_cap(vm, KVM_CAP_ARM_MTE, 0); + + if (request_vgic && kvm_supports_vgic_v3()) { + vm->arch.gic_fd = __vgic_v3_setup(vm, nr_vcpus, 64); + vm->arch.has_gic = true; + } +} + +void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm) +{ + if (vm->arch.has_gic) + __vgic_v3_init(vm->arch.gic_fd); +} + +void kvm_arch_vm_release(struct kvm_vm *vm) +{ + if (vm->arch.has_gic) + close(vm->arch.gic_fd); +} diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c index 4427f43f73ea..d0f7bd0984b8 100644 --- a/tools/testing/selftests/kvm/lib/arm64/vgic.c +++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c @@ -15,6 +15,17 @@ #include "gic.h" #include "gic_v3.h" +bool kvm_supports_vgic_v3(void) +{ + struct kvm_vm *vm = vm_create_barebones(); + int r; + + r = __kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3); + kvm_vm_free(vm); + + return !r; +} + /* * vGIC-v3 default host setup * @@ -30,24 +41,11 @@ * redistributor regions of the guest. 
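Both regions are identity-mapped * into the guest at GICD_BASE_GPA and GICR_BASE_GPA.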
Since it depends on the number of * vCPUs for the VM, it must be called after all the vCPUs have been created. */ -int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) { int gic_fd; uint64_t attr; - struct list_head *iter; - unsigned int nr_gic_pages, nr_vcpus_created = 0; - - TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty"); - - /* - * Make sure that the caller is infact calling this - * function after all the vCPUs are added. - */ - list_for_each(iter, &vm->vcpus) - nr_vcpus_created++; - TEST_ASSERT(nr_vcpus == nr_vcpus_created, - "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)", - nr_vcpus, nr_vcpus_created); + unsigned int nr_gic_pages; /* Distributor setup */ gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3); @@ -56,9 +54,6 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs); - kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, - KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); - attr = GICD_BASE_GPA; kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST, &attr); @@ -73,10 +68,39 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) KVM_VGIC_V3_REDIST_SIZE * nr_vcpus); virt_map(vm, GICR_BASE_GPA, GICR_BASE_GPA, nr_gic_pages); - kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + return gic_fd; +} + +void __vgic_v3_init(int fd) +{ + kvm_device_attr_set(fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); +} - return gic_fd; +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) +{ + unsigned int nr_vcpus_created = 0; + struct list_head *iter; + int fd; + + TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty"); + + /* + * Make sure that the caller is infact calling this + * function after all the vCPUs are added. 
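+ * Otherwise the redistributor range, which is sized by the number of + * vCPUs, would be configured too small.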
+ */ + list_for_each(iter, &vm->vcpus) + nr_vcpus_created++; + TEST_ASSERT(nr_vcpus == nr_vcpus_created, + "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)", + nr_vcpus, nr_vcpus_created); + + fd = __vgic_v3_setup(vm, nr_vcpus, nr_irqs); + if (fd < 0) + return fd; + + __vgic_v3_init(fd); + return fd; } /* should only work for level sensitive interrupts */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index c3f5142b0a54..6743fbd9bd67 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -24,7 +24,7 @@ uint32_t guest_random_seed; struct guest_random_state guest_rng; static uint32_t last_guest_seed; -static int vcpu_mmap_sz(void); +static size_t vcpu_mmap_sz(void); int __open_path_or_exit(const char *path, int flags, const char *enoent_help) { @@ -95,7 +95,7 @@ static ssize_t get_module_param(const char *module_name, const char *param, return bytes_read; } -static int get_module_param_integer(const char *module_name, const char *param) +int kvm_get_module_param_integer(const char *module_name, const char *param) { /* * 16 bytes to hold a 64-bit value (1 byte per char), 1 byte for the @@ -119,7 +119,7 @@ static int get_module_param_integer(const char *module_name, const char *param) return atoi_paranoid(value); } -static bool get_module_param_bool(const char *module_name, const char *param) +bool kvm_get_module_param_bool(const char *module_name, const char *param) { char value; ssize_t r; @@ -135,36 +135,6 @@ static bool get_module_param_bool(const char *module_name, const char *param) TEST_FAIL("Unrecognized value '%c' for boolean module param", value); } -bool get_kvm_param_bool(const char *param) -{ - return get_module_param_bool("kvm", param); -} - -bool get_kvm_intel_param_bool(const char *param) -{ - return get_module_param_bool("kvm_intel", param); -} - -bool get_kvm_amd_param_bool(const char *param) -{ - return get_module_param_bool("kvm_amd", param); -} - -int get_kvm_param_integer(const char *param) -{ - return get_module_param_integer("kvm", param); -} - -int get_kvm_intel_param_integer(const char *param) -{ - return get_module_param_integer("kvm_intel", param); -} - -int get_kvm_amd_param_integer(const char *param) -{ - return get_module_param_integer("kvm_amd", param); -} - /* * Capability * @@ -517,7 +487,7 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus, guest_rng = new_guest_random_state(guest_random_seed); sync_global_to_guest(vm, guest_rng); - kvm_arch_vm_post_create(vm); + kvm_arch_vm_post_create(vm, nr_runnable_vcpus); return vm; } @@ -555,6 +525,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus, for (i = 0; i < nr_vcpus; ++i) vcpus[i] = vm_vcpu_add(vm, i, guest_code); + kvm_arch_vm_finalize_vcpus(vm); return vm; } @@ -805,6 +776,8 @@ void kvm_vm_release(struct kvm_vm *vmp) /* Free cached stats metadata and close FD */ kvm_stats_release(&vmp->stats); + + kvm_arch_vm_release(vmp); } static void __vm_mem_region_delete(struct kvm_vm *vm, @@ -1321,14 +1294,14 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, } /* Returns the size of a vCPU's kvm_run structure. 
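The size is queried from KVM * because the mmap covers vCPU-specific pages beyond struct kvm_run itself.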
*/ -static int vcpu_mmap_sz(void) +static size_t vcpu_mmap_sz(void) { int dev_fd, ret; dev_fd = open_kvm_dev_path_or_exit(); ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); - TEST_ASSERT(ret >= sizeof(struct kvm_run), + TEST_ASSERT(ret >= 0 && ret >= sizeof(struct kvm_run), KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret)); close(dev_fd); @@ -1369,7 +1342,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " - "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", + "smaller than expected, vcpu_mmap_sz: %zi expected_min: %zi", vcpu_mmap_sz(), sizeof(*vcpu->run)); vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); @@ -2330,7 +2303,15 @@ void kvm_get_stat(struct kvm_binary_stats *stats, const char *name, TEST_FAIL("Unable to find stat '%s'", name); } -__weak void kvm_arch_vm_post_create(struct kvm_vm *vm) +__weak void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus) +{ +} + +__weak void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm) +{ +} + +__weak void kvm_arch_vm_release(struct kvm_vm *vm) { } diff --git a/tools/testing/selftests/kvm/lib/x86/pmu.c b/tools/testing/selftests/kvm/lib/x86/pmu.c index f31f0427c17c..34cb57d1d671 100644 --- a/tools/testing/selftests/kvm/lib/x86/pmu.c +++ b/tools/testing/selftests/kvm/lib/x86/pmu.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include "kvm_util.h" +#include "processor.h" #include "pmu.h" const uint64_t intel_pmu_arch_events[] = { @@ -19,6 +20,11 @@ const uint64_t intel_pmu_arch_events[] = { INTEL_ARCH_BRANCHES_RETIRED, INTEL_ARCH_BRANCHES_MISPREDICTED, INTEL_ARCH_TOPDOWN_SLOTS, + INTEL_ARCH_TOPDOWN_BE_BOUND, + INTEL_ARCH_TOPDOWN_BAD_SPEC, + INTEL_ARCH_TOPDOWN_FE_BOUND, + INTEL_ARCH_TOPDOWN_RETIRING, + INTEL_ARCH_LBR_INSERTS, }; kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS); @@ -29,3 +35,46 @@ const uint64_t amd_pmu_zen_events[] = { AMD_ZEN_BRANCHES_MISPREDICTED, }; kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS); + +/* + * For Intel Atom CPUs, the PMU events "Instruction Retired" or + * "Branch Instruction Retired" may be overcounted for certain + * instructions, like FAR CALL/JMP, RETF, IRET, VMENTRY/VMEXIT/VMPTRLD + * and complex SGX/SMX/CSTATE instructions/flows. + * + * The detailed information can be found in the errata (section SRF7): + * https://edc.intel.com/content/www/us/en/design/products-and-solutions/processors-and-chipsets/sierra-forest/xeon-6700-series-processor-with-e-cores-specification-update/errata-details/ + * + * On Atom platforms up to and including Sierra Forest, both the + * "Instruction Retired" and "Branch Instruction Retired" events are + * overcounted on these instructions; on Clearwater Forest, only the + * "Instruction Retired" event is overcounted.
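+ * The affected selftests therefore treat the expected count as a lower + * bound on these parts, keyed off this_pmu_has_errata().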
+ */ +static uint64_t get_pmu_errata(void) +{ + if (!this_cpu_is_intel()) + return 0; + + if (this_cpu_family() != 0x6) + return 0; + + switch (this_cpu_model()) { + case 0xDD: /* Clearwater Forest */ + return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT); + case 0xAF: /* Sierra Forest */ + case 0x4D: /* Avaton, Rangely */ + case 0x5F: /* Denverton */ + case 0x86: /* Jacobsville */ + return BIT_ULL(INSTRUCTIONS_RETIRED_OVERCOUNT) | + BIT_ULL(BRANCHES_RETIRED_OVERCOUNT); + default: + return 0; + } +} + +uint64_t pmu_errata_mask; + +void kvm_init_pmu_errata(void) +{ + pmu_errata_mask = get_pmu_errata(); +} diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c index d4c19ac885a9..c748cd9b2eef 100644 --- a/tools/testing/selftests/kvm/lib/x86/processor.c +++ b/tools/testing/selftests/kvm/lib/x86/processor.c @@ -6,6 +6,7 @@ #include "linux/bitmap.h" #include "test_util.h" #include "kvm_util.h" +#include "pmu.h" #include "processor.h" #include "sev.h" @@ -23,6 +24,39 @@ bool host_cpu_is_intel; bool is_forced_emulation_enabled; uint64_t guest_tsc_khz; +const char *ex_str(int vector) +{ + switch (vector) { +#define VEC_STR(v) case v##_VECTOR: return "#" #v + case DE_VECTOR: return "no exception"; + case KVM_MAGIC_DE_VECTOR: return "#DE"; + VEC_STR(DB); + VEC_STR(NMI); + VEC_STR(BP); + VEC_STR(OF); + VEC_STR(BR); + VEC_STR(UD); + VEC_STR(NM); + VEC_STR(DF); + VEC_STR(TS); + VEC_STR(NP); + VEC_STR(SS); + VEC_STR(GP); + VEC_STR(PF); + VEC_STR(MF); + VEC_STR(AC); + VEC_STR(MC); + VEC_STR(XM); + VEC_STR(VE); + VEC_STR(CP); + VEC_STR(HV); + VEC_STR(VC); + VEC_STR(SX); + default: return "#??"; +#undef VEC_STR + } +} + static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) { fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " @@ -557,7 +591,7 @@ static bool kvm_fixup_exception(struct ex_regs *regs) return false; if (regs->vector == DE_VECTOR) - return false; + regs->vector = KVM_MAGIC_DE_VECTOR; regs->rip = regs->r11; regs->r9 = regs->vector; @@ -625,7 +659,7 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) REPORT_GUEST_ASSERT(uc); } -void kvm_arch_vm_post_create(struct kvm_vm *vm) +void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus) { int r; @@ -638,6 +672,7 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm) sync_global_to_guest(vm, host_cpu_is_intel); sync_global_to_guest(vm, host_cpu_is_amd); sync_global_to_guest(vm, is_forced_emulation_enabled); + sync_global_to_guest(vm, pmu_errata_mask); if (is_sev_vm(vm)) { struct kvm_sev_init init = { 0 }; @@ -1269,6 +1304,8 @@ void kvm_selftest_arch_init(void) host_cpu_is_intel = this_cpu_is_intel(); host_cpu_is_amd = this_cpu_is_amd(); is_forced_emulation_enabled = kvm_is_forced_emulation_enabled(); + + kvm_init_pmu_errata(); } bool sys_clocksource_is_based_on_tsc(void) diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index c81a84990eab..3cdfa3b19b85 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -22,6 +22,7 @@ #include "processor.h" #include "test_util.h" #include "guest_modes.h" +#include "ucall_common.h" #define DUMMY_MEMSLOT_INDEX 7 diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c index e3711beff7f3..5087d082c4b0 100644 --- a/tools/testing/selftests/kvm/memslot_perf_test.c +++ 
b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -25,6 +25,7 @@ #include <test_util.h> #include <kvm_util.h> #include <processor.h> +#include <ucall_common.h> #define MEM_EXTRA_SIZE SZ_64K diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index a0b7dabb5040..705ab3d7778b 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -80,9 +80,11 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCF: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZCMOP: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFBFMIN: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOP: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICCRSE: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR: @@ -103,6 +105,8 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZTSO: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFBFMIN: + case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFBFWMA: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN: case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB: @@ -128,6 +132,7 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN: case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SUSP: case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA: + case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_FWFT: case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL: case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR: return true; @@ -255,6 +260,8 @@ static const char *config_id_to_str(const char *prefix, __u64 id) return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)"; case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)"; + case KVM_REG_RISCV_CONFIG_REG(zicbop_block_size): + return "KVM_REG_RISCV_CONFIG_REG(zicbop_block_size)"; case KVM_REG_RISCV_CONFIG_REG(mvendorid): return "KVM_REG_RISCV_CONFIG_REG(mvendorid)"; case KVM_REG_RISCV_CONFIG_REG(marchid): @@ -532,9 +539,11 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off) KVM_ISA_EXT_ARR(ZCF), KVM_ISA_EXT_ARR(ZCMOP), KVM_ISA_EXT_ARR(ZFA), + KVM_ISA_EXT_ARR(ZFBFMIN), KVM_ISA_EXT_ARR(ZFH), KVM_ISA_EXT_ARR(ZFHMIN), KVM_ISA_EXT_ARR(ZICBOM), + KVM_ISA_EXT_ARR(ZICBOP), KVM_ISA_EXT_ARR(ZICBOZ), KVM_ISA_EXT_ARR(ZICCRSE), KVM_ISA_EXT_ARR(ZICNTR), @@ -555,6 +564,8 @@ static const char *isa_ext_single_id_to_str(__u64 
reg_off) KVM_ISA_EXT_ARR(ZTSO), KVM_ISA_EXT_ARR(ZVBB), KVM_ISA_EXT_ARR(ZVBC), + KVM_ISA_EXT_ARR(ZVFBFMIN), + KVM_ISA_EXT_ARR(ZVFBFWMA), KVM_ISA_EXT_ARR(ZVFH), KVM_ISA_EXT_ARR(ZVFHMIN), KVM_ISA_EXT_ARR(ZVKB), @@ -627,6 +638,7 @@ static const char *sbi_ext_single_id_to_str(__u64 reg_off) KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN), KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SUSP), KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA), + KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_FWFT), KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL), KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR), }; @@ -683,6 +695,19 @@ static const char *sbi_sta_id_to_str(__u64 reg_off) return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off); } +static const char *sbi_fwft_id_to_str(__u64 reg_off) +{ + switch (reg_off) { + case 0: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.enable)"; + case 1: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.flags)"; + case 2: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.value)"; + case 3: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.enable)"; + case 4: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.flags)"; + case 5: return "KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.value)"; + } + return strdup_printf("KVM_REG_RISCV_SBI_FWFT | %lld /* UNKNOWN */", reg_off); +} + static const char *sbi_id_to_str(const char *prefix, __u64 id) { __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE); @@ -695,6 +720,8 @@ static const char *sbi_id_to_str(const char *prefix, __u64 id) switch (reg_subtype) { case KVM_REG_RISCV_SBI_STA: return sbi_sta_id_to_str(reg_off); + case KVM_REG_RISCV_SBI_FWFT: + return sbi_fwft_id_to_str(reg_off); } return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off); @@ -780,10 +807,13 @@ void print_reg(const char *prefix, __u64 id) */ static __u64 base_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbop_block_size), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp), @@ -859,11 +889,26 @@ static __u64 sbi_sta_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi), }; +static __u64 sbi_fwft_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_FWFT, + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.enable), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG 
| KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.flags), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(misaligned_deleg.value), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.enable), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.flags), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_FWFT | KVM_REG_RISCV_SBI_FWFT_REG(pointer_masking.value), +}; + static __u64 zicbom_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM, }; +static __u64 zicbop_regs[] = { + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbop_block_size), + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOP, +}; + static __u64 zicboz_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size), KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ, @@ -1010,8 +1055,13 @@ static __u64 vector_regs[] = { #define SUBLIST_SBI_STA \ {"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \ .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),} +#define SUBLIST_SBI_FWFT \ + {"sbi-fwft", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_FWFT, \ + .regs = sbi_fwft_regs, .regs_n = ARRAY_SIZE(sbi_fwft_regs),} #define SUBLIST_ZICBOM \ {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),} +#define SUBLIST_ZICBOP \ + {"zicbop", .feature = KVM_RISCV_ISA_EXT_ZICBOP, .regs = zicbop_regs, .regs_n = ARRAY_SIZE(zicbop_regs),} #define SUBLIST_ZICBOZ \ {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),} #define SUBLIST_AIA \ @@ -1092,6 +1142,7 @@ KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA); KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU); KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN); KVM_SBI_EXT_SIMPLE_CONFIG(susp, SUSP); +KVM_SBI_EXT_SUBLIST_CONFIG(fwft, FWFT); KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA); KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F); @@ -1127,9 +1178,11 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD); KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF); KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP); KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA); +KVM_ISA_EXT_SIMPLE_CONFIG(zfbfmin, ZFBFMIN); KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH); KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN); KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM); +KVM_ISA_EXT_SUBLIST_CONFIG(zicbop, ZICBOP); KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ); KVM_ISA_EXT_SIMPLE_CONFIG(ziccrse, ZICCRSE); KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR); @@ -1150,6 +1203,8 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT); KVM_ISA_EXT_SIMPLE_CONFIG(ztso, ZTSO); KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB); KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC); +KVM_ISA_EXT_SIMPLE_CONFIG(zvfbfmin, ZVFBFMIN); +KVM_ISA_EXT_SIMPLE_CONFIG(zvfbfwma, ZVFBFWMA); KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH); KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN); KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB); @@ -1167,6 +1222,7 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_sbi_pmu, 
&config_sbi_dbcn, &config_sbi_susp, + &config_sbi_fwft, &config_aia, &config_fp_f, &config_fp_d, @@ -1201,9 +1257,11 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_zcf, &config_zcmop, &config_zfa, + &config_zfbfmin, &config_zfh, &config_zfhmin, &config_zicbom, + &config_zicbop, &config_zicboz, &config_ziccrse, &config_zicntr, @@ -1224,6 +1282,8 @@ struct vcpu_reg_list *vcpu_configs[] = { &config_ztso, &config_zvbb, &config_zvbc, + &config_zvfbfmin, + &config_zvfbfwma, &config_zvfh, &config_zvfhmin, &config_zvkb, diff --git a/tools/testing/selftests/kvm/s390/cmma_test.c b/tools/testing/selftests/kvm/s390/cmma_test.c index 85cc8c18d6e7..e39a724fe860 100644 --- a/tools/testing/selftests/kvm/s390/cmma_test.c +++ b/tools/testing/selftests/kvm/s390/cmma_test.c @@ -145,7 +145,7 @@ static void finish_vm_setup(struct kvm_vm *vm) slot0 = memslot2region(vm, 0); ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); - kvm_arch_vm_post_create(vm); + kvm_arch_vm_post_create(vm, 0); } static struct kvm_vm *create_vm_two_memslots(void) diff --git a/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c b/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c index 27255880dabd..aded795d42be 100644 --- a/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c +++ b/tools/testing/selftests/kvm/s390/cpumodel_subfuncs_test.c @@ -291,7 +291,7 @@ int main(int argc, char *argv[]) ksft_test_result_pass("%s\n", testlist[idx].subfunc_name); free(array); } else { - ksft_test_result_skip("%s feature is not avaialable\n", + ksft_test_result_skip("%s feature is not available\n", testlist[idx].subfunc_name); } } diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c index cce2520af720..8edc1fca345b 100644 --- a/tools/testing/selftests/kvm/steal_time.c +++ b/tools/testing/selftests/kvm/steal_time.c @@ -118,7 +118,7 @@ static int64_t smccc(uint32_t func, uint64_t arg) { struct arm_smccc_res res; - smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res); + do_smccc(func, arg, 0, 0, 0, 0, 0, 0, &res); return res.a0; } diff --git a/tools/testing/selftests/kvm/x86/fastops_test.c b/tools/testing/selftests/kvm/x86/fastops_test.c index 2ac89d6c1e46..8926cfe0e209 100644 --- a/tools/testing/selftests/kvm/x86/fastops_test.c +++ b/tools/testing/selftests/kvm/x86/fastops_test.c @@ -8,14 +8,21 @@ * to set RFLAGS.CF based on whether or not the input is even or odd, so that * instructions like ADC and SBB are deterministic. 
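RFLAGS is captured * with PUSHFQ after each instruction so the flags produced with and without * forced emulation can be compared.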
*/ +#define fastop(__insn) \ + "bt $0, %[bt_val]\n\t" \ + __insn "\n\t" \ + "pushfq\n\t" \ + "pop %[flags]\n\t" + +#define flags_constraint(flags_val) [flags]"=r"(flags_val) +#define bt_constraint(__bt_val) [bt_val]"rm"((uint32_t)__bt_val) + #define guest_execute_fastop_1(FEP, insn, __val, __flags) \ ({ \ - __asm__ __volatile__("bt $0, %[val]\n\t" \ - FEP insn " %[val]\n\t" \ - "pushfq\n\t" \ - "pop %[flags]\n\t" \ - : [val]"+r"(__val), [flags]"=r"(__flags) \ - : : "cc", "memory"); \ + __asm__ __volatile__(fastop(FEP insn " %[val]") \ + : [val]"+r"(__val), flags_constraint(__flags) \ + : bt_constraint(__val) \ + : "cc", "memory"); \ }) #define guest_test_fastop_1(insn, type_t, __val) \ @@ -36,12 +43,10 @@ #define guest_execute_fastop_2(FEP, insn, __input, __output, __flags) \ ({ \ - __asm__ __volatile__("bt $0, %[output]\n\t" \ - FEP insn " %[input], %[output]\n\t" \ - "pushfq\n\t" \ - "pop %[flags]\n\t" \ - : [output]"+r"(__output), [flags]"=r"(__flags) \ - : [input]"r"(__input) : "cc", "memory"); \ + __asm__ __volatile__(fastop(FEP insn " %[input], %[output]") \ + : [output]"+r"(__output), flags_constraint(__flags) \ + : [input]"r"(__input), bt_constraint(__output) \ + : "cc", "memory"); \ }) #define guest_test_fastop_2(insn, type_t, __val1, __val2) \ @@ -63,12 +68,10 @@ #define guest_execute_fastop_cl(FEP, insn, __shift, __output, __flags) \ ({ \ - __asm__ __volatile__("bt $0, %[output]\n\t" \ - FEP insn " %%cl, %[output]\n\t" \ - "pushfq\n\t" \ - "pop %[flags]\n\t" \ - : [output]"+r"(__output), [flags]"=r"(__flags) \ - : "c"(__shift) : "cc", "memory"); \ + __asm__ __volatile__(fastop(FEP insn " %%cl, %[output]") \ + : [output]"+r"(__output), flags_constraint(__flags) \ + : "c"(__shift), bt_constraint(__output) \ + : "cc", "memory"); \ }) #define guest_test_fastop_cl(insn, type_t, __val1, __val2) \ @@ -89,6 +92,42 @@ ex_flags, insn, shift, (uint64_t)input, flags); \ }) +#define guest_execute_fastop_div(__KVM_ASM_SAFE, insn, __a, __d, __rm, __flags) \ +({ \ + uint64_t ign_error_code; \ + uint8_t vector; \ + \ + __asm__ __volatile__(fastop(__KVM_ASM_SAFE(insn " %[denom]")) \ + : "+a"(__a), "+d"(__d), flags_constraint(__flags), \ + KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \ + : [denom]"rm"(__rm), bt_constraint(__rm) \ + : "cc", "memory", KVM_ASM_SAFE_CLOBBERS); \ + vector; \ +}) + +#define guest_test_fastop_div(insn, type_t, __val1, __val2) \ +({ \ + type_t _a = __val1, _d = __val1, rm = __val2; \ + type_t a = _a, d = _d, ex_a = _a, ex_d = _d; \ + uint64_t flags, ex_flags; \ + uint8_t v, ex_v; \ + \ + ex_v = guest_execute_fastop_div(KVM_ASM_SAFE, insn, ex_a, ex_d, rm, ex_flags); \ + v = guest_execute_fastop_div(KVM_ASM_SAFE_FEP, insn, a, d, rm, flags); \ + \ + GUEST_ASSERT_EQ(v, ex_v); \ + __GUEST_ASSERT(v == ex_v, \ + "Wanted vector 0x%x for '%s 0x%lx:0x%lx/0x%lx', got 0x%x", \ + ex_v, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, v); \ + __GUEST_ASSERT(a == ex_a && d == ex_d, \ + "Wanted 0x%lx:0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx:0x%lx",\ + (uint64_t)ex_a, (uint64_t)ex_d, insn, (uint64_t)_a, \ + (uint64_t)_d, (uint64_t)rm, (uint64_t)a, (uint64_t)d); \ + __GUEST_ASSERT(v || ex_v || (flags == ex_flags), \ + "Wanted flags 0x%lx for '%s 0x%lx:0x%lx/0x%lx', got 0x%lx", \ + ex_flags, insn, (uint64_t)_a, (uint64_t)_d, (uint64_t)rm, flags);\ +}) + static const uint64_t vals[] = { 0, 1, @@ -115,14 +154,16 @@ do { \ guest_test_fastop_2("add" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("adc" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("and" suffix, type_t, 
vals[i], vals[j]); \ +if (sizeof(type_t) != 1) { \ guest_test_fastop_2("bsf" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("bsr" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("bt" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("btc" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("btr" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("bts" suffix, type_t, vals[i], vals[j]); \ - guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("imul" suffix, type_t, vals[i], vals[j]); \ +} \ + guest_test_fastop_2("cmp" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("or" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("sbb" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_2("sub" suffix, type_t, vals[i], vals[j]); \ @@ -136,12 +177,15 @@ do { \ guest_test_fastop_cl("sar" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_cl("shl" suffix, type_t, vals[i], vals[j]); \ guest_test_fastop_cl("shr" suffix, type_t, vals[i], vals[j]); \ + \ + guest_test_fastop_div("div" suffix, type_t, vals[i], vals[j]); \ } \ } \ } while (0) static void guest_code(void) { + guest_test_fastops(uint8_t, "b"); guest_test_fastops(uint16_t, "w"); guest_test_fastops(uint32_t, "l"); guest_test_fastops(uint64_t, "q"); diff --git a/tools/testing/selftests/kvm/x86/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86/hyperv_cpuid.c index c863a689aa98..3c21af811d8f 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_cpuid.c +++ b/tools/testing/selftests/kvm/x86/hyperv_cpuid.c @@ -45,7 +45,7 @@ static void test_hv_cpuid(struct kvm_vcpu *vcpu, bool evmcs_expected) TEST_ASSERT((entry->function >= 0x40000000) && (entry->function <= 0x40000082), - "function %x is our of supported range", + "function %x is out of supported range", entry->function); TEST_ASSERT(entry->index == 0, diff --git a/tools/testing/selftests/kvm/x86/hyperv_features.c b/tools/testing/selftests/kvm/x86/hyperv_features.c index 068e9c69710d..99d327084172 100644 --- a/tools/testing/selftests/kvm/x86/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86/hyperv_features.c @@ -54,12 +54,12 @@ static void guest_msr(struct msr_data *msr) if (msr->fault_expected) __GUEST_ASSERT(vector == GP_VECTOR, - "Expected #GP on %sMSR(0x%x), got vector '0x%x'", - msr->write ? "WR" : "RD", msr->idx, vector); + "Expected #GP on %sMSR(0x%x), got %s", + msr->write ? "WR" : "RD", msr->idx, ex_str(vector)); else __GUEST_ASSERT(!vector, - "Expected success on %sMSR(0x%x), got vector '0x%x'", - msr->write ? "WR" : "RD", msr->idx, vector); + "Expected success on %sMSR(0x%x), got %s", + msr->write ? 
"WR" : "RD", msr->idx, ex_str(vector)); if (vector || is_write_only_msr(msr->idx)) goto done; @@ -102,12 +102,12 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall) vector = __hyperv_hypercall(hcall->control, input, output, &res); if (hcall->ud_expected) { __GUEST_ASSERT(vector == UD_VECTOR, - "Expected #UD for control '%lu', got vector '0x%x'", - hcall->control, vector); + "Expected #UD for control '%lu', got %s", + hcall->control, ex_str(vector)); } else { __GUEST_ASSERT(!vector, - "Expected no exception for control '%lu', got vector '0x%x'", - hcall->control, vector); + "Expected no exception for control '%lu', got %s", + hcall->control, ex_str(vector)); GUEST_ASSERT_EQ(res, hcall->expect); } diff --git a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c index 0eb371c62ab8..e45c028d2a7e 100644 --- a/tools/testing/selftests/kvm/x86/monitor_mwait_test.c +++ b/tools/testing/selftests/kvm/x86/monitor_mwait_test.c @@ -30,12 +30,12 @@ do { \ \ if (fault_wanted) \ __GUEST_ASSERT((vector) == UD_VECTOR, \ - "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \ - testcase, vector); \ + "Expected #UD on " insn " for testcase '0x%x', got %s", \ + testcase, ex_str(vector)); \ else \ __GUEST_ASSERT(!(vector), \ - "Expected success on " insn " for testcase '0x%x', got '0x%x'", \ - testcase, vector); \ + "Expected success on " insn " for testcase '0x%x', got %s", \ + testcase, ex_str(vector)); \ } while (0) static void guest_monitor_wait(void *arg) diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c index 8aaaf25b6111..bb215230cc8a 100644 --- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c @@ -75,6 +75,11 @@ static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx) [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL }, [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL }, [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED }, + [INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX] = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX] = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_TOPDOWN_RETIRING_INDEX] = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL }, + [INTEL_ARCH_LBR_INSERTS_INDEX] = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL }, }; kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS); @@ -158,10 +163,18 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr switch (idx) { case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX: - GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED); + /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */ + if (this_pmu_has_errata(INSTRUCTIONS_RETIRED_OVERCOUNT)) + GUEST_ASSERT(count >= NUM_INSNS_RETIRED); + else + GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED); break; case INTEL_ARCH_BRANCHES_RETIRED_INDEX: - GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED); + /* Relax precise count check due to VM-EXIT/VM-ENTRY overcount issue */ + if (this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT)) + GUEST_ASSERT(count >= NUM_BRANCH_INSNS_RETIRED); + else + GUEST_ASSERT_EQ(count, 
NUM_BRANCH_INSNS_RETIRED); break; case INTEL_ARCH_LLC_REFERENCES_INDEX: case INTEL_ARCH_LLC_MISSES_INDEX: @@ -171,9 +184,12 @@ static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr fallthrough; case INTEL_ARCH_CPU_CYCLES_INDEX: case INTEL_ARCH_REFERENCE_CYCLES_INDEX: + case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX: + case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX: GUEST_ASSERT_NE(count, 0); break; case INTEL_ARCH_TOPDOWN_SLOTS_INDEX: + case INTEL_ARCH_TOPDOWN_RETIRING_INDEX: __GUEST_ASSERT(count >= NUM_INSNS_RETIRED, "Expected top-down slots >= %u, got count = %lu", NUM_INSNS_RETIRED, count); @@ -311,7 +327,7 @@ static void guest_test_arch_events(void) } static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, - uint8_t length, uint8_t unavailable_mask) + uint8_t length, uint32_t unavailable_mask) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; @@ -320,6 +336,9 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, if (!pmu_version) return; + unavailable_mask &= GENMASK(X86_PROPERTY_PMU_EVENTS_MASK.hi_bit, + X86_PROPERTY_PMU_EVENTS_MASK.lo_bit); + vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events, pmu_version, perf_capabilities); @@ -344,8 +363,8 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities, #define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \ - "Expected %s on " #insn "(0x%x), got vector %u", \ - expect_gp ? "#GP" : "no fault", msr, vector) \ + "Expected %s on " #insn "(0x%x), got %s", \ + expect_gp ? "#GP" : "no fault", msr, ex_str(vector)) \ #define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \ __GUEST_ASSERT(val == expected, \ @@ -575,6 +594,26 @@ static void test_intel_counters(void) }; /* + * To keep the total runtime reasonable, test only a handful of select, + * semi-arbitrary values for the mask of unavailable PMU events. Test + * 0 (all events available) and all ones (no events available) as well + * as alternating bit sequencues, e.g. to detect if KVM is checking the + * wrong bit(s). + */ + const uint32_t unavailable_masks[] = { + 0x0, + 0xffffffffu, + 0xaaaaaaaau, + 0x55555555u, + 0xf0f0f0f0u, + 0x0f0f0f0fu, + 0xa0a0a0a0u, + 0x0a0a0a0au, + 0x50505050u, + 0x05050505u, + }; + + /* * Test up to PMU v5, which is the current maximum version defined by * Intel, i.e. is the last version that is guaranteed to be backwards * compatible with KVM's existing behavior. @@ -611,16 +650,7 @@ static void test_intel_counters(void) pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n", v, perf_caps[i]); - /* - * To keep the total runtime reasonable, test every - * possible non-zero, non-reserved bitmap combination - * only with the native PMU version and the full bit - * vector length. - */ - if (v == pmu_version) { - for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++) - test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k); - } + /* * Test single bits for all PMU version and lengths up * the number of events +1 (to verify KVM doesn't do @@ -629,11 +659,8 @@ static void test_intel_counters(void) * ones i.e. all events being available and unavailable. 
*/ for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) { - test_arch_events(v, perf_caps[i], j, 0); - test_arch_events(v, perf_caps[i], j, 0xff); - - for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++) - test_arch_events(v, perf_caps[i], j, BIT(k)); + for (k = 1; k < ARRAY_SIZE(unavailable_masks); k++) + test_arch_events(v, perf_caps[i], j, unavailable_masks[k]); } pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n", diff --git a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c index c15513cd74d1..1c5b7611db24 100644 --- a/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86/pmu_event_filter_test.c @@ -214,8 +214,10 @@ static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event) do { \ uint64_t br = pmc_results.branches_retired; \ uint64_t ir = pmc_results.instructions_retired; \ + bool br_matched = this_pmu_has_errata(BRANCHES_RETIRED_OVERCOUNT) ? \ + br >= NUM_BRANCHES : br == NUM_BRANCHES; \ \ - if (br && br != NUM_BRANCHES) \ + if (br && !br_matched) \ pr_info("%s: Branch instructions retired = %lu (expected %u)\n", \ __func__, br, NUM_BRANCHES); \ TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)", \ diff --git a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c index a1f5ff45d518..7ff6f62e20a3 100644 --- a/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c +++ b/tools/testing/selftests/kvm/x86/vmx_pmu_caps_test.c @@ -29,7 +29,7 @@ static union perf_capabilities { u64 pebs_baseline:1; u64 perf_metrics:1; u64 pebs_output_pt_available:1; - u64 anythread_deprecated:1; + u64 pebs_timing_info:1; }; u64 capabilities; } host_cap; @@ -44,6 +44,7 @@ static const union perf_capabilities immutable_caps = { .pebs_arch_reg = 1, .pebs_format = -1, .pebs_baseline = 1, + .pebs_timing_info = 1, }; static const union perf_capabilities format_caps = { @@ -56,8 +57,8 @@ static void guest_test_perf_capabilities_gp(uint64_t val) uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val); __GUEST_ASSERT(vector == GP_VECTOR, - "Expected #GP for value '0x%lx', got vector '0x%x'", - val, vector); + "Expected #GP for value '0x%lx', got %s", + val, ex_str(vector)); } static void guest_code(uint64_t current_val) diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c index fdebff1165c7..3b4814c55722 100644 --- a/tools/testing/selftests/kvm/x86/xapic_state_test.c +++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c @@ -120,8 +120,8 @@ static void test_icr(struct xapic_vcpu *x) __test_icr(x, icr | i); /* - * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of - * vCPUs, not vcpu.id + 1. Arbitrarily use vector 0xff. + * Send all flavors of IPIs to non-existent vCPUs. Arbitrarily use + * vector 0xff. 
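+ * (Vectors are 8 bits wide, so 0xff is the highest possible vector.)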
*/ icr = APIC_INT_ASSERT | 0xff; for (i = 0; i < 0xff; i++) { diff --git a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c index c8a5c5e51661..d038c1571729 100644 --- a/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86/xcr0_cpuid_test.c @@ -81,13 +81,13 @@ static void guest_code(void) vector = xsetbv_safe(0, XFEATURE_MASK_FP); __GUEST_ASSERT(!vector, - "Expected success on XSETBV(FP), got vector '0x%x'", - vector); + "Expected success on XSETBV(FP), got %s", + ex_str(vector)); vector = xsetbv_safe(0, supported_xcr0); __GUEST_ASSERT(!vector, - "Expected success on XSETBV(0x%lx), got vector '0x%x'", - supported_xcr0, vector); + "Expected success on XSETBV(0x%lx), got %s", + supported_xcr0, ex_str(vector)); for (i = 0; i < 64; i++) { if (supported_xcr0 & BIT_ULL(i)) @@ -95,8 +95,8 @@ static void guest_code(void) vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i)); __GUEST_ASSERT(vector == GP_VECTOR, - "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'", - BIT_ULL(i), supported_xcr0, vector); + "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got %s", + BIT_ULL(i), supported_xcr0, ex_str(vector)); } GUEST_DONE(); diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h index 67d544aaa9a3..06c840e81c8b 100644 --- a/tools/testing/selftests/rseq/rseq-riscv.h +++ b/tools/testing/selftests/rseq/rseq-riscv.h @@ -8,6 +8,7 @@ * exception when executed in all modes. */ #include <endian.h> +#include <asm/fence.h> #if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __LITTLE_ENDIAN) : defined(__LITTLE_ENDIAN) #define RSEQ_SIG 0xf1401073 /* csrr mhartid, x0 */ @@ -24,8 +25,6 @@ #define REG_L __REG_SEL("ld ", "lw ") #define REG_S __REG_SEL("sd ", "sw ") -#define RISCV_FENCE(p, s) \ - __asm__ __volatile__ ("fence " #p "," #s : : : "memory") #define rseq_smp_mb() RISCV_FENCE(rw, rw) #define rseq_smp_rmb() RISCV_FENCE(r, r) #define rseq_smp_wmb() RISCV_FENCE(w, w) diff --git a/tools/testing/selftests/vfio/.gitignore b/tools/testing/selftests/vfio/.gitignore new file mode 100644 index 000000000000..7fadc19d3bca --- /dev/null +++ b/tools/testing/selftests/vfio/.gitignore @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +* +!/**/ +!*.c +!*.h +!*.S +!*.sh +!*.mk +!.gitignore +!Makefile diff --git a/tools/testing/selftests/vfio/Makefile b/tools/testing/selftests/vfio/Makefile new file mode 100644 index 000000000000..324ba0175a33 --- /dev/null +++ b/tools/testing/selftests/vfio/Makefile @@ -0,0 +1,21 @@ +CFLAGS = $(KHDR_INCLUDES) +TEST_GEN_PROGS += vfio_dma_mapping_test +TEST_GEN_PROGS += vfio_iommufd_setup_test +TEST_GEN_PROGS += vfio_pci_device_test +TEST_GEN_PROGS += vfio_pci_driver_test +TEST_PROGS_EXTENDED := run.sh +include ../lib.mk +include lib/libvfio.mk + +CFLAGS += -I$(top_srcdir)/tools/include +CFLAGS += -MD +CFLAGS += $(EXTRA_CFLAGS) + +$(TEST_GEN_PROGS): %: %.o $(LIBVFIO_O) + $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $< $(LIBVFIO_O) $(LDLIBS) -o $@ + +TEST_GEN_PROGS_O = $(patsubst %, %.o, $(TEST_GEN_PROGS)) +TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_PROGS_O) $(LIBVFIO_O)) +-include $(TEST_DEP_FILES) + +EXTRA_CLEAN += $(TEST_GEN_PROGS_O) $(TEST_DEP_FILES) diff --git a/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c b/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c new file mode 100644 index 000000000000..0ca2cbc2a316 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c @@ -0,0 +1,416 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +#include <stdint.h> +#include <unistd.h> + +#include <linux/bits.h> +#include <linux/errno.h> +#include <linux/idxd.h> +#include <linux/io.h> +#include <linux/pci_ids.h> +#include <linux/sizes.h> + +#include <vfio_util.h> + +#include "registers.h" + +/* Vectors 1+ are available for work queue completion interrupts. */ +#define MSIX_VECTOR 1 + +struct dsa_state { + /* Descriptors for copy and batch operations. */ + struct dsa_hw_desc batch[32]; + struct dsa_hw_desc copy[1024]; + + /* Completion records for copy and batch operations. */ + struct dsa_completion_record copy_completion; + struct dsa_completion_record batch_completion; + + /* Cached device registers (and derived data) for easy access */ + union gen_cap_reg gen_cap; + union wq_cap_reg wq_cap; + union group_cap_reg group_cap; + union engine_cap_reg engine_cap; + union offsets_reg table_offsets; + void *wqcfg_table; + void *grpcfg_table; + u64 max_batches; + u64 max_copies_per_batch; + + /* The number of ongoing memcpy operations. */ + u64 memcpy_count; + + /* Buffers used by dsa_send_msi() to generate an interrupt */ + u64 send_msi_src; + u64 send_msi_dst; +}; + +static inline struct dsa_state *to_dsa_state(struct vfio_pci_device *device) +{ + return device->driver.region.vaddr; +} + +static bool dsa_int_handle_request_required(struct vfio_pci_device *device) +{ + void *bar0 = device->bars[0].vaddr; + union gen_cap_reg gen_cap; + u32 cmd_cap; + + gen_cap.bits = readq(bar0 + IDXD_GENCAP_OFFSET); + if (!gen_cap.cmd_cap) + return false; + + cmd_cap = readl(bar0 + IDXD_CMDCAP_OFFSET); + return (cmd_cap >> IDXD_CMD_REQUEST_INT_HANDLE) & 1; +} + +static int dsa_probe(struct vfio_pci_device *device) +{ + if (!vfio_pci_device_match(device, PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_DSA_SPR0)) + return -EINVAL; + + if (dsa_int_handle_request_required(device)) { + printf("Device requires requesting interrupt handles\n"); + return -EINVAL; + } + + return 0; +} + +static void dsa_check_sw_err(struct vfio_pci_device *device) +{ + void *reg = device->bars[0].vaddr + IDXD_SWERR_OFFSET; + union sw_err_reg err = {}; + int i; + + for (i = 0; i < ARRAY_SIZE(err.bits); i++) { + err.bits[i] = readq(reg + offsetof(union sw_err_reg, bits[i])); + + /* No errors */ + if (i == 0 && !err.valid) + return; + } + + fprintf(stderr, "SWERR: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n", + err.bits[0], err.bits[1], err.bits[2], err.bits[3]); + + fprintf(stderr, " valid: 0x%x\n", err.valid); + fprintf(stderr, " overflow: 0x%x\n", err.overflow); + fprintf(stderr, " desc_valid: 0x%x\n", err.desc_valid); + fprintf(stderr, " wq_idx_valid: 0x%x\n", err.wq_idx_valid); + fprintf(stderr, " batch: 0x%x\n", err.batch); + fprintf(stderr, " fault_rw: 0x%x\n", err.fault_rw); + fprintf(stderr, " priv: 0x%x\n", err.priv); + fprintf(stderr, " error: 0x%x\n", err.error); + fprintf(stderr, " wq_idx: 0x%x\n", err.wq_idx); + fprintf(stderr, " operation: 0x%x\n", err.operation); + fprintf(stderr, " pasid: 0x%x\n", err.pasid); + fprintf(stderr, " batch_idx: 0x%x\n", err.batch_idx); + fprintf(stderr, " invalid_flags: 0x%x\n", err.invalid_flags); + fprintf(stderr, " fault_addr: 0x%lx\n", err.fault_addr); + + VFIO_FAIL("Software Error Detected!\n"); +} + +static void dsa_command(struct vfio_pci_device *device, u32 cmd) +{ + union idxd_command_reg cmd_reg = { .cmd = cmd }; + u32 sleep_ms = 1, attempts = 5000 / sleep_ms; + void *bar0 = device->bars[0].vaddr; + u32 status; + u8 err; + + writel(cmd_reg.bits, bar0 + IDXD_CMD_OFFSET); + + for (;;) { + 
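+ /* + * Poll CMDSTS until the device clears the ACTIVE bit, checking for + * software errors on each iteration and giving up after ~5 seconds. + */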
dsa_check_sw_err(device); + + status = readl(bar0 + IDXD_CMDSTS_OFFSET); + if (!(status & IDXD_CMDSTS_ACTIVE)) + break; + + VFIO_ASSERT_GT(--attempts, 0); + usleep(sleep_ms * 1000); + } + + err = status & IDXD_CMDSTS_ERR_MASK; + VFIO_ASSERT_EQ(err, 0, "Error issuing command 0x%x: 0x%x\n", cmd, err); +} + +static void dsa_wq_init(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + union wq_cap_reg wq_cap = dsa->wq_cap; + union wqcfg wqcfg; + u64 wqcfg_size; + int i; + + VFIO_ASSERT_GT((u32)wq_cap.num_wqs, 0); + + wqcfg = (union wqcfg) { + .wq_size = wq_cap.total_wq_size, + .mode = 1, + .priority = 1, + /* + * Disable Address Translation Service (if enabled) so that VFIO + * selftests using this driver can generate I/O page faults. + */ + .wq_ats_disable = wq_cap.wq_ats_support, + .max_xfer_shift = dsa->gen_cap.max_xfer_shift, + .max_batch_shift = dsa->gen_cap.max_batch_shift, + .op_config[0] = BIT(DSA_OPCODE_MEMMOVE) | BIT(DSA_OPCODE_BATCH), + }; + + wqcfg_size = 1UL << (wq_cap.wqcfg_size + IDXD_WQCFG_MIN); + + for (i = 0; i < wqcfg_size / sizeof(wqcfg.bits[0]); i++) + writel(wqcfg.bits[i], dsa->wqcfg_table + offsetof(union wqcfg, bits[i])); +} + +static void dsa_group_init(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + union group_cap_reg group_cap = dsa->group_cap; + union engine_cap_reg engine_cap = dsa->engine_cap; + + VFIO_ASSERT_GT((u32)group_cap.num_groups, 0); + VFIO_ASSERT_GT((u32)engine_cap.num_engines, 0); + + /* Assign work queue 0 and engine 0 to group 0 */ + writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, wqs[0])); + writeq(1, dsa->grpcfg_table + offsetof(struct grpcfg, engines)); +} + +static void dsa_register_cache_init(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + void *bar0 = device->bars[0].vaddr; + + dsa->gen_cap.bits = readq(bar0 + IDXD_GENCAP_OFFSET); + dsa->wq_cap.bits = readq(bar0 + IDXD_WQCAP_OFFSET); + dsa->group_cap.bits = readq(bar0 + IDXD_GRPCAP_OFFSET); + dsa->engine_cap.bits = readq(bar0 + IDXD_ENGCAP_OFFSET); + + dsa->table_offsets.bits[0] = readq(bar0 + IDXD_TABLE_OFFSET); + dsa->table_offsets.bits[1] = readq(bar0 + IDXD_TABLE_OFFSET + 8); + + dsa->wqcfg_table = bar0 + dsa->table_offsets.wqcfg * IDXD_TABLE_MULT; + dsa->grpcfg_table = bar0 + dsa->table_offsets.grpcfg * IDXD_TABLE_MULT; + + dsa->max_batches = 1U << (dsa->wq_cap.total_wq_size + IDXD_WQCFG_MIN); + dsa->max_batches = min(dsa->max_batches, ARRAY_SIZE(dsa->batch)); + + dsa->max_copies_per_batch = 1UL << dsa->gen_cap.max_batch_shift; + dsa->max_copies_per_batch = min(dsa->max_copies_per_batch, ARRAY_SIZE(dsa->copy)); +} + +static void dsa_init(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + + VFIO_ASSERT_GE(device->driver.region.size, sizeof(*dsa)); + + vfio_pci_config_writew(device, PCI_COMMAND, + PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE); + + dsa_command(device, IDXD_CMD_RESET_DEVICE); + + dsa_register_cache_init(device); + dsa_wq_init(device); + dsa_group_init(device); + + dsa_command(device, IDXD_CMD_ENABLE_DEVICE); + dsa_command(device, IDXD_CMD_ENABLE_WQ); + + vfio_pci_msix_enable(device, MSIX_VECTOR, 1); + + device->driver.max_memcpy_count = + dsa->max_batches * dsa->max_copies_per_batch; + device->driver.max_memcpy_size = 1UL << dsa->gen_cap.max_xfer_shift; + device->driver.msi = MSIX_VECTOR; +} + +static void dsa_remove(struct vfio_pci_device *device) +{ + dsa_command(device, IDXD_CMD_RESET_DEVICE); + 
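+ /* The reset above aborts any in-flight work, so MSI-X can be torn down safely. */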
vfio_pci_msix_disable(device); +} + +static int dsa_completion_wait(struct vfio_pci_device *device, + struct dsa_completion_record *completion) +{ + u8 status; + + for (;;) { + dsa_check_sw_err(device); + + status = READ_ONCE(completion->status); + if (status) + break; + + usleep(1000); + } + + if (status == DSA_COMP_SUCCESS) + return 0; + + printf("Error detected during memcpy operation: 0x%x\n", status); + return -1; +} + +static void dsa_copy_desc_init(struct vfio_pci_device *device, + struct dsa_hw_desc *desc, + iova_t src, iova_t dst, u64 size, + bool interrupt) +{ + struct dsa_state *dsa = to_dsa_state(device); + u16 flags; + + flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; + + if (interrupt) + flags |= IDXD_OP_FLAG_RCI; + + *desc = (struct dsa_hw_desc) { + .opcode = DSA_OPCODE_MEMMOVE, + .flags = flags, + .priv = 1, + .src_addr = src, + .dst_addr = dst, + .xfer_size = size, + .completion_addr = to_iova(device, &dsa->copy_completion), + .int_handle = interrupt ? MSIX_VECTOR : 0, + }; +} + +static void dsa_batch_desc_init(struct vfio_pci_device *device, + struct dsa_hw_desc *desc, + u64 count) +{ + struct dsa_state *dsa = to_dsa_state(device); + + *desc = (struct dsa_hw_desc) { + .opcode = DSA_OPCODE_BATCH, + .flags = IDXD_OP_FLAG_CRAV, + .priv = 1, + .completion_addr = to_iova(device, &dsa->batch_completion), + .desc_list_addr = to_iova(device, &dsa->copy[0]), + .desc_count = count, + }; +} + +static void dsa_desc_write(struct vfio_pci_device *device, struct dsa_hw_desc *desc) +{ + /* Write the contents (not address) of the 64-byte descriptor to the device. */ + iosubmit_cmds512(device->bars[2].vaddr, desc, 1); +} + +static void dsa_memcpy_one(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, bool interrupt) +{ + struct dsa_state *dsa = to_dsa_state(device); + + memset(&dsa->copy_completion, 0, sizeof(dsa->copy_completion)); + + dsa_copy_desc_init(device, &dsa->copy[0], src, dst, size, interrupt); + dsa_desc_write(device, &dsa->copy[0]); +} + +static void dsa_memcpy_batch(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, u64 count) +{ + struct dsa_state *dsa = to_dsa_state(device); + int i; + + memset(&dsa->batch_completion, 0, sizeof(dsa->batch_completion)); + + for (i = 0; i < ARRAY_SIZE(dsa->copy); i++) { + struct dsa_hw_desc *copy_desc = &dsa->copy[i]; + + dsa_copy_desc_init(device, copy_desc, src, dst, size, false); + + /* Don't request completions for individual copies. */ + copy_desc->flags &= ~IDXD_OP_FLAG_RCR; + } + + for (i = 0; i < ARRAY_SIZE(dsa->batch) && count; i++) { + struct dsa_hw_desc *batch_desc = &dsa->batch[i]; + int nr_copies; + + nr_copies = min(count, dsa->max_copies_per_batch); + count -= nr_copies; + + /* + * Batches must have at least 2 copies, so handle the case where + * there is exactly 1 copy left by doing one less copy in this + * batch and then 2 in the next. + */ + if (count == 1) { + nr_copies--; + count++; + } + + dsa_batch_desc_init(device, batch_desc, nr_copies); + + /* Request a completion for the last batch. */ + if (!count) + batch_desc->flags |= IDXD_OP_FLAG_RCR; + + dsa_desc_write(device, batch_desc); + } + + VFIO_ASSERT_EQ(count, 0, "Failed to start %lu copies.\n", count); +} + +static void dsa_memcpy_start(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, u64 count) +{ + struct dsa_state *dsa = to_dsa_state(device); + + /* DSA devices require at least 2 copies per batch. 
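A single copy is
+	 * therefore submitted as one plain copy descriptor rather than a
+	 * batch.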
*/ + if (count == 1) + dsa_memcpy_one(device, src, dst, size, false); + else + dsa_memcpy_batch(device, src, dst, size, count); + + dsa->memcpy_count = count; +} + +static int dsa_memcpy_wait(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + int r; + + if (dsa->memcpy_count == 1) + r = dsa_completion_wait(device, &dsa->copy_completion); + else + r = dsa_completion_wait(device, &dsa->batch_completion); + + dsa->memcpy_count = 0; + + return r; +} + +static void dsa_send_msi(struct vfio_pci_device *device) +{ + struct dsa_state *dsa = to_dsa_state(device); + + dsa_memcpy_one(device, + to_iova(device, &dsa->send_msi_src), + to_iova(device, &dsa->send_msi_dst), + sizeof(dsa->send_msi_src), true); + + VFIO_ASSERT_EQ(dsa_completion_wait(device, &dsa->copy_completion), 0); +} + +const struct vfio_pci_driver_ops dsa_ops = { + .name = "dsa", + .probe = dsa_probe, + .init = dsa_init, + .remove = dsa_remove, + .memcpy_start = dsa_memcpy_start, + .memcpy_wait = dsa_memcpy_wait, + .send_msi = dsa_send_msi, +}; diff --git a/tools/testing/selftests/vfio/lib/drivers/dsa/registers.h b/tools/testing/selftests/vfio/lib/drivers/dsa/registers.h new file mode 120000 index 000000000000..bde657c3c2af --- /dev/null +++ b/tools/testing/selftests/vfio/lib/drivers/dsa/registers.h @@ -0,0 +1 @@ +../../../../../../../drivers/dma/idxd/registers.h
\ No newline at end of file diff --git a/tools/testing/selftests/vfio/lib/drivers/ioat/hw.h b/tools/testing/selftests/vfio/lib/drivers/ioat/hw.h new file mode 120000 index 000000000000..8ab52ddd4458 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/drivers/ioat/hw.h @@ -0,0 +1 @@ +../../../../../../../drivers/dma/ioat/hw.h
\ No newline at end of file diff --git a/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c b/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c new file mode 100644 index 000000000000..c3b91d9b1f59 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <stdint.h> +#include <unistd.h> + +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/pci_ids.h> +#include <linux/sizes.h> + +#include <vfio_util.h> + +#include "hw.h" +#include "registers.h" + +#define IOAT_DMACOUNT_MAX UINT16_MAX + +struct ioat_state { + /* Single descriptor used to issue DMA memcpy operations */ + struct ioat_dma_descriptor desc; + + /* Copy buffers used by ioat_send_msi() to generate an interrupt. */ + u64 send_msi_src; + u64 send_msi_dst; +}; + +static inline struct ioat_state *to_ioat_state(struct vfio_pci_device *device) +{ + return device->driver.region.vaddr; +} + +static inline void *ioat_channel_registers(struct vfio_pci_device *device) +{ + return device->bars[0].vaddr + IOAT_CHANNEL_MMIO_SIZE; +} + +static int ioat_probe(struct vfio_pci_device *device) +{ + u8 version; + int r; + + if (!vfio_pci_device_match(device, PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_IOAT_SKX)) + return -EINVAL; + + VFIO_ASSERT_NOT_NULL(device->bars[0].vaddr); + + version = readb(device->bars[0].vaddr + IOAT_VER_OFFSET); + switch (version) { + case IOAT_VER_3_2: + case IOAT_VER_3_3: + r = 0; + break; + default: + printf("ioat: Unsupported version: 0x%x\n", version); + r = -EINVAL; + } + return r; +} + +static u64 ioat_channel_status(void *bar) +{ + return readq(bar + IOAT_CHANSTS_OFFSET) & IOAT_CHANSTS_STATUS; +} + +static void ioat_clear_errors(struct vfio_pci_device *device) +{ + void *registers = ioat_channel_registers(device); + u32 errors; + + errors = vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET); + vfio_pci_config_writel(device, IOAT_PCI_CHANERR_INT_OFFSET, errors); + + errors = vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET); + vfio_pci_config_writel(device, IOAT_PCI_CHANERR_INT_OFFSET, errors); + + errors = readl(registers + IOAT_CHANERR_OFFSET); + writel(errors, registers + IOAT_CHANERR_OFFSET); +} + +static void ioat_reset(struct vfio_pci_device *device) +{ + void *registers = ioat_channel_registers(device); + u32 sleep_ms = 1, attempts = 5000 / sleep_ms; + u8 chancmd; + + ioat_clear_errors(device); + + writeb(IOAT_CHANCMD_RESET, registers + IOAT2_CHANCMD_OFFSET); + + for (;;) { + chancmd = readb(registers + IOAT2_CHANCMD_OFFSET); + if (!(chancmd & IOAT_CHANCMD_RESET)) + break; + + VFIO_ASSERT_GT(--attempts, 0); + usleep(sleep_ms * 1000); + } + + VFIO_ASSERT_EQ(ioat_channel_status(registers), IOAT_CHANSTS_HALTED); +} + +static void ioat_init(struct vfio_pci_device *device) +{ + struct ioat_state *ioat = to_ioat_state(device); + u8 intrctrl; + + VFIO_ASSERT_GE(device->driver.region.size, sizeof(*ioat)); + + vfio_pci_config_writew(device, PCI_COMMAND, + PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE); + + ioat_reset(device); + + /* Enable the use of MSI-x interrupts for channel interrupts. 
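This driver
+	 * expects them on MSI-x vector 0 (see driver.msi below).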
*/ + intrctrl = IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + writeb(intrctrl, device->bars[0].vaddr + IOAT_INTRCTRL_OFFSET); + + vfio_pci_msix_enable(device, 0, device->msix_info.count); + + device->driver.msi = 0; + device->driver.max_memcpy_size = + 1UL << readb(device->bars[0].vaddr + IOAT_XFERCAP_OFFSET); + device->driver.max_memcpy_count = IOAT_DMACOUNT_MAX; +} + +static void ioat_remove(struct vfio_pci_device *device) +{ + ioat_reset(device); + vfio_pci_msix_disable(device); +} + +static void ioat_handle_error(struct vfio_pci_device *device) +{ + void *registers = ioat_channel_registers(device); + + printf("Error detected during memcpy operation!\n" + " CHANERR: 0x%x\n" + " CHANERR_INT: 0x%x\n" + " DMAUNCERRSTS: 0x%x\n", + readl(registers + IOAT_CHANERR_OFFSET), + vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET), + vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET)); + + ioat_reset(device); +} + +static int ioat_memcpy_wait(struct vfio_pci_device *device) +{ + void *registers = ioat_channel_registers(device); + u64 status; + int r = 0; + + /* Wait until all operations complete. */ + for (;;) { + status = ioat_channel_status(registers); + if (status == IOAT_CHANSTS_DONE) + break; + + if (status == IOAT_CHANSTS_HALTED) { + ioat_handle_error(device); + return -1; + } + } + + /* Put the channel into the SUSPENDED state. */ + writeb(IOAT_CHANCMD_SUSPEND, registers + IOAT2_CHANCMD_OFFSET); + for (;;) { + status = ioat_channel_status(registers); + if (status == IOAT_CHANSTS_SUSPENDED) + break; + } + + return r; +} + +static void __ioat_memcpy_start(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, + u16 count, bool interrupt) +{ + void *registers = ioat_channel_registers(device); + struct ioat_state *ioat = to_ioat_state(device); + u64 desc_iova; + u16 chanctrl; + + desc_iova = to_iova(device, &ioat->desc); + ioat->desc = (struct ioat_dma_descriptor) { + .ctl_f.op = IOAT_OP_COPY, + .ctl_f.int_en = interrupt, + .src_addr = src, + .dst_addr = dst, + .size = size, + .next = desc_iova, + }; + + /* Tell the device the address of the descriptor. */ + writeq(desc_iova, registers + IOAT2_CHAINADDR_OFFSET); + + /* (Re)Enable the channel interrupt and abort on any errors */ + chanctrl = IOAT_CHANCTRL_INT_REARM | IOAT_CHANCTRL_ANY_ERR_ABORT_EN; + writew(chanctrl, registers + IOAT_CHANCTRL_OFFSET); + + /* Kick off @count DMA copy operation(s). */ + writew(count, registers + IOAT_CHAN_DMACOUNT_OFFSET); +} + +static void ioat_memcpy_start(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, + u64 count) +{ + __ioat_memcpy_start(device, src, dst, size, count, false); +} + +static void ioat_send_msi(struct vfio_pci_device *device) +{ + struct ioat_state *ioat = to_ioat_state(device); + + __ioat_memcpy_start(device, + to_iova(device, &ioat->send_msi_src), + to_iova(device, &ioat->send_msi_dst), + sizeof(ioat->send_msi_src), 1, true); + + VFIO_ASSERT_EQ(ioat_memcpy_wait(device), 0); +} + +const struct vfio_pci_driver_ops ioat_ops = { + .name = "ioat", + .probe = ioat_probe, + .init = ioat_init, + .remove = ioat_remove, + .memcpy_start = ioat_memcpy_start, + .memcpy_wait = ioat_memcpy_wait, + .send_msi = ioat_send_msi, +}; diff --git a/tools/testing/selftests/vfio/lib/drivers/ioat/registers.h b/tools/testing/selftests/vfio/lib/drivers/ioat/registers.h new file mode 120000 index 000000000000..0b809cfd8fe6 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/drivers/ioat/registers.h @@ -0,0 +1 @@ +../../../../../../../drivers/dma/ioat/registers.h
\ No newline at end of file diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h new file mode 100644 index 000000000000..ed31606e01b7 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H +#define SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H + +#include <fcntl.h> +#include <string.h> +#include <linux/vfio.h> +#include <linux/list.h> +#include <linux/pci_regs.h> + +#include "../../../kselftest.h" + +#define VFIO_LOG_AND_EXIT(...) do { \ + fprintf(stderr, " " __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(KSFT_FAIL); \ +} while (0) + +#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do { \ + typeof(_lhs) __lhs = (_lhs); \ + typeof(_rhs) __rhs = (_rhs); \ + \ + if (__lhs _op __rhs) \ + break; \ + \ + fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__); \ + fprintf(stderr, " Expression: " #_lhs " " #_op " " #_rhs "\n"); \ + fprintf(stderr, " Observed: %#lx %s %#lx\n", \ + (u64)__lhs, #_op, (u64)__rhs); \ + fprintf(stderr, " [errno: %d - %s]\n", errno, strerror(errno)); \ + VFIO_LOG_AND_EXIT(__VA_ARGS__); \ +} while (0) + +#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__) +#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__) +#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__) +#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__) +#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__) +#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__) +#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__) +#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__) +#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, _a, ##__VA_ARGS__) +#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, _a, ##__VA_ARGS__) + +#define VFIO_FAIL(_fmt, ...) do { \ + fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__); \ + VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__); \ +} while (0) + +struct vfio_iommu_mode { + const char *name; + const char *container_path; + unsigned long iommu_type; +}; + +/* + * Generator for VFIO selftests fixture variants that replicate across all + * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE() + * which should then use FIXTURE_VARIANT_ADD() to create the variant. + */ +#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \ +FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \ +FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \ +FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \ +FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \ +FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__) + +struct vfio_pci_bar { + struct vfio_region_info info; + void *vaddr; +}; + +typedef u64 iova_t; + +#define INVALID_IOVA UINT64_MAX + +struct vfio_dma_region { + struct list_head link; + void *vaddr; + iova_t iova; + u64 size; +}; + +struct vfio_pci_device; + +struct vfio_pci_driver_ops { + const char *name; + + /** + * @probe() - Check if the driver supports the given device. + * + * Return: 0 on success, non-0 on failure. + */ + int (*probe)(struct vfio_pci_device *device); + + /** + * @init() - Initialize the driver for @device. + * + * Must be called after device->driver.region has been initialized. 
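+	 * The region provides backing memory for driver state, e.g. in-memory
+	 * descriptors.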
+ */ + void (*init)(struct vfio_pci_device *device); + + /** + * remove() - Deinitialize the driver for @device. + */ + void (*remove)(struct vfio_pci_device *device); + + /** + * memcpy_start() - Kick off @count repeated memcpy operations from + * [@src, @src + @size) to [@dst, @dst + @size). + * + * Guarantees: + * - The device will attempt DMA reads on [src, src + size). + * - The device will attempt DMA writes on [dst, dst + size). + * - The device will not generate any interrupts. + * + * memcpy_start() returns immediately, it does not wait for the + * copies to complete. + */ + void (*memcpy_start)(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, u64 count); + + /** + * memcpy_wait() - Wait until the memcpy operations started by + * memcpy_start() have finished. + * + * Guarantees: + * - All in-flight DMAs initiated by memcpy_start() are fully complete + * before memcpy_wait() returns. + * + * Returns non-0 if the driver detects that an error occurred during the + * memcpy, 0 otherwise. + */ + int (*memcpy_wait)(struct vfio_pci_device *device); + + /** + * send_msi() - Make the device send the MSI device->driver.msi. + * + * Guarantees: + * - The device will send the MSI once. + */ + void (*send_msi)(struct vfio_pci_device *device); +}; + +struct vfio_pci_driver { + const struct vfio_pci_driver_ops *ops; + bool initialized; + bool memcpy_in_progress; + + /* Region to be used by the driver (e.g. for in-memory descriptors) */ + struct vfio_dma_region region; + + /* The maximum size that can be passed to memcpy_start(). */ + u64 max_memcpy_size; + + /* The maximum count that can be passed to memcpy_start(). */ + u64 max_memcpy_count; + + /* The MSI vector the device will signal in ops->send_msi(). */ + int msi; +}; + +struct vfio_pci_device { + int fd; + + const struct vfio_iommu_mode *iommu_mode; + int group_fd; + int container_fd; + + int iommufd; + u32 ioas_id; + + struct vfio_device_info info; + struct vfio_region_info config_space; + struct vfio_pci_bar bars[PCI_STD_NUM_BARS]; + + struct vfio_irq_info msi_info; + struct vfio_irq_info msix_info; + + struct list_head dma_regions; + + /* eventfds for MSI and MSI-x interrupts */ + int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1]; + + struct vfio_pci_driver driver; +}; + +/* + * Return the BDF string of the device that the test should use. + * + * If a BDF string is provided by the user on the command line (as the last + * element of argv[]), then this function will return that and decrement argc + * by 1. + * + * Otherwise this function will attempt to use the environment variable + * $VFIO_SELFTESTS_BDF. + * + * If BDF cannot be determined then the test will exit with KSFT_SKIP. 
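+ *
+ * For example (using an illustrative address):
+ *
+ *   ./vfio_dma_mapping_test 0000:00:04.0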
+ */ +const char *vfio_selftests_get_bdf(int *argc, char *argv[]); +const char *vfio_pci_get_cdev_path(const char *bdf); + +extern const char *default_iommu_mode; + +struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode); +void vfio_pci_device_cleanup(struct vfio_pci_device *device); +void vfio_pci_device_reset(struct vfio_pci_device *device); + +void vfio_pci_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region); +void vfio_pci_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region); + +void vfio_pci_config_access(struct vfio_pci_device *device, bool write, + size_t config, size_t size, void *data); + +#define vfio_pci_config_read(_device, _offset, _type) ({ \ + _type __data; \ + vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data); \ + __data; \ +}) + +#define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8) +#define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16) +#define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32) + +#define vfio_pci_config_write(_device, _offset, _value, _type) do { \ + _type __data = (_value); \ + vfio_pci_config_access((_device), true, _offset, sizeof(_type), &__data); \ +} while (0) + +#define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8) +#define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16) +#define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32) + +void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index, + u32 vector, int count); +void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index); +void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector); + +static inline void fcntl_set_nonblock(int fd) +{ + int r; + + r = fcntl(fd, F_GETFL, 0); + VFIO_ASSERT_NE(r, -1, "F_GETFL failed for fd %d\n", fd); + + r = fcntl(fd, F_SETFL, r | O_NONBLOCK); + VFIO_ASSERT_NE(r, -1, "F_SETFL O_NONBLOCK failed for fd %d\n", fd); +} + +static inline void vfio_pci_msi_enable(struct vfio_pci_device *device, + u32 vector, int count) +{ + vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, vector, count); +} + +static inline void vfio_pci_msi_disable(struct vfio_pci_device *device) +{ + vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX); +} + +static inline void vfio_pci_msix_enable(struct vfio_pci_device *device, + u32 vector, int count) +{ + vfio_pci_irq_enable(device, VFIO_PCI_MSIX_IRQ_INDEX, vector, count); +} + +static inline void vfio_pci_msix_disable(struct vfio_pci_device *device) +{ + vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX); +} + +iova_t __to_iova(struct vfio_pci_device *device, void *vaddr); +iova_t to_iova(struct vfio_pci_device *device, void *vaddr); + +static inline bool vfio_pci_device_match(struct vfio_pci_device *device, + u16 vendor_id, u16 device_id) +{ + return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) && + (device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID)); +} + +void vfio_pci_driver_probe(struct vfio_pci_device *device); +void vfio_pci_driver_init(struct vfio_pci_device *device); +void vfio_pci_driver_remove(struct vfio_pci_device *device); +int vfio_pci_driver_memcpy(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size); +void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, + u64 count); +int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device); +void vfio_pci_driver_send_msi(struct 
vfio_pci_device *device); + +#endif /* SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H */ diff --git a/tools/testing/selftests/vfio/lib/libvfio.mk b/tools/testing/selftests/vfio/lib/libvfio.mk new file mode 100644 index 000000000000..5d11c3a89a28 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/libvfio.mk @@ -0,0 +1,24 @@ +include $(top_srcdir)/scripts/subarch.include +ARCH ?= $(SUBARCH) + +VFIO_DIR := $(selfdir)/vfio + +LIBVFIO_C := lib/vfio_pci_device.c +LIBVFIO_C += lib/vfio_pci_driver.c + +ifeq ($(ARCH:x86_64=x86),x86) +LIBVFIO_C += lib/drivers/ioat/ioat.c +LIBVFIO_C += lib/drivers/dsa/dsa.c +endif + +LIBVFIO_O := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBVFIO_C)) + +LIBVFIO_O_DIRS := $(shell dirname $(LIBVFIO_O) | uniq) +$(shell mkdir -p $(LIBVFIO_O_DIRS)) + +CFLAGS += -I$(VFIO_DIR)/lib/include + +$(LIBVFIO_O): $(OUTPUT)/%.o : $(VFIO_DIR)/%.c + $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ + +EXTRA_CLEAN += $(LIBVFIO_O) diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c new file mode 100644 index 000000000000..0921b2451ba5 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <dirent.h> +#include <fcntl.h> +#include <libgen.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <sys/eventfd.h> +#include <sys/ioctl.h> +#include <sys/mman.h> + +#include <uapi/linux/types.h> +#include <linux/limits.h> +#include <linux/mman.h> +#include <linux/types.h> +#include <linux/vfio.h> +#include <linux/iommufd.h> + +#include "../../../kselftest.h" +#include <vfio_util.h> + +#define PCI_SYSFS_PATH "/sys/bus/pci/devices" + +#define ioctl_assert(_fd, _op, _arg) do { \ + void *__arg = (_arg); \ + int __ret = ioctl((_fd), (_op), (__arg)); \ + VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \ +} while (0) + +iova_t __to_iova(struct vfio_pci_device *device, void *vaddr) +{ + struct vfio_dma_region *region; + + list_for_each_entry(region, &device->dma_regions, link) { + if (vaddr < region->vaddr) + continue; + + if (vaddr >= region->vaddr + region->size) + continue; + + return region->iova + (vaddr - region->vaddr); + } + + return INVALID_IOVA; +} + +iova_t to_iova(struct vfio_pci_device *device, void *vaddr) +{ + iova_t iova; + + iova = __to_iova(device, vaddr); + VFIO_ASSERT_NE(iova, INVALID_IOVA, "%p is not mapped into device.\n", vaddr); + + return iova; +} + +static void vfio_pci_irq_set(struct vfio_pci_device *device, + u32 index, u32 vector, u32 count, int *fds) +{ + u8 buf[sizeof(struct vfio_irq_set) + sizeof(int) * count] = {}; + struct vfio_irq_set *irq = (void *)&buf; + int *irq_fds = (void *)&irq->data; + + irq->argsz = sizeof(buf); + irq->flags = VFIO_IRQ_SET_ACTION_TRIGGER; + irq->index = index; + irq->start = vector; + irq->count = count; + + if (count) { + irq->flags |= VFIO_IRQ_SET_DATA_EVENTFD; + memcpy(irq_fds, fds, sizeof(int) * count); + } else { + irq->flags |= VFIO_IRQ_SET_DATA_NONE; + } + + ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, irq); +} + +void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector) +{ + struct vfio_irq_set irq = { + .argsz = sizeof(irq), + .flags = VFIO_IRQ_SET_ACTION_TRIGGER | VFIO_IRQ_SET_DATA_NONE, + .index = index, + .start = vector, + .count = 1, + }; + + ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, &irq); +} + +static void check_supported_irq_index(u32 index) +{ + /* VFIO selftests only supports MSI and MSI-x for now. 
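INTx
+	 * (VFIO_PCI_INTX_IRQ_INDEX) is not yet supported.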
*/ + VFIO_ASSERT_TRUE(index == VFIO_PCI_MSI_IRQ_INDEX || + index == VFIO_PCI_MSIX_IRQ_INDEX, + "Unsupported IRQ index: %u\n", index); +} + +void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index, u32 vector, + int count) +{ + int i; + + check_supported_irq_index(index); + + for (i = vector; i < vector + count; i++) { + VFIO_ASSERT_LT(device->msi_eventfds[i], 0); + device->msi_eventfds[i] = eventfd(0, 0); + VFIO_ASSERT_GE(device->msi_eventfds[i], 0); + } + + vfio_pci_irq_set(device, index, vector, count, device->msi_eventfds + vector); +} + +void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index) +{ + int i; + + check_supported_irq_index(index); + + for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) { + if (device->msi_eventfds[i] < 0) + continue; + + VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0); + device->msi_eventfds[i] = -1; + } + + vfio_pci_irq_set(device, index, 0, 0, NULL); +} + +static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index, + struct vfio_irq_info *irq_info) +{ + irq_info->argsz = sizeof(*irq_info); + irq_info->index = index; + + ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info); +} + +static void vfio_iommu_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + struct vfio_iommu_type1_dma_map args = { + .argsz = sizeof(args), + .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, + .vaddr = (u64)region->vaddr, + .iova = region->iova, + .size = region->size, + }; + + ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args); +} + +static void iommufd_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + struct iommu_ioas_map args = { + .size = sizeof(args), + .flags = IOMMU_IOAS_MAP_READABLE | + IOMMU_IOAS_MAP_WRITEABLE | + IOMMU_IOAS_MAP_FIXED_IOVA, + .user_va = (u64)region->vaddr, + .iova = region->iova, + .length = region->size, + .ioas_id = device->ioas_id, + }; + + ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args); +} + +void vfio_pci_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + if (device->iommufd) + iommufd_dma_map(device, region); + else + vfio_iommu_dma_map(device, region); + + list_add(®ion->link, &device->dma_regions); +} + +static void vfio_iommu_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + struct vfio_iommu_type1_dma_unmap args = { + .argsz = sizeof(args), + .iova = region->iova, + .size = region->size, + }; + + ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args); +} + +static void iommufd_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + struct iommu_ioas_unmap args = { + .size = sizeof(args), + .iova = region->iova, + .length = region->size, + .ioas_id = device->ioas_id, + }; + + ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args); +} + +void vfio_pci_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + if (device->iommufd) + iommufd_dma_unmap(device, region); + else + vfio_iommu_dma_unmap(device, region); + + list_del(®ion->link); +} + +static void vfio_pci_region_get(struct vfio_pci_device *device, int index, + struct vfio_region_info *info) +{ + memset(info, 0, sizeof(*info)); + + info->argsz = sizeof(*info); + info->index = index; + + ioctl_assert(device->fd, VFIO_DEVICE_GET_REGION_INFO, info); +} + +static void vfio_pci_bar_map(struct vfio_pci_device *device, int index) +{ + struct vfio_pci_bar *bar = &device->bars[index]; + int prot = 0; + + VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS); + 
VFIO_ASSERT_NULL(bar->vaddr); + VFIO_ASSERT_TRUE(bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP); + + if (bar->info.flags & VFIO_REGION_INFO_FLAG_READ) + prot |= PROT_READ; + if (bar->info.flags & VFIO_REGION_INFO_FLAG_WRITE) + prot |= PROT_WRITE; + + bar->vaddr = mmap(NULL, bar->info.size, prot, MAP_FILE | MAP_SHARED, + device->fd, bar->info.offset); + VFIO_ASSERT_NE(bar->vaddr, MAP_FAILED); +} + +static void vfio_pci_bar_unmap(struct vfio_pci_device *device, int index) +{ + struct vfio_pci_bar *bar = &device->bars[index]; + + VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS); + VFIO_ASSERT_NOT_NULL(bar->vaddr); + + VFIO_ASSERT_EQ(munmap(bar->vaddr, bar->info.size), 0); + bar->vaddr = NULL; +} + +static void vfio_pci_bar_unmap_all(struct vfio_pci_device *device) +{ + int i; + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (device->bars[i].vaddr) + vfio_pci_bar_unmap(device, i); + } +} + +void vfio_pci_config_access(struct vfio_pci_device *device, bool write, + size_t config, size_t size, void *data) +{ + struct vfio_region_info *config_space = &device->config_space; + int ret; + + if (write) + ret = pwrite(device->fd, data, size, config_space->offset + config); + else + ret = pread(device->fd, data, size, config_space->offset + config); + + VFIO_ASSERT_EQ(ret, size, "Failed to %s PCI config space: 0x%lx\n", + write ? "write to" : "read from", config); +} + +void vfio_pci_device_reset(struct vfio_pci_device *device) +{ + ioctl_assert(device->fd, VFIO_DEVICE_RESET, NULL); +} + +static unsigned int vfio_pci_get_group_from_dev(const char *bdf) +{ + char dev_iommu_group_path[PATH_MAX] = {0}; + char sysfs_path[PATH_MAX] = {0}; + unsigned int group; + int ret; + + snprintf(sysfs_path, PATH_MAX, "%s/%s/iommu_group", PCI_SYSFS_PATH, bdf); + + ret = readlink(sysfs_path, dev_iommu_group_path, sizeof(dev_iommu_group_path)); + VFIO_ASSERT_NE(ret, -1, "Failed to get the IOMMU group for device: %s\n", bdf); + + ret = sscanf(basename(dev_iommu_group_path), "%u", &group); + VFIO_ASSERT_EQ(ret, 1, "Failed to get the IOMMU group for device: %s\n", bdf); + + return group; +} + +static void vfio_pci_group_setup(struct vfio_pci_device *device, const char *bdf) +{ + struct vfio_group_status group_status = { + .argsz = sizeof(group_status), + }; + char group_path[32]; + int group; + + group = vfio_pci_get_group_from_dev(bdf); + snprintf(group_path, sizeof(group_path), "/dev/vfio/%d", group); + + device->group_fd = open(group_path, O_RDWR); + VFIO_ASSERT_GE(device->group_fd, 0, "open(%s) failed\n", group_path); + + ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status); + VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE); + + ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->container_fd); +} + +static void vfio_pci_container_setup(struct vfio_pci_device *device, const char *bdf) +{ + unsigned long iommu_type = device->iommu_mode->iommu_type; + const char *path = device->iommu_mode->container_path; + int version; + int ret; + + device->container_fd = open(path, O_RDWR); + VFIO_ASSERT_GE(device->container_fd, 0, "open(%s) failed\n", path); + + version = ioctl(device->container_fd, VFIO_GET_API_VERSION); + VFIO_ASSERT_EQ(version, VFIO_API_VERSION, "Unsupported version: %d\n", version); + + vfio_pci_group_setup(device, bdf); + + ret = ioctl(device->container_fd, VFIO_CHECK_EXTENSION, iommu_type); + VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type); + + ioctl_assert(device->container_fd, VFIO_SET_IOMMU, (void *)iommu_type); + + device->fd = 
ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf); + VFIO_ASSERT_GE(device->fd, 0); +} + +static void vfio_pci_device_setup(struct vfio_pci_device *device) +{ + int i; + + device->info.argsz = sizeof(device->info); + ioctl_assert(device->fd, VFIO_DEVICE_GET_INFO, &device->info); + + vfio_pci_region_get(device, VFIO_PCI_CONFIG_REGION_INDEX, &device->config_space); + + /* Sanity check VFIO does not advertise mmap for config space */ + VFIO_ASSERT_TRUE(!(device->config_space.flags & VFIO_REGION_INFO_FLAG_MMAP), + "PCI config space should not support mmap()\n"); + + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + struct vfio_pci_bar *bar = device->bars + i; + + vfio_pci_region_get(device, i, &bar->info); + if (bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP) + vfio_pci_bar_map(device, i); + } + + vfio_pci_irq_get(device, VFIO_PCI_MSI_IRQ_INDEX, &device->msi_info); + vfio_pci_irq_get(device, VFIO_PCI_MSIX_IRQ_INDEX, &device->msix_info); + + for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) + device->msi_eventfds[i] = -1; +} + +const char *vfio_pci_get_cdev_path(const char *bdf) +{ + char dir_path[PATH_MAX]; + struct dirent *entry; + char *cdev_path; + DIR *dir; + + cdev_path = calloc(PATH_MAX, 1); + VFIO_ASSERT_NOT_NULL(cdev_path); + + snprintf(dir_path, sizeof(dir_path), "/sys/bus/pci/devices/%s/vfio-dev/", bdf); + + dir = opendir(dir_path); + VFIO_ASSERT_NOT_NULL(dir, "Failed to open directory %s\n", dir_path); + + while ((entry = readdir(dir)) != NULL) { + /* Find the file that starts with "vfio" */ + if (strncmp("vfio", entry->d_name, 4)) + continue; + + snprintf(cdev_path, PATH_MAX, "/dev/vfio/devices/%s", entry->d_name); + break; + } + + VFIO_ASSERT_NE(cdev_path[0], 0, "Failed to find vfio cdev file.\n"); + VFIO_ASSERT_EQ(closedir(dir), 0); + + return cdev_path; +} + +/* Reminder: Keep in sync with FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(). 
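Tests select a
+ * mode by passing one of these names to vfio_pci_device_init().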
*/ +static const struct vfio_iommu_mode iommu_modes[] = { + { + .name = "vfio_type1_iommu", + .container_path = "/dev/vfio/vfio", + .iommu_type = VFIO_TYPE1_IOMMU, + }, + { + .name = "vfio_type1v2_iommu", + .container_path = "/dev/vfio/vfio", + .iommu_type = VFIO_TYPE1v2_IOMMU, + }, + { + .name = "iommufd_compat_type1", + .container_path = "/dev/iommu", + .iommu_type = VFIO_TYPE1_IOMMU, + }, + { + .name = "iommufd_compat_type1v2", + .container_path = "/dev/iommu", + .iommu_type = VFIO_TYPE1v2_IOMMU, + }, + { + .name = "iommufd", + }, +}; + +const char *default_iommu_mode = "iommufd"; + +static const struct vfio_iommu_mode *lookup_iommu_mode(const char *iommu_mode) +{ + int i; + + if (!iommu_mode) + iommu_mode = default_iommu_mode; + + for (i = 0; i < ARRAY_SIZE(iommu_modes); i++) { + if (strcmp(iommu_mode, iommu_modes[i].name)) + continue; + + return &iommu_modes[i]; + } + + VFIO_FAIL("Unrecognized IOMMU mode: %s\n", iommu_mode); +} + +static void vfio_device_bind_iommufd(int device_fd, int iommufd) +{ + struct vfio_device_bind_iommufd args = { + .argsz = sizeof(args), + .iommufd = iommufd, + }; + + ioctl_assert(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &args); +} + +static u32 iommufd_ioas_alloc(int iommufd) +{ + struct iommu_ioas_alloc args = { + .size = sizeof(args), + }; + + ioctl_assert(iommufd, IOMMU_IOAS_ALLOC, &args); + return args.out_ioas_id; +} + +static void vfio_device_attach_iommufd_pt(int device_fd, u32 pt_id) +{ + struct vfio_device_attach_iommufd_pt args = { + .argsz = sizeof(args), + .pt_id = pt_id, + }; + + ioctl_assert(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &args); +} + +static void vfio_pci_iommufd_setup(struct vfio_pci_device *device, const char *bdf) +{ + const char *cdev_path = vfio_pci_get_cdev_path(bdf); + + device->fd = open(cdev_path, O_RDWR); + VFIO_ASSERT_GE(device->fd, 0); + free((void *)cdev_path); + + /* + * Require device->iommufd to be >0 so that a simple non-0 check can be + * used to check if iommufd is enabled. In practice open() will never + * return 0 unless stdin is closed. 
+ */ + device->iommufd = open("/dev/iommu", O_RDWR); + VFIO_ASSERT_GT(device->iommufd, 0); + + vfio_device_bind_iommufd(device->fd, device->iommufd); + device->ioas_id = iommufd_ioas_alloc(device->iommufd); + vfio_device_attach_iommufd_pt(device->fd, device->ioas_id); +} + +struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode) +{ + struct vfio_pci_device *device; + + device = calloc(1, sizeof(*device)); + VFIO_ASSERT_NOT_NULL(device); + + INIT_LIST_HEAD(&device->dma_regions); + + device->iommu_mode = lookup_iommu_mode(iommu_mode); + + if (device->iommu_mode->container_path) + vfio_pci_container_setup(device, bdf); + else + vfio_pci_iommufd_setup(device, bdf); + + vfio_pci_device_setup(device); + vfio_pci_driver_probe(device); + + return device; +} + +void vfio_pci_device_cleanup(struct vfio_pci_device *device) +{ + int i; + + if (device->driver.initialized) + vfio_pci_driver_remove(device); + + vfio_pci_bar_unmap_all(device); + + VFIO_ASSERT_EQ(close(device->fd), 0); + + for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) { + if (device->msi_eventfds[i] < 0) + continue; + + VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0); + } + + if (device->iommufd) { + VFIO_ASSERT_EQ(close(device->iommufd), 0); + } else { + VFIO_ASSERT_EQ(close(device->group_fd), 0); + VFIO_ASSERT_EQ(close(device->container_fd), 0); + } + + free(device); +} + +static bool is_bdf(const char *str) +{ + unsigned int s, b, d, f; + int length, count; + + count = sscanf(str, "%4x:%2x:%2x.%2x%n", &s, &b, &d, &f, &length); + return count == 4 && length == strlen(str); +} + +const char *vfio_selftests_get_bdf(int *argc, char *argv[]) +{ + char *bdf; + + if (*argc > 1 && is_bdf(argv[*argc - 1])) + return argv[--(*argc)]; + + bdf = getenv("VFIO_SELFTESTS_BDF"); + if (bdf) { + VFIO_ASSERT_TRUE(is_bdf(bdf), "Invalid BDF: %s\n", bdf); + return bdf; + } + + fprintf(stderr, "Unable to determine which device to use, skipping test.\n"); + fprintf(stderr, "\n"); + fprintf(stderr, "To pass the device address via environment variable:\n"); + fprintf(stderr, "\n"); + fprintf(stderr, " export VFIO_SELFTESTS_BDF=segment:bus:device.function\n"); + fprintf(stderr, " %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "To pass the device address via argv:\n"); + fprintf(stderr, "\n"); + fprintf(stderr, " %s [options] segment:bus:device.function\n", argv[0]); + fprintf(stderr, "\n"); + exit(KSFT_SKIP); +} diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_driver.c b/tools/testing/selftests/vfio/lib/vfio_pci_driver.c new file mode 100644 index 000000000000..e5e8723ecb41 --- /dev/null +++ b/tools/testing/selftests/vfio/lib/vfio_pci_driver.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <stdio.h> + +#include "../../../kselftest.h" +#include <vfio_util.h> + +#ifdef __x86_64__ +extern struct vfio_pci_driver_ops dsa_ops; +extern struct vfio_pci_driver_ops ioat_ops; +#endif + +static struct vfio_pci_driver_ops *driver_ops[] = { +#ifdef __x86_64__ + &dsa_ops, + &ioat_ops, +#endif +}; + +void vfio_pci_driver_probe(struct vfio_pci_device *device) +{ + struct vfio_pci_driver_ops *ops; + int i; + + VFIO_ASSERT_NULL(device->driver.ops); + + for (i = 0; i < ARRAY_SIZE(driver_ops); i++) { + ops = driver_ops[i]; + + if (ops->probe(device)) + continue; + + printf("Driver found: %s\n", ops->name); + device->driver.ops = ops; + } +} + +static void vfio_check_driver_op(struct vfio_pci_driver *driver, void *op, + const char *op_name) +{ + VFIO_ASSERT_NOT_NULL(driver->ops); + 
VFIO_ASSERT_NOT_NULL(op, "Driver has no %s()\n", op_name); + VFIO_ASSERT_EQ(driver->initialized, op != driver->ops->init); + VFIO_ASSERT_EQ(driver->memcpy_in_progress, op == driver->ops->memcpy_wait); +} + +#define VFIO_CHECK_DRIVER_OP(_driver, _op) do { \ + struct vfio_pci_driver *__driver = (_driver); \ + vfio_check_driver_op(__driver, __driver->ops->_op, #_op); \ +} while (0) + +void vfio_pci_driver_init(struct vfio_pci_device *device) +{ + struct vfio_pci_driver *driver = &device->driver; + + VFIO_ASSERT_NOT_NULL(driver->region.vaddr); + VFIO_CHECK_DRIVER_OP(driver, init); + + driver->ops->init(device); + + driver->initialized = true; + + printf("%s: region: vaddr %p, iova 0x%lx, size 0x%lx\n", + driver->ops->name, + driver->region.vaddr, + driver->region.iova, + driver->region.size); + + printf("%s: max_memcpy_size 0x%lx, max_memcpy_count 0x%lx\n", + driver->ops->name, + driver->max_memcpy_size, + driver->max_memcpy_count); +} + +void vfio_pci_driver_remove(struct vfio_pci_device *device) +{ + struct vfio_pci_driver *driver = &device->driver; + + VFIO_CHECK_DRIVER_OP(driver, remove); + + driver->ops->remove(device); + driver->initialized = false; +} + +void vfio_pci_driver_send_msi(struct vfio_pci_device *device) +{ + struct vfio_pci_driver *driver = &device->driver; + + VFIO_CHECK_DRIVER_OP(driver, send_msi); + + driver->ops->send_msi(device); +} + +void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size, + u64 count) +{ + struct vfio_pci_driver *driver = &device->driver; + + VFIO_ASSERT_LE(size, driver->max_memcpy_size); + VFIO_ASSERT_LE(count, driver->max_memcpy_count); + VFIO_CHECK_DRIVER_OP(driver, memcpy_start); + + driver->ops->memcpy_start(device, src, dst, size, count); + driver->memcpy_in_progress = true; +} + +int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device) +{ + struct vfio_pci_driver *driver = &device->driver; + int r; + + VFIO_CHECK_DRIVER_OP(driver, memcpy_wait); + + r = driver->ops->memcpy_wait(device); + driver->memcpy_in_progress = false; + + return r; +} + +int vfio_pci_driver_memcpy(struct vfio_pci_device *device, + iova_t src, iova_t dst, u64 size) +{ + vfio_pci_driver_memcpy_start(device, src, dst, size, 1); + + return vfio_pci_driver_memcpy_wait(device); +} diff --git a/tools/testing/selftests/vfio/run.sh b/tools/testing/selftests/vfio/run.sh new file mode 100755 index 000000000000..0476b6d7adc3 --- /dev/null +++ b/tools/testing/selftests/vfio/run.sh @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +# Global variables initialized in main() and then used during cleanup() when +# the script exits. +declare DEVICE_BDF +declare NEW_DRIVER +declare OLD_DRIVER +declare OLD_NUMVFS +declare DRIVER_OVERRIDE + +function write_to() { + # Unfortunately set -x does not show redirects so use echo to manually + # tell the user what commands are being run. 
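+	# The output looks like, e.g. (illustrative BDF):
+	#   + echo "vfio-pci" > /sys/bus/pci/devices/0000:00:04.0/driver_override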
+ echo "+ echo \"${2}\" > ${1}" + echo "${2}" > ${1} +} + +function bind() { + write_to /sys/bus/pci/drivers/${2}/bind ${1} +} + +function unbind() { + write_to /sys/bus/pci/drivers/${2}/unbind ${1} +} + +function set_sriov_numvfs() { + write_to /sys/bus/pci/devices/${1}/sriov_numvfs ${2} +} + +function set_driver_override() { + write_to /sys/bus/pci/devices/${1}/driver_override ${2} +} + +function clear_driver_override() { + set_driver_override ${1} "" +} + +function cleanup() { + if [ "${NEW_DRIVER}" ]; then unbind ${DEVICE_BDF} ${NEW_DRIVER} ; fi + if [ "${DRIVER_OVERRIDE}" ]; then clear_driver_override ${DEVICE_BDF} ; fi + if [ "${OLD_DRIVER}" ]; then bind ${DEVICE_BDF} ${OLD_DRIVER} ; fi + if [ "${OLD_NUMVFS}" ]; then set_sriov_numvfs ${DEVICE_BDF} ${OLD_NUMVFS} ; fi +} + +function usage() { + echo "usage: $0 [-d segment:bus:device.function] [-s] [-h] [cmd ...]" >&2 + echo >&2 + echo " -d: The BDF of the device to use for the test (required)" >&2 + echo " -h: Show this help message" >&2 + echo " -s: Drop into a shell rather than running a command" >&2 + echo >&2 + echo " cmd: The command to run and arguments to pass to it." >&2 + echo " Required when not using -s. The SBDF will be " >&2 + echo " appended to the argument list." >&2 + exit 1 +} + +function main() { + local shell + + while getopts "d:hs" opt; do + case $opt in + d) DEVICE_BDF="$OPTARG" ;; + s) shell=true ;; + *) usage ;; + esac + done + + # Shift past all optional arguments. + shift $((OPTIND - 1)) + + # Check that the user passed in the command to run. + [ ! "${shell}" ] && [ $# = 0 ] && usage + + # Check that the user passed in a BDF. + [ "${DEVICE_BDF}" ] || usage + + trap cleanup EXIT + set -e + + test -d /sys/bus/pci/devices/${DEVICE_BDF} + + if [ -f /sys/bus/pci/devices/${DEVICE_BDF}/sriov_numvfs ]; then + OLD_NUMVFS=$(cat /sys/bus/pci/devices/${DEVICE_BDF}/sriov_numvfs) + set_sriov_numvfs ${DEVICE_BDF} 0 + fi + + if [ -L /sys/bus/pci/devices/${DEVICE_BDF}/driver ]; then + OLD_DRIVER=$(basename $(readlink -m /sys/bus/pci/devices/${DEVICE_BDF}/driver)) + unbind ${DEVICE_BDF} ${OLD_DRIVER} + fi + + set_driver_override ${DEVICE_BDF} vfio-pci + DRIVER_OVERRIDE=true + + bind ${DEVICE_BDF} vfio-pci + NEW_DRIVER=vfio-pci + + echo + if [ "${shell}" ]; then + echo "Dropping into ${SHELL} with VFIO_SELFTESTS_BDF=${DEVICE_BDF}" + VFIO_SELFTESTS_BDF=${DEVICE_BDF} ${SHELL} + else + "$@" ${DEVICE_BDF} + fi + echo +} + +main "$@" diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c new file mode 100644 index 000000000000..ab19c54a774d --- /dev/null +++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <stdio.h> +#include <sys/mman.h> +#include <unistd.h> + +#include <linux/limits.h> +#include <linux/mman.h> +#include <linux/sizes.h> +#include <linux/vfio.h> + +#include <vfio_util.h> + +#include "../kselftest_harness.h" + +static const char *device_bdf; + +struct iommu_mapping { + u64 pgd; + u64 p4d; + u64 pud; + u64 pmd; + u64 pte; +}; + +static void parse_next_value(char **line, u64 *value) +{ + char *token; + + token = strtok_r(*line, " \t|\n", line); + if (!token) + return; + + /* Caller verifies `value`. No need to check return value. 
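If parsing fails,
+	 * *value is left unchanged.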
*/ + sscanf(token, "0x%lx", value); +} + +static int intel_iommu_mapping_get(const char *bdf, u64 iova, + struct iommu_mapping *mapping) +{ + char iommu_mapping_path[PATH_MAX], line[PATH_MAX]; + u64 line_iova = -1; + int ret = -ENOENT; + FILE *file; + char *rest; + + snprintf(iommu_mapping_path, sizeof(iommu_mapping_path), + "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct", + bdf); + + printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path); + + file = fopen(iommu_mapping_path, "r"); + VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path); + + while (fgets(line, sizeof(line), file)) { + rest = line; + + parse_next_value(&rest, &line_iova); + if (line_iova != (iova / getpagesize())) + continue; + + /* + * Ensure each struct field is initialized in case of empty + * page table values. + */ + memset(mapping, 0, sizeof(*mapping)); + parse_next_value(&rest, &mapping->pgd); + parse_next_value(&rest, &mapping->p4d); + parse_next_value(&rest, &mapping->pud); + parse_next_value(&rest, &mapping->pmd); + parse_next_value(&rest, &mapping->pte); + + ret = 0; + break; + } + + fclose(file); + + if (ret) + printf("IOVA not found\n"); + + return ret; +} + +static int iommu_mapping_get(const char *bdf, u64 iova, + struct iommu_mapping *mapping) +{ + if (!access("/sys/kernel/debug/iommu/intel", F_OK)) + return intel_iommu_mapping_get(bdf, iova, mapping); + + return -EOPNOTSUPP; +} + +FIXTURE(vfio_dma_mapping_test) { + struct vfio_pci_device *device; +}; + +FIXTURE_VARIANT(vfio_dma_mapping_test) { + const char *iommu_mode; + u64 size; + int mmap_flags; +}; + +#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \ +FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) { \ + .iommu_mode = #_iommu_mode, \ + .size = (_size), \ + .mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags), \ +} + +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0); +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB); +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB); + +FIXTURE_SETUP(vfio_dma_mapping_test) +{ + self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); +} + +FIXTURE_TEARDOWN(vfio_dma_mapping_test) +{ + vfio_pci_device_cleanup(self->device); +} + +TEST_F(vfio_dma_mapping_test, dma_map_unmap) +{ + const u64 size = variant->size ?: getpagesize(); + const int flags = variant->mmap_flags; + struct vfio_dma_region region; + struct iommu_mapping mapping; + u64 mapping_size = size; + int rc; + + region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); + + /* Skip the test if there aren't enough HugeTLB pages available. */ + if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED) + SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno); + else + ASSERT_NE(region.vaddr, MAP_FAILED); + + region.iova = (u64)region.vaddr; + region.size = size; + + vfio_pci_dma_map(self->device, ®ion); + printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova); + + ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr)); + + rc = iommu_mapping_get(device_bdf, region.iova, &mapping); + if (rc == -EOPNOTSUPP) + goto unmap; + + /* + * IOMMUFD compatibility-mode does not support huge mappings when + * using VFIO_TYPE1_IOMMU. 
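Expect 4KiB
+	 * mappings instead, regardless of the mapping size requested.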
+ */ + if (!strcmp(variant->iommu_mode, "iommufd_compat_type1")) + mapping_size = SZ_4K; + + ASSERT_EQ(0, rc); + printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova); + printf("PGD: 0x%016lx\n", mapping.pgd); + printf("P4D: 0x%016lx\n", mapping.p4d); + printf("PUD: 0x%016lx\n", mapping.pud); + printf("PMD: 0x%016lx\n", mapping.pmd); + printf("PTE: 0x%016lx\n", mapping.pte); + + switch (mapping_size) { + case SZ_4K: + ASSERT_NE(0, mapping.pte); + break; + case SZ_2M: + ASSERT_EQ(0, mapping.pte); + ASSERT_NE(0, mapping.pmd); + break; + case SZ_1G: + ASSERT_EQ(0, mapping.pte); + ASSERT_EQ(0, mapping.pmd); + ASSERT_NE(0, mapping.pud); + break; + default: + VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size); + } + +unmap: + vfio_pci_dma_unmap(self->device, ®ion); + printf("Unmapped IOVA 0x%lx\n", region.iova); + ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr)); + ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping)); + + ASSERT_TRUE(!munmap(region.vaddr, size)); +} + +int main(int argc, char *argv[]) +{ + device_bdf = vfio_selftests_get_bdf(&argc, argv); + return test_harness_run(argc, argv); +} diff --git a/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c b/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c new file mode 100644 index 000000000000..3655106b912d --- /dev/null +++ b/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <uapi/linux/types.h> +#include <linux/limits.h> +#include <linux/sizes.h> +#include <linux/vfio.h> +#include <linux/iommufd.h> + +#include <stdint.h> +#include <stdio.h> +#include <sys/ioctl.h> +#include <unistd.h> + +#include <vfio_util.h> +#include "../kselftest_harness.h" + +static const char iommu_dev_path[] = "/dev/iommu"; +static const char *cdev_path; + +static int vfio_device_bind_iommufd_ioctl(int cdev_fd, int iommufd) +{ + struct vfio_device_bind_iommufd bind_args = { + .argsz = sizeof(bind_args), + .iommufd = iommufd, + }; + + return ioctl(cdev_fd, VFIO_DEVICE_BIND_IOMMUFD, &bind_args); +} + +static int vfio_device_get_info_ioctl(int cdev_fd) +{ + struct vfio_device_info info_args = { .argsz = sizeof(info_args) }; + + return ioctl(cdev_fd, VFIO_DEVICE_GET_INFO, &info_args); +} + +static int vfio_device_ioas_alloc_ioctl(int iommufd, struct iommu_ioas_alloc *alloc_args) +{ + *alloc_args = (struct iommu_ioas_alloc){ + .size = sizeof(struct iommu_ioas_alloc), + }; + + return ioctl(iommufd, IOMMU_IOAS_ALLOC, alloc_args); +} + +static int vfio_device_attach_iommufd_pt_ioctl(int cdev_fd, u32 pt_id) +{ + struct vfio_device_attach_iommufd_pt attach_args = { + .argsz = sizeof(attach_args), + .pt_id = pt_id, + }; + + return ioctl(cdev_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_args); +} + +static int vfio_device_detach_iommufd_pt_ioctl(int cdev_fd) +{ + struct vfio_device_detach_iommufd_pt detach_args = { + .argsz = sizeof(detach_args), + }; + + return ioctl(cdev_fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_args); +} + +FIXTURE(vfio_cdev) { + int cdev_fd; + int iommufd; +}; + +FIXTURE_SETUP(vfio_cdev) +{ + ASSERT_LE(0, (self->cdev_fd = open(cdev_path, O_RDWR, 0))); + ASSERT_LE(0, (self->iommufd = open(iommu_dev_path, O_RDWR, 0))); +} + +FIXTURE_TEARDOWN(vfio_cdev) +{ + ASSERT_EQ(0, close(self->cdev_fd)); + ASSERT_EQ(0, close(self->iommufd)); +} + +TEST_F(vfio_cdev, bind) +{ + ASSERT_EQ(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, self->iommufd)); + ASSERT_EQ(0, vfio_device_get_info_ioctl(self->cdev_fd)); +} + +TEST_F(vfio_cdev, 
get_info_without_bind_fails) +{ + ASSERT_NE(0, vfio_device_get_info_ioctl(self->cdev_fd)); +} + +TEST_F(vfio_cdev, bind_bad_iommufd_fails) +{ + ASSERT_NE(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, -2)); +} + +TEST_F(vfio_cdev, repeated_bind_fails) +{ + ASSERT_EQ(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, self->iommufd)); + ASSERT_NE(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, self->iommufd)); +} + +TEST_F(vfio_cdev, attach_detach_pt) +{ + struct iommu_ioas_alloc alloc_args; + + ASSERT_EQ(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, self->iommufd)); + ASSERT_EQ(0, vfio_device_ioas_alloc_ioctl(self->iommufd, &alloc_args)); + ASSERT_EQ(0, vfio_device_attach_iommufd_pt_ioctl(self->cdev_fd, alloc_args.out_ioas_id)); + ASSERT_EQ(0, vfio_device_detach_iommufd_pt_ioctl(self->cdev_fd)); +} + +TEST_F(vfio_cdev, attach_invalid_pt_fails) +{ + ASSERT_EQ(0, vfio_device_bind_iommufd_ioctl(self->cdev_fd, self->iommufd)); + ASSERT_NE(0, vfio_device_attach_iommufd_pt_ioctl(self->cdev_fd, UINT32_MAX)); +} + +int main(int argc, char *argv[]) +{ + const char *device_bdf = vfio_selftests_get_bdf(&argc, argv); + + cdev_path = vfio_pci_get_cdev_path(device_bdf); + printf("Using cdev device %s\n", cdev_path); + + return test_harness_run(argc, argv); +} diff --git a/tools/testing/selftests/vfio/vfio_pci_device_test.c b/tools/testing/selftests/vfio/vfio_pci_device_test.c new file mode 100644 index 000000000000..7a270698e4d2 --- /dev/null +++ b/tools/testing/selftests/vfio/vfio_pci_device_test.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <fcntl.h> +#include <stdlib.h> + +#include <sys/ioctl.h> +#include <sys/mman.h> + +#include <linux/limits.h> +#include <linux/pci_regs.h> +#include <linux/sizes.h> +#include <linux/vfio.h> + +#include <vfio_util.h> + +#include "../kselftest_harness.h" + +static const char *device_bdf; + +/* + * Limit the number of MSIs enabled/disabled by the test regardless of the + * number of MSIs the device itself supports, e.g. to avoid hitting IRTE limits. + */ +#define MAX_TEST_MSI 16U + +FIXTURE(vfio_pci_device_test) { + struct vfio_pci_device *device; +}; + +FIXTURE_SETUP(vfio_pci_device_test) +{ + self->device = vfio_pci_device_init(device_bdf, default_iommu_mode); +} + +FIXTURE_TEARDOWN(vfio_pci_device_test) +{ + vfio_pci_device_cleanup(self->device); +} + +#define read_pci_id_from_sysfs(_file) ({ \ + char __sysfs_path[PATH_MAX]; \ + char __buf[32]; \ + int __fd; \ + \ + snprintf(__sysfs_path, PATH_MAX, "/sys/bus/pci/devices/%s/%s", device_bdf, _file); \ + ASSERT_GT((__fd = open(__sysfs_path, O_RDONLY)), 0); \ + ASSERT_GT(read(__fd, __buf, ARRAY_SIZE(__buf)), 0); \ + ASSERT_EQ(0, close(__fd)); \ + (u16)strtoul(__buf, NULL, 0); \ +}) + +TEST_F(vfio_pci_device_test, config_space_read_write) +{ + u16 vendor, device; + u16 command; + + /* Check that Vendor and Device match what the kernel reports. 
diff --git a/tools/testing/selftests/vfio/vfio_pci_device_test.c b/tools/testing/selftests/vfio/vfio_pci_device_test.c
new file mode 100644
index 000000000000..7a270698e4d2
--- /dev/null
+++ b/tools/testing/selftests/vfio/vfio_pci_device_test.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <fcntl.h>
+#include <stdlib.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/limits.h>
+#include <linux/pci_regs.h>
+#include <linux/sizes.h>
+#include <linux/vfio.h>
+
+#include <vfio_util.h>
+
+#include "../kselftest_harness.h"
+
+static const char *device_bdf;
+
+/*
+ * Limit the number of MSIs enabled/disabled by the test regardless of the
+ * number of MSIs the device itself supports, e.g. to avoid hitting IRTE limits.
+ */
+#define MAX_TEST_MSI 16U
+
+FIXTURE(vfio_pci_device_test) {
+	struct vfio_pci_device *device;
+};
+
+FIXTURE_SETUP(vfio_pci_device_test)
+{
+	self->device = vfio_pci_device_init(device_bdf, default_iommu_mode);
+}
+
+FIXTURE_TEARDOWN(vfio_pci_device_test)
+{
+	vfio_pci_device_cleanup(self->device);
+}
+
+#define read_pci_id_from_sysfs(_file) ({ \
+	char __sysfs_path[PATH_MAX]; \
+	char __buf[32]; \
+	int __fd; \
+	\
+	snprintf(__sysfs_path, PATH_MAX, "/sys/bus/pci/devices/%s/%s", device_bdf, _file); \
+	ASSERT_GT((__fd = open(__sysfs_path, O_RDONLY)), 0); \
+	ASSERT_GT(read(__fd, __buf, ARRAY_SIZE(__buf)), 0); \
+	ASSERT_EQ(0, close(__fd)); \
+	(u16)strtoul(__buf, NULL, 0); \
+})
+
+TEST_F(vfio_pci_device_test, config_space_read_write)
+{
+	u16 vendor, device;
+	u16 command;
+
+	/* Check that Vendor and Device match what the kernel reports. */
+	vendor = read_pci_id_from_sysfs("vendor");
+	device = read_pci_id_from_sysfs("device");
+	ASSERT_TRUE(vfio_pci_device_match(self->device, vendor, device));
+
+	printf("Vendor: %04x, Device: %04x\n", vendor, device);
+
+	command = vfio_pci_config_readw(self->device, PCI_COMMAND);
+	ASSERT_FALSE(command & PCI_COMMAND_MASTER);
+
+	vfio_pci_config_writew(self->device, PCI_COMMAND, command | PCI_COMMAND_MASTER);
+	command = vfio_pci_config_readw(self->device, PCI_COMMAND);
+	ASSERT_TRUE(command & PCI_COMMAND_MASTER);
+	printf("Enabled Bus Mastering (command: %04x)\n", command);
+
+	vfio_pci_config_writew(self->device, PCI_COMMAND, command & ~PCI_COMMAND_MASTER);
+	command = vfio_pci_config_readw(self->device, PCI_COMMAND);
+	ASSERT_FALSE(command & PCI_COMMAND_MASTER);
+	printf("Disabled Bus Mastering (command: %04x)\n", command);
+}
+
+TEST_F(vfio_pci_device_test, validate_bars)
+{
+	struct vfio_pci_bar *bar;
+	int i;
+
+	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+		bar = &self->device->bars[i];
+
+		if (!(bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP)) {
+			printf("BAR %d does not support mmap()\n", i);
+			ASSERT_EQ(NULL, bar->vaddr);
+			continue;
+		}
+
+		/*
+		 * BARs that support mmap() should be automatically mapped by
+		 * vfio_pci_device_init().
+		 */
+		ASSERT_NE(NULL, bar->vaddr);
+		ASSERT_NE(0, bar->info.size);
+		printf("BAR %d mapped at %p (size 0x%llx)\n", i, bar->vaddr, bar->info.size);
+	}
+}
+
+FIXTURE(vfio_pci_irq_test) {
+	struct vfio_pci_device *device;
+};
+
+FIXTURE_VARIANT(vfio_pci_irq_test) {
+	int irq_index;
+};
+
+FIXTURE_VARIANT_ADD(vfio_pci_irq_test, msi) {
+	.irq_index = VFIO_PCI_MSI_IRQ_INDEX,
+};
+
+FIXTURE_VARIANT_ADD(vfio_pci_irq_test, msix) {
+	.irq_index = VFIO_PCI_MSIX_IRQ_INDEX,
+};
+
+FIXTURE_SETUP(vfio_pci_irq_test)
+{
+	self->device = vfio_pci_device_init(device_bdf, default_iommu_mode);
+}
+
+FIXTURE_TEARDOWN(vfio_pci_irq_test)
+{
+	vfio_pci_device_cleanup(self->device);
+}
+
+TEST_F(vfio_pci_irq_test, enable_trigger_disable)
+{
+	bool msix = variant->irq_index == VFIO_PCI_MSIX_IRQ_INDEX;
+	int msi_eventfd;
+	u32 count;
+	u64 value;
+	int i;
+
+	if (msix)
+		count = self->device->msix_info.count;
+	else
+		count = self->device->msi_info.count;
+
+	count = min(count, MAX_TEST_MSI);
+
+	if (!count)
+		SKIP(return, "MSI%s: not supported\n", msix ? "-x" : "");
+
+	vfio_pci_irq_enable(self->device, variant->irq_index, 0, count);
+	printf("MSI%s: enabled %d interrupts\n", msix ? "-x" : "", count);
+
+	for (i = 0; i < count; i++) {
+		msi_eventfd = self->device->msi_eventfds[i];
+
+		fcntl_set_nonblock(msi_eventfd);
+		ASSERT_EQ(-1, read(msi_eventfd, &value, 8));
+		ASSERT_EQ(EAGAIN, errno);
+
+		vfio_pci_irq_trigger(self->device, variant->irq_index, i);
+
+		ASSERT_EQ(8, read(msi_eventfd, &value, 8));
+		ASSERT_EQ(1, value);
+	}
+
+	vfio_pci_irq_disable(self->device, variant->irq_index);
+}
+
+TEST_F(vfio_pci_device_test, reset)
+{
+	if (!(self->device->info.flags & VFIO_DEVICE_FLAGS_RESET))
+		SKIP(return, "Device does not support reset\n");
+
+	vfio_pci_device_reset(self->device);
+}
+
+int main(int argc, char *argv[])
+{
+	device_bdf = vfio_selftests_get_bdf(&argc, argv);
+	return test_harness_run(argc, argv);
+}
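
A minimal sketch, not part of the patch, of how MSI/MSI-X vectors are wired to eventfds at the raw uAPI level; the vfio_pci_irq_enable() helper used by the IRQ test above is assumed to wrap something similar. enable_msix_eventfds() is a hypothetical name, and device_fd/nvectors are placeholders:

#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

#include <linux/vfio.h>

/* Enable nvectors MSI-X vectors, each delivering into its own eventfd. */
static int enable_msix_eventfds(int device_fd, int *eventfds, unsigned int nvectors)
{
	size_t sz = sizeof(struct vfio_irq_set) + nvectors * sizeof(__s32);
	struct vfio_irq_set *set = calloc(1, sz);
	unsigned int i;
	int ret;

	for (i = 0; i < nvectors; i++)
		eventfds[i] = eventfd(0, 0);

	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	set->start = 0;
	set->count = nvectors;
	memcpy(set->data, eventfds, nvectors * sizeof(__s32));

	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}

Each successful 8-byte read() of such an eventfd returns the number of interrupts delivered since the last read and resets the counter, which is why enable_trigger_disable sees EAGAIN before a trigger and a value of 1 after it.
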
diff --git a/tools/testing/selftests/vfio/vfio_pci_driver_test.c b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
new file mode 100644
index 000000000000..2dbd70b7db62
--- /dev/null
+++ b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/sizes.h>
+#include <linux/vfio.h>
+
+#include <vfio_util.h>
+
+#include "../kselftest_harness.h"
+
+static const char *device_bdf;
+
+#define ASSERT_NO_MSI(_eventfd) do { \
+	u64 __value; \
+	\
+	ASSERT_EQ(-1, read(_eventfd, &__value, 8)); \
+	ASSERT_EQ(EAGAIN, errno); \
+} while (0)
+
+static void region_setup(struct vfio_pci_device *device,
+			 struct vfio_dma_region *region, u64 size)
+{
+	const int flags = MAP_SHARED | MAP_ANONYMOUS;
+	const int prot = PROT_READ | PROT_WRITE;
+	void *vaddr;
+
+	vaddr = mmap(NULL, size, prot, flags, -1, 0);
+	VFIO_ASSERT_NE(vaddr, MAP_FAILED);
+
+	region->vaddr = vaddr;
+	region->iova = (u64)vaddr;
+	region->size = size;
+
+	vfio_pci_dma_map(device, region);
+}
+
+static void region_teardown(struct vfio_pci_device *device,
+			    struct vfio_dma_region *region)
+{
+	vfio_pci_dma_unmap(device, region);
+	VFIO_ASSERT_EQ(munmap(region->vaddr, region->size), 0);
+}
+
+FIXTURE(vfio_pci_driver_test) {
+	struct vfio_pci_device *device;
+	struct vfio_dma_region memcpy_region;
+	void *vaddr;
+	int msi_fd;
+
+	u64 size;
+	void *src;
+	void *dst;
+	iova_t src_iova;
+	iova_t dst_iova;
+	iova_t unmapped_iova;
+};
+
+FIXTURE_VARIANT(vfio_pci_driver_test) {
+	const char *iommu_mode;
+};
+
+#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
+FIXTURE_VARIANT_ADD(vfio_pci_driver_test, _iommu_mode) { \
+	.iommu_mode = #_iommu_mode, \
+}
+
+FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
+
+FIXTURE_SETUP(vfio_pci_driver_test)
+{
+	struct vfio_pci_driver *driver;
+
+	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+
+	driver = &self->device->driver;
+
+	region_setup(self->device, &self->memcpy_region, SZ_1G);
+	region_setup(self->device, &driver->region, SZ_2M);
+
+	/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
+	self->unmapped_iova = 8UL * SZ_1G;
+
+	vfio_pci_driver_init(self->device);
+	self->msi_fd = self->device->msi_eventfds[driver->msi];
+
+	/*
+	 * Use the maximum memcpy size supported by the device, capped to half
+	 * of the memcpy region so that the src and dst buffers do not overlap.
+	 */
+	self->size = self->device->driver.max_memcpy_size;
+	self->size = min(self->size, self->memcpy_region.size / 2);
+
+	self->src = self->memcpy_region.vaddr;
+	self->dst = self->src + self->size;
+
+	self->src_iova = to_iova(self->device, self->src);
+	self->dst_iova = to_iova(self->device, self->dst);
+}
+
+FIXTURE_TEARDOWN(vfio_pci_driver_test)
+{
+	struct vfio_pci_driver *driver = &self->device->driver;
+
+	vfio_pci_driver_remove(self->device);
+
+	region_teardown(self->device, &self->memcpy_region);
+	region_teardown(self->device, &driver->region);
+
+	vfio_pci_device_cleanup(self->device);
+}
+
+TEST_F(vfio_pci_driver_test, init_remove)
+{
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		vfio_pci_driver_remove(self->device);
+		vfio_pci_driver_init(self->device);
+	}
+}
+
+TEST_F(vfio_pci_driver_test, memcpy_success)
+{
+	fcntl_set_nonblock(self->msi_fd);
+
+	memset(self->src, 'x', self->size);
+	memset(self->dst, 'y', self->size);
+
+	ASSERT_EQ(0, vfio_pci_driver_memcpy(self->device,
+					    self->src_iova,
+					    self->dst_iova,
+					    self->size));
+
+	ASSERT_EQ(0, memcmp(self->src, self->dst, self->size));
+	ASSERT_NO_MSI(self->msi_fd);
+}
+
+TEST_F(vfio_pci_driver_test, memcpy_from_unmapped_iova)
+{
+	fcntl_set_nonblock(self->msi_fd);
+
+	/*
+	 * Ignore the return value since not all devices will detect and report
+	 * accesses to unmapped IOVAs as errors.
+	 */
+	vfio_pci_driver_memcpy(self->device, self->unmapped_iova,
+			       self->dst_iova, self->size);
+
+	ASSERT_NO_MSI(self->msi_fd);
+}
+
+TEST_F(vfio_pci_driver_test, memcpy_to_unmapped_iova)
+{
+	fcntl_set_nonblock(self->msi_fd);
+
+	/*
+	 * Ignore the return value since not all devices will detect and report
+	 * accesses to unmapped IOVAs as errors.
+	 */
+	vfio_pci_driver_memcpy(self->device, self->src_iova,
+			       self->unmapped_iova, self->size);
+
+	ASSERT_NO_MSI(self->msi_fd);
+}
+
+TEST_F(vfio_pci_driver_test, send_msi)
+{
+	u64 value;
+
+	vfio_pci_driver_send_msi(self->device);
+	ASSERT_EQ(8, read(self->msi_fd, &value, 8));
+	ASSERT_EQ(1, value);
+}
+
+TEST_F(vfio_pci_driver_test, mix_and_match)
+{
+	u64 value;
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		memset(self->src, 'x', self->size);
+		memset(self->dst, 'y', self->size);
+
+		ASSERT_EQ(0, vfio_pci_driver_memcpy(self->device,
+						    self->src_iova,
+						    self->dst_iova,
+						    self->size));
+
+		ASSERT_EQ(0, memcmp(self->src, self->dst, self->size));
+
+		vfio_pci_driver_memcpy(self->device,
+				       self->unmapped_iova,
+				       self->dst_iova,
+				       self->size);
+
+		vfio_pci_driver_send_msi(self->device);
+		ASSERT_EQ(8, read(self->msi_fd, &value, 8));
+		ASSERT_EQ(1, value);
+	}
+}
+
+TEST_F_TIMEOUT(vfio_pci_driver_test, memcpy_storm, 60)
+{
+	struct vfio_pci_driver *driver = &self->device->driver;
+	u64 total_size;
+	u64 count;
+
+	fcntl_set_nonblock(self->msi_fd);
+
+	/*
+	 * Perform up to 250GiB worth of DMA reads and writes across several
+	 * memcpy operations. Some devices can support even more, but then the
+	 * test would take too long.
+	 */
+	total_size = 250UL * SZ_1G;
+	count = min(total_size / self->size, driver->max_memcpy_count);
+
+	printf("Kicking off %lu memcpys of size 0x%lx\n", count, self->size);
+	vfio_pci_driver_memcpy_start(self->device,
+				     self->src_iova,
+				     self->dst_iova,
+				     self->size, count);
+
+	ASSERT_EQ(0, vfio_pci_driver_memcpy_wait(self->device));
+	ASSERT_NO_MSI(self->msi_fd);
+}
+
+int main(int argc, char *argv[])
+{
+	struct vfio_pci_device *device;
+
+	device_bdf = vfio_selftests_get_bdf(&argc, argv);
+
+	device = vfio_pci_device_init(device_bdf, default_iommu_mode);
+	if (!device->driver.ops) {
+		fprintf(stderr, "No driver found for device %s\n", device_bdf);
+		return KSFT_SKIP;
+	}
+	vfio_pci_device_cleanup(device);
+
+	return test_harness_run(argc, argv);
+}
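
A closing sketch, not part of the patch: region_setup() in the driver test maps each buffer for DMA with the buffer's virtual address doubling as its IOVA. Assuming the legacy type1 container mode (the iommufd modes would use IOMMU_IOAS_MAP instead), the ioctl underneath the vfio_pci_dma_map() helper looks roughly like this; dma_map_identity() is a hypothetical name and container_fd a placeholder for an already-configured VFIO container:

#include <stdint.h>
#include <sys/ioctl.h>

#include <linux/vfio.h>

/* Make [vaddr, vaddr + size) visible to the device at iova == vaddr. */
static int dma_map_identity(int container_fd, void *vaddr, uint64_t size)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)vaddr,
		.iova = (uintptr_t)vaddr,
		.size = size,
	};

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}

Using the vaddr as the IOVA keeps to_iova() trivial, and the fixture's unmapped_iova of 8GiB is unlikely to collide with the mmap()ed buffers on a typical 64-bit address layout, matching the "Any IOVA that doesn't overlap" comment in the setup code.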