Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/cxl/Kbuild | 3
-rw-r--r--  tools/testing/cxl/test/Kbuild | 1
-rw-r--r--  tools/testing/cxl/test/cxl.c | 86
-rw-r--r--  tools/testing/cxl/test/cxl_translate.c | 445
-rw-r--r--  tools/testing/cxl/test/mem.c | 11
-rw-r--r--  tools/testing/cxl/test/mock.c | 52
-rw-r--r--  tools/testing/cxl/test/mock.h | 4
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c | 103
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h | 56
-rw-r--r--  tools/testing/selftests/tpm2/tpm2.py | 4
-rw-r--r--  tools/testing/selftests/vfio/Makefile | 10
-rw-r--r--  tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c | 36
-rw-r--r--  tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c | 18
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio.h | 26
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio/assert.h | 54
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio/iommu.h | 76
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h | 23
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h | 125
-rw-r--r--  tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h | 97
-rw-r--r--  tools/testing/selftests/vfio/lib/include/vfio_util.h | 331
-rw-r--r--  tools/testing/selftests/vfio/lib/iommu.c | 465
-rw-r--r--  tools/testing/selftests/vfio/lib/iova_allocator.c | 94
-rw-r--r--  tools/testing/selftests/vfio/lib/libvfio.c | 78
-rw-r--r--  tools/testing/selftests/vfio/lib/libvfio.mk | 23
-rw-r--r--  tools/testing/selftests/vfio/lib/vfio_pci_device.c | 556
-rw-r--r--  tools/testing/selftests/vfio/lib/vfio_pci_driver.c | 16
-rwxr-xr-x  tools/testing/selftests/vfio/run.sh | 109
-rwxr-xr-x  tools/testing/selftests/vfio/scripts/cleanup.sh | 41
-rwxr-xr-x  tools/testing/selftests/vfio/scripts/lib.sh | 42
-rwxr-xr-x  tools/testing/selftests/vfio/scripts/run.sh | 16
-rwxr-xr-x  tools/testing/selftests/vfio/scripts/setup.sh | 48
-rw-r--r--  tools/testing/selftests/vfio/vfio_dma_mapping_test.c | 46
-rw-r--r--  tools/testing/selftests/vfio/vfio_iommufd_setup_test.c | 2
-rw-r--r--  tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c | 168
-rw-r--r--  tools/testing/selftests/vfio/vfio_pci_device_test.c | 12
-rw-r--r--  tools/testing/selftests/vfio/vfio_pci_driver_test.c | 51
36 files changed, 2156 insertions, 1172 deletions
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 0d5ce4b74b9f..0e151d0572d1 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -4,13 +4,12 @@ ldflags-y += --wrap=is_acpi_device_node
ldflags-y += --wrap=acpi_evaluate_integer
ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
-ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=cxl_await_media_ready
ldflags-y += --wrap=devm_cxl_add_rch_dport
-ldflags-y += --wrap=cxl_rcd_component_reg_phys
ldflags-y += --wrap=cxl_endpoint_parse_cdat
ldflags-y += --wrap=cxl_dport_init_ras_reporting
ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
+ldflags-y += --wrap=hmat_get_extended_linear_cache_size
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
index 6b1927897856..af50972c8b6d 100644
--- a/tools/testing/cxl/test/Kbuild
+++ b/tools/testing/cxl/test/Kbuild
@@ -4,6 +4,7 @@ ccflags-y := -I$(srctree)/drivers/cxl/ -I$(srctree)/drivers/cxl/core
obj-m += cxl_test.o
obj-m += cxl_mock.o
obj-m += cxl_mock_mem.o
+obj-m += cxl_translate.o
cxl_test-y := cxl.o
cxl_mock-y := mock.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 2d135ca533d0..81e2aef3627a 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -15,6 +15,7 @@
#include "mock.h"
static int interleave_arithmetic;
+static bool extended_linear_cache;
#define FAKE_QTG_ID 42
@@ -26,6 +27,9 @@ static int interleave_arithmetic;
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
+#define MOCK_AUTO_REGION_SIZE_DEFAULT SZ_512M
+static int mock_auto_region_size = MOCK_AUTO_REGION_SIZE_DEFAULT;
+
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
@@ -426,6 +430,22 @@ static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
return res;
}
+/* Only update CFMWS0 as this is used by the auto region. */
+static void cfmws_elc_update(struct acpi_cedt_cfmws *window, int index)
+{
+ if (!extended_linear_cache)
+ return;
+
+ if (index != 0)
+ return;
+
+ /*
+	 * The window size should be twice the CXL region size where half is
+	 * DRAM and half is CXL.
+ */
+ window->window_size = mock_auto_region_size * 2;
+}
+
static int populate_cedt(void)
{
struct cxl_mock_res *res;
@@ -450,6 +470,7 @@ static int populate_cedt(void)
for (i = cfmws_start; i <= cfmws_end; i++) {
struct acpi_cedt_cfmws *window = mock_cfmws[i];
+ cfmws_elc_update(window, i);
res = alloc_mock_res(window->window_size, SZ_256M);
if (!res)
return -ENOMEM;
@@ -591,6 +612,25 @@ mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
return AE_OK;
}
+static int
+mock_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *cache_size)
+{
+ struct acpi_cedt_cfmws *window = mock_cfmws[0];
+ struct resource cfmws0_res =
+ DEFINE_RES_MEM(window->base_hpa, window->window_size);
+
+ if (!extended_linear_cache ||
+ !resource_contains(&cfmws0_res, backing_res)) {
+ return hmat_get_extended_linear_cache_size(backing_res,
+ nid, cache_size);
+ }
+
+ *cache_size = mock_auto_region_size;
+
+ return 0;
+}
+
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
[0] = {
@@ -738,7 +778,6 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
struct cxl_endpoint_decoder *cxled;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *port, *iter;
- const int size = SZ_512M;
struct cxl_memdev *cxlmd;
struct cxl_dport *dport;
struct device *dev;
@@ -781,9 +820,11 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
}
base = window->base_hpa;
+ if (extended_linear_cache)
+ base += mock_auto_region_size;
cxld->hpa_range = (struct range) {
.start = base,
- .end = base + size - 1,
+ .end = base + mock_auto_region_size - 1,
};
cxld->interleave_ways = 2;
@@ -792,7 +833,8 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->flags = CXL_DECODER_F_ENABLE;
cxled->state = CXL_DECODER_STATE_AUTO;
port->commit_end = cxld->id;
- devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
+ devm_cxl_dpa_reserve(cxled, 0,
+ mock_auto_region_size / cxld->interleave_ways, 0);
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
@@ -841,7 +883,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_granularity = 4096;
cxld->hpa_range = (struct range) {
.start = base,
- .end = base + size - 1,
+ .end = base + mock_auto_region_size - 1,
};
put_device(dev);
}
@@ -995,37 +1037,6 @@ static int get_port_array(struct cxl_port *port,
return 0;
}
-static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
-{
- struct platform_device **array;
- int i, array_size;
- int rc;
-
- rc = get_port_array(port, &array, &array_size);
- if (rc)
- return rc;
-
- for (i = 0; i < array_size; i++) {
- struct platform_device *pdev = array[i];
- struct cxl_dport *dport;
-
- if (pdev->dev.parent != port->uport_dev) {
- dev_dbg(&port->dev, "%s: mismatch parent %s\n",
- dev_name(port->uport_dev),
- dev_name(pdev->dev.parent));
- continue;
- }
-
- dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
- CXL_RESOURCE_NONE);
-
- if (IS_ERR(dport))
- return PTR_ERR(dport);
- }
-
- return 0;
-}
-
static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
struct device *dport_dev)
{
@@ -1114,9 +1125,10 @@ static struct cxl_mock_ops cxl_mock_ops = {
.acpi_pci_find_root = mock_acpi_pci_find_root,
.devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
.devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
- .devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
+ .hmat_get_extended_linear_cache_size =
+ mock_hmat_get_extended_linear_cache_size,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
@@ -1606,6 +1618,8 @@ static __exit void cxl_test_exit(void)
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
+module_param(extended_linear_cache, bool, 0444);
+MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/test/cxl_translate.c b/tools/testing/cxl/test/cxl_translate.c
new file mode 100644
index 000000000000..2200ae21795c
--- /dev/null
+++ b/tools/testing/cxl/test/cxl_translate.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2025 Intel Corporation. All rights reserved.
+
+/* Preface all log entries with "cxl_translate" */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <cxlmem.h>
+#include <cxl.h>
+
+/* Maximum number of test vectors and entry length */
+#define MAX_TABLE_ENTRIES 128
+#define MAX_ENTRY_LEN 128
+
+/* Expected number of parameters in each test vector */
+#define EXPECTED_PARAMS 7
+
+/* Module parameters for test vectors */
+static char *table[MAX_TABLE_ENTRIES];
+static int table_num;
+
+/* Interleave Arithmetic */
+#define MODULO_MATH 0
+#define XOR_MATH 1
+
+/*
+ * XOR mapping configuration
+ * The test data sets all use the same set of xormaps. When additional
+ * data sets arrive for validation, this static setup will need to
+ * be changed to accept xormaps as additional parameters.
+ */
+struct cxl_cxims_data *cximsd;
+static u64 xormaps[] = {
+ 0x2020900,
+ 0x4041200,
+ 0x1010400,
+ 0x800,
+};
+
+static int nr_maps = ARRAY_SIZE(xormaps);
+
+#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
+static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
+ [1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
+};
+
+/**
+ * to_hpa - calculate an HPA offset from a DPA offset and position
+ *
+ * @dpa_offset: device physical address offset
+ * @pos: device's position in the interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: host physical address offset
+ */
+static u64 to_hpa(u64 dpa_offset, int pos, u8 r_eiw, u16 r_eig, u8 hb_ways,
+ u8 math)
+{
+ u64 hpa_offset;
+
+ /* Calculate base HPA offset from DPA and position */
+ hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, r_eiw, r_eig);
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ return cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return hpa_offset;
+}
+
+/**
+ * to_dpa - translate an HPA offset to DPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device physical address offset
+ */
+static u64 to_dpa(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ if (math == XOR_MATH) {
+ cximsd->nr_maps = hbiw_to_nr_maps[hb_ways];
+ if (cximsd->nr_maps)
+ offset =
+ cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+ }
+ return cxl_calculate_dpa_offset(offset, r_eiw, r_eig);
+}
+
+/**
+ * to_pos - extract an interleave position from an HPA offset
+ *
+ * @hpa_offset: host physical address offset
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ *
+ * Returns: device's position in the region interleave
+ */
+static u64 to_pos(u64 hpa_offset, u8 r_eiw, u16 r_eig, u8 hb_ways, u8 math)
+{
+ u64 offset = hpa_offset;
+
+ /* Reverse XOR mapping if specified */
+ if (math == XOR_MATH)
+ offset = cxl_do_xormap_calc(cximsd, hpa_offset, hb_ways);
+
+ return cxl_calculate_position(offset, r_eiw, r_eig);
+}
+
+/**
+ * run_translation_test - execute forward and reverse translations
+ *
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_hpa: expected host physical address
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+static int run_translation_test(u64 dpa, int pos, u8 r_eiw, u16 r_eig,
+ u8 hb_ways, int math, u64 expect_hpa)
+{
+ u64 translated_spa, reverse_dpa;
+ int reverse_pos;
+
+ /* Test Device to Host translation: DPA + POS -> SPA */
+ translated_spa = to_hpa(dpa, pos, r_eiw, r_eig, hb_ways, math);
+ if (translated_spa != expect_hpa) {
+ pr_err("Device to host failed: expected HPA %llu, got %llu\n",
+ expect_hpa, translated_spa);
+ return -1;
+ }
+
+ /* Test Host to Device DPA translation: SPA -> DPA */
+ reverse_dpa = to_dpa(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_dpa != dpa) {
+ pr_err("Host to Device DPA failed: expected %llu, got %llu\n",
+ dpa, reverse_dpa);
+ return -1;
+ }
+
+ /* Test Host to Device Position translation: SPA -> POS */
+ reverse_pos = to_pos(translated_spa, r_eiw, r_eig, hb_ways, math);
+ if (reverse_pos != pos) {
+ pr_err("Position lookup failed: expected %d, got %d\n", pos,
+ reverse_pos);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * parse_test_vector - parse a single test vector string
+ *
+ * @entry: test vector string to parse
+ * @dpa: device physical address
+ * @pos: expected position in region interleave
+ * @r_eiw: region encoded interleave ways
+ * @r_eig: region encoded interleave granularity
+ * @hb_ways: host bridge interleave ways
+ * @math: interleave arithmetic (MODULO_MATH or XOR_MATH)
+ * @expect_hpa: expected host physical address
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int parse_test_vector(const char *entry, u64 *dpa, int *pos, u8 *r_eiw,
+ u16 *r_eig, u8 *hb_ways, int *math,
+ u64 *expect_hpa)
+{
+ unsigned int tmp_r_eiw, tmp_r_eig, tmp_hb_ways;
+ int parsed;
+
+ parsed = sscanf(entry, "%llu %d %u %u %u %d %llu", dpa, pos, &tmp_r_eiw,
+ &tmp_r_eig, &tmp_hb_ways, math, expect_hpa);
+
+ if (parsed != EXPECTED_PARAMS) {
+ pr_err("Parse error: expected %d parameters, got %d in '%s'\n",
+ EXPECTED_PARAMS, parsed, entry);
+ return -EINVAL;
+ }
+ if (tmp_r_eiw > U8_MAX || tmp_r_eig > U16_MAX || tmp_hb_ways > U8_MAX) {
+ pr_err("Parameter overflow in entry: '%s'\n", entry);
+ return -ERANGE;
+ }
+ if (*math != MODULO_MATH && *math != XOR_MATH) {
+ pr_err("Invalid math type %d in entry: '%s'\n", *math, entry);
+ return -EINVAL;
+ }
+ *r_eiw = tmp_r_eiw;
+ *r_eig = tmp_r_eig;
+ *hb_ways = tmp_hb_ways;
+
+ return 0;
+}
+
+/*
+ * setup_xor_mapping - Initialize XOR mapping data structure
+ *
+ * The test data sets all use the same HBIG so we can use one set
+ * of xormaps, and set the number to apply based on HBIW before
+ * calling cxl_do_xormap_calc().
+ *
+ * When additional data sets arrive for validation with different
+ * HBIG's this static setup will need to be updated.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int setup_xor_mapping(void)
+{
+ if (nr_maps <= 0)
+ return -EINVAL;
+
+ cximsd = kzalloc(struct_size(cximsd, xormaps, nr_maps), GFP_KERNEL);
+ if (!cximsd)
+ return -ENOMEM;
+
+ memcpy(cximsd->xormaps, xormaps, nr_maps * sizeof(*cximsd->xormaps));
+ cximsd->nr_maps = nr_maps;
+
+ return 0;
+}
+
+static int test_random_params(void)
+{
+ u8 valid_eiws[] = { 0, 1, 2, 3, 4, 8, 9, 10 };
+ u16 valid_eigs[] = { 0, 1, 2, 3, 4, 5, 6 };
+ int i, ways, pos, reverse_pos;
+ u64 dpa, hpa, reverse_dpa;
+ int iterations = 10000;
+ int failures = 0;
+
+ for (i = 0; i < iterations; i++) {
+ /* Generate valid random parameters for eiw, eig, pos, dpa */
+ u8 eiw = valid_eiws[get_random_u32() % ARRAY_SIZE(valid_eiws)];
+ u16 eig = valid_eigs[get_random_u32() % ARRAY_SIZE(valid_eigs)];
+
+ eiw_to_ways(eiw, &ways);
+ pos = get_random_u32() % ways;
+ dpa = get_random_u64() >> 12;
+
+ hpa = cxl_calculate_hpa_offset(dpa, pos, eiw, eig);
+ reverse_dpa = cxl_calculate_dpa_offset(hpa, eiw, eig);
+ reverse_pos = cxl_calculate_position(hpa, eiw, eig);
+
+ if (reverse_dpa != dpa || reverse_pos != pos) {
+ pr_err("test random iter %d FAIL hpa=%llu, dpa=%llu reverse_dpa=%llu, pos=%d reverse_pos=%d eiw=%u eig=%u\n",
+ i, hpa, dpa, reverse_dpa, pos, reverse_pos, eiw,
+ eig);
+
+ if (failures++ > 10) {
+ pr_err("test random too many failures, stop\n");
+ break;
+ }
+ }
+ }
+ pr_info("..... test random: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct param_test {
+ u8 eiw;
+ u16 eig;
+ int pos;
+ bool expect; /* true: expect pass, false: expect fail */
+ const char *desc;
+};
+
+static struct param_test param_tests[] = {
+ { 0x0, 0, 0, true, "1-way, min eig=0, pos=0" },
+ { 0x0, 3, 0, true, "1-way, mid eig=3, pos=0" },
+ { 0x0, 6, 0, true, "1-way, max eig=6, pos=0" },
+ { 0x1, 0, 0, true, "2-way, eig=0, pos=0" },
+ { 0x1, 3, 1, true, "2-way, eig=3, max pos=1" },
+ { 0x1, 6, 1, true, "2-way, eig=6, max pos=1" },
+ { 0x2, 0, 0, true, "4-way, eig=0, pos=0" },
+ { 0x2, 3, 3, true, "4-way, eig=3, max pos=3" },
+ { 0x2, 6, 3, true, "4-way, eig=6, max pos=3" },
+ { 0x3, 0, 0, true, "8-way, eig=0, pos=0" },
+ { 0x3, 3, 7, true, "8-way, eig=3, max pos=7" },
+ { 0x3, 6, 7, true, "8-way, eig=6, max pos=7" },
+ { 0x4, 0, 0, true, "16-way, eig=0, pos=0" },
+ { 0x4, 3, 15, true, "16-way, eig=3, max pos=15" },
+ { 0x4, 6, 15, true, "16-way, eig=6, max pos=15" },
+ { 0x8, 0, 0, true, "3-way, eig=0, pos=0" },
+ { 0x8, 3, 2, true, "3-way, eig=3, max pos=2" },
+ { 0x8, 6, 2, true, "3-way, eig=6, max pos=2" },
+ { 0x9, 0, 0, true, "6-way, eig=0, pos=0" },
+ { 0x9, 3, 5, true, "6-way, eig=3, max pos=5" },
+ { 0x9, 6, 5, true, "6-way, eig=6, max pos=5" },
+ { 0xA, 0, 0, true, "12-way, eig=0, pos=0" },
+ { 0xA, 3, 11, true, "12-way, eig=3, max pos=11" },
+ { 0xA, 6, 11, true, "12-way, eig=6, max pos=11" },
+ { 0x5, 0, 0, false, "invalid eiw=5" },
+ { 0x7, 0, 0, false, "invalid eiw=7" },
+ { 0xB, 0, 0, false, "invalid eiw=0xB" },
+ { 0xFF, 0, 0, false, "invalid eiw=0xFF" },
+ { 0x1, 7, 0, false, "invalid eig=7 (out of range)" },
+ { 0x2, 0x10, 0, false, "invalid eig=0x10" },
+ { 0x3, 0xFFFF, 0, false, "invalid eig=0xFFFF" },
+ { 0x1, 0, -1, false, "pos < 0" },
+ { 0x1, 0, 2, false, "2-way, pos=2 (>= ways)" },
+ { 0x2, 0, 4, false, "4-way, pos=4 (>= ways)" },
+ { 0x3, 0, 8, false, "8-way, pos=8 (>= ways)" },
+ { 0x4, 0, 16, false, "16-way, pos=16 (>= ways)" },
+ { 0x8, 0, 3, false, "3-way, pos=3 (>= ways)" },
+ { 0x9, 0, 6, false, "6-way, pos=6 (>= ways)" },
+ { 0xA, 0, 12, false, "12-way, pos=12 (>= ways)" },
+};
+
+static int test_cxl_validate_translation_params(void)
+{
+ int i, rc, failures = 0;
+ bool valid;
+
+ for (i = 0; i < ARRAY_SIZE(param_tests); i++) {
+ struct param_test *t = &param_tests[i];
+
+ rc = cxl_validate_translation_params(t->eiw, t->eig, t->pos);
+ valid = (rc == 0);
+
+ if (valid != t->expect) {
+ pr_err("test params failed: %s\n", t->desc);
+ failures++;
+ }
+ }
+ pr_info("..... test params: PASS %d FAIL %d\n", i - failures, failures);
+
+ if (failures)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * cxl_translate_init
+ *
+ * Run the internal validation tests when no params are passed.
+ * Otherwise, parse the parameters (test vectors), and kick off
+ * the translation test.
+ *
+ * Returns: 0 on success, negative error code on failure
+ */
+static int __init cxl_translate_init(void)
+{
+ int rc, i;
+
+ /* If no tables are passed, validate module params only */
+ if (table_num == 0) {
+ pr_info("Internal validation test start...\n");
+ rc = test_cxl_validate_translation_params();
+ if (rc)
+ return rc;
+
+ rc = test_random_params();
+ if (rc)
+ return rc;
+
+ pr_info("Internal validation test completed successfully\n");
+
+ return 0;
+ }
+
+ pr_info("CXL translate test module loaded with %d test vectors\n",
+ table_num);
+
+ rc = setup_xor_mapping();
+ if (rc)
+ return rc;
+
+ /* Process each test vector */
+ for (i = 0; i < table_num; i++) {
+ u64 dpa, expect_spa;
+ int pos, math;
+ u8 r_eiw, hb_ways;
+ u16 r_eig;
+
+ pr_debug("Processing test vector %d: '%s'\n", i, table[i]);
+
+ /* Parse the test vector */
+ rc = parse_test_vector(table[i], &dpa, &pos, &r_eiw, &r_eig,
+ &hb_ways, &math, &expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " Failed to parse test vector '%s'\n",
+ i, table[i]);
+ continue;
+ }
+ /* Run the translation test */
+ rc = run_translation_test(dpa, pos, r_eiw, r_eig, hb_ways, math,
+ expect_spa);
+ if (rc) {
+ pr_err("CXL Translate Test %d: FAIL\n"
+ " dpa=%llu pos=%d r_eiw=%u r_eig=%u hb_ways=%u math=%s expect_spa=%llu\n",
+ i, dpa, pos, r_eiw, r_eig, hb_ways,
+ (math == XOR_MATH) ? "XOR" : "MODULO",
+ expect_spa);
+ } else {
+ pr_info("CXL Translate Test %d: PASS\n", i);
+ }
+ }
+
+ kfree(cximsd);
+ pr_info("CXL translate test completed\n");
+
+ return 0;
+}
+
+static void __exit cxl_translate_exit(void)
+{
+ pr_info("CXL translate test module unloaded\n");
+}
+
+module_param_array(table, charp, &table_num, 0444);
+MODULE_PARM_DESC(table, "Test vectors as space-separated decimal strings");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cxl_test: cxl address translation test module");
+MODULE_IMPORT_NS("CXL");
+
+module_init(cxl_translate_init);
+module_exit(cxl_translate_exit);
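
Each table= entry is a single string of seven decimal fields in the order consumed by parse_test_vector() above: dpa, pos, r_eiw, r_eig, hb_ways, math, expect_hpa. A minimal user-space sketch that prints one such vector; the values are illustrative only, and the 1-way/MODULO case is used on the assumption that the HPA offset then equals the DPA offset:

/* Hypothetical helper: emit one cxl_translate test vector in the format
 * matched by the module's sscanf("%llu %d %u %u %u %d %llu", ...).
 */
#include <stdio.h>

int main(void)
{
	unsigned long long dpa = 4096, expect_hpa = 4096;	/* assumed 1-way identity */
	unsigned int r_eiw = 0, r_eig = 0, hb_ways = 1;
	int pos = 0, math = 0;					/* MODULO_MATH */

	printf("table=\"%llu %d %u %u %u %d %llu\"\n",
	       dpa, pos, r_eiw, r_eig, hb_ways, math, expect_hpa);
	return 0;
}

The resulting string is what would be passed as the module's table parameter; loading the module with no table entries instead runs the internal parameter-validation and random round-trip tests.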
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index d533481672b7..176dcde570cd 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -250,22 +250,21 @@ static void mes_add_event(struct mock_event_store *mes,
* Vary the number of events returned to simulate events occuring while the
* logs are being read.
*/
-static int ret_limit = 0;
+static atomic_t event_counter = ATOMIC_INIT(0);
static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
struct mock_event_log *log;
- u16 nr_overflow;
+ int ret_limit;
u8 log_type;
int i;
if (cmd->size_in != sizeof(log_type))
return -EINVAL;
- ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
- if (!ret_limit)
- ret_limit = 1;
+ /* Vary return limit from 1 to CXL_TEST_EVENT_RET_MAX */
+ ret_limit = (atomic_inc_return(&event_counter) % CXL_TEST_EVENT_RET_MAX) + 1;
if (cmd->size_out < struct_size(pl, records, ret_limit))
return -EINVAL;
@@ -299,7 +298,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
u64 ns;
pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
- pl->overflow_err_count = cpu_to_le16(nr_overflow);
+ pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
ns = ktime_get_real_ns();
ns -= 5000000000; /* 5s ago */
pl->first_overflow_timestamp = cpu_to_le64(ns);
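
The atomic counter above replaces the racy static ret_limit; a tiny sketch of the same wrap-to-range idiom in plain user-space C, only to show that (counter % MAX) + 1 always yields a value in 1..MAX:

#include <stdio.h>

#define RET_MAX 3	/* stand-in for CXL_TEST_EVENT_RET_MAX */

int main(void)
{
	int counter = 0, i;

	for (i = 0; i < 6; i++)
		printf("call %d -> ret_limit %d\n", i, (++counter % RET_MAX) + 1);
	return 0;
}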
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 995269a75cbd..44bce80ef3ff 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -111,6 +111,26 @@ acpi_status __wrap_acpi_evaluate_integer(acpi_handle handle,
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_integer);
+int __wrap_hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size)
+{
+ int index, rc;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+ else
+ rc = hmat_get_extended_linear_cache_size(backing_res, nid,
+ cache_size);
+
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_hmat_get_extended_linear_cache_size);
+
struct acpi_pci_root *__wrap_acpi_pci_find_root(acpi_handle handle)
{
int index;
@@ -172,21 +192,6 @@ int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_endpoint_decoders_setup, "CXL");
-int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
-{
- int rc, index;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport_dev))
- rc = ops->devm_cxl_port_enumerate_dports(port);
- else
- rc = devm_cxl_port_enumerate_dports(port);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_port_enumerate_dports, "CXL");
-
int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
int rc, index;
@@ -226,23 +231,6 @@ struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_rch_dport, "CXL");
-resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
- struct cxl_dport *dport)
-{
- int index;
- resource_size_t component_reg_phys;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(dev))
- component_reg_phys = CXL_RESOURCE_NONE;
- else
- component_reg_phys = cxl_rcd_component_reg_phys(dev, dport);
- put_cxl_mock_ops(index);
-
- return component_reg_phys;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, "CXL");
-
void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
int index;
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index 4ed932e76aae..2684b89c8aa2 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -19,12 +19,14 @@ struct cxl_mock_ops {
bool (*is_mock_bus)(struct pci_bus *bus);
bool (*is_mock_port)(struct device *dev);
bool (*is_mock_dev)(struct device *dev);
- int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port);
int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port);
int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port);
void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port,
struct device *dport_dev);
+ int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
+ int nid,
+ resource_size_t *cache_size);
};
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index bb4d33dde3c8..10e051b6f592 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -13,9 +13,6 @@
static unsigned long HUGEPAGE_SIZE;
-#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
-#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
-
static unsigned long get_huge_page_size(void)
{
char buf[80];
@@ -1574,6 +1571,49 @@ TEST_F(iommufd_ioas, copy_sweep)
test_ioctl_destroy(dst_ioas_id);
}
+TEST_F(iommufd_ioas, dmabuf_simple)
+{
+ size_t buf_size = PAGE_SIZE*4;
+ __u64 iova;
+ int dfd;
+
+ test_cmd_get_dmabuf(buf_size, &dfd);
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, 0, &iova);
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, buf_size, buf_size, &iova);
+ test_err_ioctl_ioas_map_file(EINVAL, dfd, 0, buf_size + 1, &iova);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
+
+ close(dfd);
+}
+
+TEST_F(iommufd_ioas, dmabuf_revoke)
+{
+ size_t buf_size = PAGE_SIZE*4;
+ __u32 hwpt_id;
+ __u64 iova;
+ __u64 iova2;
+ int dfd;
+
+ test_cmd_get_dmabuf(buf_size, &dfd);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova);
+ test_cmd_revoke_dmabuf(dfd, true);
+
+ if (variant->mock_domains)
+ test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
+ &hwpt_id);
+
+ test_err_ioctl_ioas_map_file(ENODEV, dfd, 0, buf_size, &iova2);
+
+ test_cmd_revoke_dmabuf(dfd, false);
+ test_ioctl_ioas_map_file(dfd, 0, buf_size, &iova2);
+
+ /* Restore the iova back */
+ test_ioctl_ioas_unmap(iova, buf_size);
+ test_ioctl_ioas_map_fixed_file(dfd, 0, buf_size, iova);
+
+ close(dfd);
+}
+
FIXTURE(iommufd_mock_domain)
{
int fd;
@@ -2058,6 +2098,12 @@ FIXTURE_VARIANT(iommufd_dirty_tracking)
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ struct iommu_option cmd = {
+ .size = sizeof(cmd),
+ .option_id = IOMMU_OPTION_HUGE_PAGES,
+ .op = IOMMU_OPTION_OP_SET,
+ .val64 = 0,
+ };
size_t mmap_buffer_size;
unsigned long size;
int mmap_flags;
@@ -2066,7 +2112,7 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
if (variant->buffer_size < MOCK_PAGE_SIZE) {
SKIP(return,
- "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
+ "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%u",
variant->buffer_size, MOCK_PAGE_SIZE);
}
@@ -2114,16 +2160,18 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
test_ioctl_ioas_alloc(&self->ioas_id);
- /* Enable 1M mock IOMMU hugepages */
- if (variant->hugepages) {
- test_cmd_mock_domain_flags(self->ioas_id,
- MOCK_FLAGS_DEVICE_HUGE_IOVA,
- &self->stdev_id, &self->hwpt_id,
- &self->idev_id);
- } else {
- test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
- &self->hwpt_id, &self->idev_id);
- }
+
+ /*
+ * For dirty testing it is important that the page size fed into
+ * the iommu page tables matches the size the dirty logic
+ * expects, or set_dirty can touch too much stuff.
+ */
+ cmd.object_id = self->ioas_id;
+ if (!variant->hugepages)
+ ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
+
+ test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
+ &self->idev_id);
}
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
@@ -2248,18 +2296,23 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
uint32_t page_size = MOCK_PAGE_SIZE;
+ uint32_t ioas_id = self->ioas_id;
uint32_t hwpt_id;
- uint32_t ioas_id;
if (variant->hugepages)
page_size = MOCK_HUGE_PAGE_SIZE;
- test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
- test_cmd_hwpt_alloc(self->idev_id, ioas_id,
- IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
+ if (variant->hugepages)
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_HUGE, &hwpt_id);
+ else
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_DEFAULT, &hwpt_id);
test_cmd_set_dirty_tracking(hwpt_id, true);
@@ -2285,18 +2338,24 @@ TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
uint32_t page_size = MOCK_PAGE_SIZE;
+ uint32_t ioas_id = self->ioas_id;
uint32_t hwpt_id;
- uint32_t ioas_id;
if (variant->hugepages)
page_size = MOCK_HUGE_PAGE_SIZE;
- test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
- test_cmd_hwpt_alloc(self->idev_id, ioas_id,
- IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
+
+ if (variant->hugepages)
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_HUGE, &hwpt_id);
+ else
+ test_cmd_hwpt_alloc_iommupt(self->idev_id, ioas_id,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
+ MOCK_IOMMUPT_DEFAULT, &hwpt_id);
test_cmd_set_dirty_tracking(hwpt_id, true);
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 9f472c20c190..0a0ff6f7926d 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -215,6 +215,18 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_i
ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
0))
+#define test_cmd_hwpt_alloc_iommupt(device_id, pt_id, flags, iommupt_type, \
+ hwpt_id) \
+ ({ \
+ struct iommu_hwpt_selftest user_cfg = { \
+ .pagetable_type = iommupt_type \
+ }; \
+ \
+ ASSERT_EQ(0, _test_cmd_hwpt_alloc( \
+ self->fd, device_id, pt_id, 0, flags, \
+ hwpt_id, IOMMU_HWPT_DATA_SELFTEST, \
+ &user_cfg, sizeof(user_cfg))); \
+ })
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
self->fd, device_id, pt_id, 0, flags, \
@@ -548,6 +560,39 @@ static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages( \
self->fd, access_id, access_pages_id))
+static int _test_cmd_get_dmabuf(int fd, size_t len, int *out_fd)
+{
+ struct iommu_test_cmd cmd = {
+ .size = sizeof(cmd),
+ .op = IOMMU_TEST_OP_DMABUF_GET,
+ .dmabuf_get = { .length = len, .open_flags = O_CLOEXEC },
+ };
+
+ *out_fd = ioctl(fd, IOMMU_TEST_CMD, &cmd);
+ if (*out_fd < 0)
+ return -1;
+ return 0;
+}
+#define test_cmd_get_dmabuf(len, out_fd) \
+ ASSERT_EQ(0, _test_cmd_get_dmabuf(self->fd, len, out_fd))
+
+static int _test_cmd_revoke_dmabuf(int fd, int dmabuf_fd, bool revoked)
+{
+ struct iommu_test_cmd cmd = {
+ .size = sizeof(cmd),
+ .op = IOMMU_TEST_OP_DMABUF_REVOKE,
+ .dmabuf_revoke = { .dmabuf_fd = dmabuf_fd, .revoked = revoked },
+ };
+ int ret;
+
+ ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+#define test_cmd_revoke_dmabuf(dmabuf_fd, revoke) \
+ ASSERT_EQ(0, _test_cmd_revoke_dmabuf(self->fd, dmabuf_fd, revoke))
+
static int _test_ioctl_destroy(int fd, unsigned int id)
{
struct iommu_destroy cmd = {
@@ -718,6 +763,17 @@ static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
self->fd, ioas_id, mfd, start, length, iova_p, \
IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
+#define test_ioctl_ioas_map_fixed_file(mfd, start, length, iova) \
+ ({ \
+ __u64 __iova = iova; \
+ ASSERT_EQ(0, _test_ioctl_ioas_map_file( \
+ self->fd, self->ioas_id, mfd, start, \
+ length, &__iova, \
+ IOMMU_IOAS_MAP_FIXED_IOVA | \
+ IOMMU_IOAS_MAP_WRITEABLE | \
+ IOMMU_IOAS_MAP_READABLE)); \
+ })
+
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
struct iommu_test_cmd memlimit_cmd = {
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py
index bba8cb54548e..3d130c30bc7c 100644
--- a/tools/testing/selftests/tpm2/tpm2.py
+++ b/tools/testing/selftests/tpm2/tpm2.py
@@ -437,7 +437,7 @@ class Client:
def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
ds = get_digest_size(bank_alg)
- assert(ds == len(dig))
+ assert ds == len(dig)
auth_cmd = AuthCommand()
@@ -589,7 +589,7 @@ class Client:
def seal(self, parent_key, data, auth_value, policy_dig,
name_alg = TPM2_ALG_SHA1):
ds = get_digest_size(name_alg)
- assert(not policy_dig or ds == len(policy_dig))
+ assert not policy_dig or ds == len(policy_dig)
attributes = 0
if not policy_dig:
diff --git a/tools/testing/selftests/vfio/Makefile b/tools/testing/selftests/vfio/Makefile
index 324ba0175a33..3c796ca99a50 100644
--- a/tools/testing/selftests/vfio/Makefile
+++ b/tools/testing/selftests/vfio/Makefile
@@ -2,8 +2,14 @@ CFLAGS = $(KHDR_INCLUDES)
TEST_GEN_PROGS += vfio_dma_mapping_test
TEST_GEN_PROGS += vfio_iommufd_setup_test
TEST_GEN_PROGS += vfio_pci_device_test
+TEST_GEN_PROGS += vfio_pci_device_init_perf_test
TEST_GEN_PROGS += vfio_pci_driver_test
-TEST_PROGS_EXTENDED := run.sh
+
+TEST_FILES += scripts/cleanup.sh
+TEST_FILES += scripts/lib.sh
+TEST_FILES += scripts/run.sh
+TEST_FILES += scripts/setup.sh
+
include ../lib.mk
include lib/libvfio.mk
@@ -11,6 +17,8 @@ CFLAGS += -I$(top_srcdir)/tools/include
CFLAGS += -MD
CFLAGS += $(EXTRA_CFLAGS)
+LDFLAGS += -pthread
+
$(TEST_GEN_PROGS): %: %.o $(LIBVFIO_O)
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $< $(LIBVFIO_O) $(LDLIBS) -o $@
diff --git a/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c b/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
index 0ca2cbc2a316..c75045bcab79 100644
--- a/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
+++ b/tools/testing/selftests/vfio/lib/drivers/dsa/dsa.c
@@ -9,7 +9,7 @@
#include <linux/pci_ids.h>
#include <linux/sizes.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "registers.h"
@@ -70,7 +70,7 @@ static int dsa_probe(struct vfio_pci_device *device)
return -EINVAL;
if (dsa_int_handle_request_required(device)) {
- printf("Device requires requesting interrupt handles\n");
+ dev_err(device, "Device requires requesting interrupt handles\n");
return -EINVAL;
}
@@ -91,23 +91,23 @@ static void dsa_check_sw_err(struct vfio_pci_device *device)
return;
}
- fprintf(stderr, "SWERR: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+ dev_err(device, "SWERR: 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
err.bits[0], err.bits[1], err.bits[2], err.bits[3]);
- fprintf(stderr, " valid: 0x%x\n", err.valid);
- fprintf(stderr, " overflow: 0x%x\n", err.overflow);
- fprintf(stderr, " desc_valid: 0x%x\n", err.desc_valid);
- fprintf(stderr, " wq_idx_valid: 0x%x\n", err.wq_idx_valid);
- fprintf(stderr, " batch: 0x%x\n", err.batch);
- fprintf(stderr, " fault_rw: 0x%x\n", err.fault_rw);
- fprintf(stderr, " priv: 0x%x\n", err.priv);
- fprintf(stderr, " error: 0x%x\n", err.error);
- fprintf(stderr, " wq_idx: 0x%x\n", err.wq_idx);
- fprintf(stderr, " operation: 0x%x\n", err.operation);
- fprintf(stderr, " pasid: 0x%x\n", err.pasid);
- fprintf(stderr, " batch_idx: 0x%x\n", err.batch_idx);
- fprintf(stderr, " invalid_flags: 0x%x\n", err.invalid_flags);
- fprintf(stderr, " fault_addr: 0x%lx\n", err.fault_addr);
+ dev_err(device, " valid: 0x%x\n", err.valid);
+ dev_err(device, " overflow: 0x%x\n", err.overflow);
+ dev_err(device, " desc_valid: 0x%x\n", err.desc_valid);
+ dev_err(device, " wq_idx_valid: 0x%x\n", err.wq_idx_valid);
+ dev_err(device, " batch: 0x%x\n", err.batch);
+ dev_err(device, " fault_rw: 0x%x\n", err.fault_rw);
+ dev_err(device, " priv: 0x%x\n", err.priv);
+ dev_err(device, " error: 0x%x\n", err.error);
+ dev_err(device, " wq_idx: 0x%x\n", err.wq_idx);
+ dev_err(device, " operation: 0x%x\n", err.operation);
+ dev_err(device, " pasid: 0x%x\n", err.pasid);
+ dev_err(device, " batch_idx: 0x%x\n", err.batch_idx);
+ dev_err(device, " invalid_flags: 0x%x\n", err.invalid_flags);
+ dev_err(device, " fault_addr: 0x%lx\n", err.fault_addr);
VFIO_FAIL("Software Error Detected!\n");
}
@@ -256,7 +256,7 @@ static int dsa_completion_wait(struct vfio_pci_device *device,
if (status == DSA_COMP_SUCCESS)
return 0;
- printf("Error detected during memcpy operation: 0x%x\n", status);
+ dev_err(device, "Error detected during memcpy operation: 0x%x\n", status);
return -1;
}
diff --git a/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c b/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
index c3b91d9b1f59..a871b935542b 100644
--- a/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
+++ b/tools/testing/selftests/vfio/lib/drivers/ioat/ioat.c
@@ -7,7 +7,7 @@
#include <linux/pci_ids.h>
#include <linux/sizes.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "hw.h"
#include "registers.h"
@@ -51,7 +51,7 @@ static int ioat_probe(struct vfio_pci_device *device)
r = 0;
break;
default:
- printf("ioat: Unsupported version: 0x%x\n", version);
+ dev_err(device, "ioat: Unsupported version: 0x%x\n", version);
r = -EINVAL;
}
return r;
@@ -135,13 +135,13 @@ static void ioat_handle_error(struct vfio_pci_device *device)
{
void *registers = ioat_channel_registers(device);
- printf("Error detected during memcpy operation!\n"
- " CHANERR: 0x%x\n"
- " CHANERR_INT: 0x%x\n"
- " DMAUNCERRSTS: 0x%x\n",
- readl(registers + IOAT_CHANERR_OFFSET),
- vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET),
- vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET));
+ dev_err(device, "Error detected during memcpy operation!\n"
+ " CHANERR: 0x%x\n"
+ " CHANERR_INT: 0x%x\n"
+ " DMAUNCERRSTS: 0x%x\n",
+ readl(registers + IOAT_CHANERR_OFFSET),
+ vfio_pci_config_readl(device, IOAT_PCI_CHANERR_INT_OFFSET),
+ vfio_pci_config_readl(device, IOAT_PCI_DMAUNCERRSTS_OFFSET));
ioat_reset(device);
}
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio.h b/tools/testing/selftests/vfio/lib/include/libvfio.h
new file mode 100644
index 000000000000..279ddcd70194
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H
+
+#include <libvfio/assert.h>
+#include <libvfio/iommu.h>
+#include <libvfio/iova_allocator.h>
+#include <libvfio/vfio_pci_device.h>
+#include <libvfio/vfio_pci_driver.h>
+
+/*
+ * Return the BDF string of the device that the test should use.
+ *
+ * If a BDF string is provided by the user on the command line (as the last
+ * element of argv[]), then this function will return that and decrement argc
+ * by 1.
+ *
+ * Otherwise this function will attempt to use the environment variable
+ * $VFIO_SELFTESTS_BDF.
+ *
+ * If BDF cannot be determined then the test will exit with KSFT_SKIP.
+ */
+const char *vfio_selftests_get_bdf(int *argc, char *argv[]);
+char **vfio_selftests_get_bdfs(int *argc, char *argv[], int *nr_bdfs);
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H */
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio/assert.h b/tools/testing/selftests/vfio/lib/include/libvfio/assert.h
new file mode 100644
index 000000000000..f4ebd122d9b6
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio/assert.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_ASSERT_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_ASSERT_H
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "../../../../kselftest.h"
+
+#define VFIO_LOG_AND_EXIT(...) do { \
+ fprintf(stderr, " " __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(KSFT_FAIL); \
+} while (0)
+
+#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do { \
+ typeof(_lhs) __lhs = (_lhs); \
+ typeof(_rhs) __rhs = (_rhs); \
+ \
+ if (__lhs _op __rhs) \
+ break; \
+ \
+ fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__); \
+ fprintf(stderr, " Expression: " #_lhs " " #_op " " #_rhs "\n"); \
+ fprintf(stderr, " Observed: %#lx %s %#lx\n", \
+ (u64)__lhs, #_op, (u64)__rhs); \
+ fprintf(stderr, " [errno: %d - %s]\n", errno, strerror(errno)); \
+ VFIO_LOG_AND_EXIT(__VA_ARGS__); \
+} while (0)
+
+#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__)
+#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__)
+#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__)
+#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__)
+#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__)
+#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__)
+#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__)
+#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__)
+#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, _a, ##__VA_ARGS__)
+#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, _a, ##__VA_ARGS__)
+
+#define VFIO_FAIL(_fmt, ...) do { \
+ fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__); \
+ VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ioctl_assert(_fd, _op, _arg) do { \
+ void *__arg = (_arg); \
+ int __ret = ioctl((_fd), (_op), (__arg)); \
+ VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
+} while (0)
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_ASSERT_H */
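
A minimal sketch of how the assertion helpers are intended to be used in a test body (hypothetical snippet, assuming the selftest build environment so that the kselftest helpers and u64 resolve):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <libvfio/assert.h>

static void open_or_fail(const char *path)
{
	int fd = open(path, O_RDWR);

	/* On failure this prints file:line, the expression, both observed
	 * values and errno, then exits the test with KSFT_FAIL.
	 */
	VFIO_ASSERT_GE(fd, 0, "failed to open %s", path);

	VFIO_ASSERT_EQ(close(fd), 0);
}

ioctl_assert() wraps the same pattern around an ioctl() call that is expected to return 0.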
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio/iommu.h b/tools/testing/selftests/vfio/lib/include/libvfio/iommu.h
new file mode 100644
index 000000000000..5c9b9dc6d993
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio/iommu.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOMMU_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOMMU_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include <libvfio/assert.h>
+
+typedef u64 iova_t;
+
+struct iommu_mode {
+ const char *name;
+ const char *container_path;
+ unsigned long iommu_type;
+};
+
+extern const char *default_iommu_mode;
+
+struct dma_region {
+ struct list_head link;
+ void *vaddr;
+ iova_t iova;
+ u64 size;
+};
+
+struct iommu {
+ const struct iommu_mode *mode;
+ int container_fd;
+ int iommufd;
+ u32 ioas_id;
+ struct list_head dma_regions;
+};
+
+struct iommu *iommu_init(const char *iommu_mode);
+void iommu_cleanup(struct iommu *iommu);
+
+int __iommu_map(struct iommu *iommu, struct dma_region *region);
+
+static inline void iommu_map(struct iommu *iommu, struct dma_region *region)
+{
+ VFIO_ASSERT_EQ(__iommu_map(iommu, region), 0);
+}
+
+int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped);
+
+static inline void iommu_unmap(struct iommu *iommu, struct dma_region *region)
+{
+ VFIO_ASSERT_EQ(__iommu_unmap(iommu, region, NULL), 0);
+}
+
+int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped);
+
+static inline void iommu_unmap_all(struct iommu *iommu)
+{
+ VFIO_ASSERT_EQ(__iommu_unmap_all(iommu, NULL), 0);
+}
+
+int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova);
+iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr);
+
+struct iommu_iova_range *iommu_iova_ranges(struct iommu *iommu, u32 *nranges);
+
+/*
+ * Generator for VFIO selftests fixture variants that replicate across all
+ * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE()
+ * which should then use FIXTURE_VARIANT_ADD() to create the variant.
+ */
+#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \
+FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \
+FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \
+FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \
+FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \
+FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__)
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOMMU_H */
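
As a usage sketch, the iommu handle can be created independently of any device and queried for the IOVA ranges a mapping may use. This assumes the array returned by iommu_iova_ranges() is heap-allocated and owned by the caller:

#include <stdio.h>
#include <stdlib.h>

#include <libvfio.h>

static void dump_iova_ranges(void)
{
	struct iommu *iommu = iommu_init(default_iommu_mode);
	struct iommu_iova_range *ranges;
	u32 nranges, i;

	ranges = iommu_iova_ranges(iommu, &nranges);
	for (i = 0; i < nranges; i++)
		printf("range %u: [0x%llx, 0x%llx]\n", i,
		       (unsigned long long)ranges[i].start,
		       (unsigned long long)ranges[i].last);

	free(ranges);
	iommu_cleanup(iommu);
}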
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h b/tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h
new file mode 100644
index 000000000000..8f1d994e9ea2
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio/iova_allocator.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOVA_ALLOCATOR_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOVA_ALLOCATOR_H
+
+#include <uapi/linux/types.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/iommufd.h>
+
+#include <libvfio/iommu.h>
+
+struct iova_allocator {
+ struct iommu_iova_range *ranges;
+ u32 nranges;
+ u32 range_idx;
+ u64 range_offset;
+};
+
+struct iova_allocator *iova_allocator_init(struct iommu *iommu);
+void iova_allocator_cleanup(struct iova_allocator *allocator);
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_IOVA_ALLOCATOR_H */
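
Combined with the iommu handle above, the allocator picks IOVAs from the ranges the IOMMU actually supports. A hedged sketch of mapping one anonymous buffer for DMA (a 4096-byte page size is assumed for brevity):

#include <sys/mman.h>

#include <libvfio.h>

static void map_one_page(struct iommu *iommu)
{
	struct iova_allocator *allocator = iova_allocator_init(iommu);
	struct dma_region region = {
		.vaddr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
		.size = 4096,
	};

	VFIO_ASSERT_NE(region.vaddr, MAP_FAILED);

	/* Pick a legal IOVA from the device-visible ranges, then map it. */
	region.iova = iova_allocator_alloc(allocator, region.size);
	iommu_map(iommu, &region);

	/* ... issue DMA against region.iova ... */

	iommu_unmap(iommu, &region);
	iova_allocator_cleanup(allocator);
	munmap(region.vaddr, region.size);
}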
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h b/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
new file mode 100644
index 000000000000..2858885a89bb
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_device.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DEVICE_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DEVICE_H
+
+#include <fcntl.h>
+#include <linux/vfio.h>
+#include <linux/pci_regs.h>
+
+#include <libvfio/assert.h>
+#include <libvfio/iommu.h>
+#include <libvfio/vfio_pci_driver.h>
+
+struct vfio_pci_bar {
+ struct vfio_region_info info;
+ void *vaddr;
+};
+
+struct vfio_pci_device {
+ const char *bdf;
+ int fd;
+ int group_fd;
+
+ struct iommu *iommu;
+
+ struct vfio_device_info info;
+ struct vfio_region_info config_space;
+ struct vfio_pci_bar bars[PCI_STD_NUM_BARS];
+
+ struct vfio_irq_info msi_info;
+ struct vfio_irq_info msix_info;
+
+ /* eventfds for MSI and MSI-x interrupts */
+ int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];
+
+ struct vfio_pci_driver driver;
+};
+
+#define dev_info(_dev, _fmt, ...) printf("%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)
+#define dev_err(_dev, _fmt, ...) fprintf(stderr, "%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)
+
+struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu);
+void vfio_pci_device_cleanup(struct vfio_pci_device *device);
+
+void vfio_pci_device_reset(struct vfio_pci_device *device);
+
+void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
+ size_t config, size_t size, void *data);
+
+#define vfio_pci_config_read(_device, _offset, _type) ({ \
+ _type __data; \
+ vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data); \
+ __data; \
+})
+
+#define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8)
+#define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16)
+#define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32)
+
+#define vfio_pci_config_write(_device, _offset, _value, _type) do { \
+ _type __data = (_value); \
+ vfio_pci_config_access((_device), true, _offset, sizeof(_type), &__data); \
+} while (0)
+
+#define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8)
+#define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16)
+#define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32)
+
+void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index,
+ u32 vector, int count);
+void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index);
+void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector);
+
+static inline void fcntl_set_nonblock(int fd)
+{
+ int r;
+
+ r = fcntl(fd, F_GETFL, 0);
+ VFIO_ASSERT_NE(r, -1, "F_GETFL failed for fd %d\n", fd);
+
+ r = fcntl(fd, F_SETFL, r | O_NONBLOCK);
+ VFIO_ASSERT_NE(r, -1, "F_SETFL O_NONBLOCK failed for fd %d\n", fd);
+}
+
+static inline void vfio_pci_msi_enable(struct vfio_pci_device *device,
+ u32 vector, int count)
+{
+ vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, vector, count);
+}
+
+static inline void vfio_pci_msi_disable(struct vfio_pci_device *device)
+{
+ vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX);
+}
+
+static inline void vfio_pci_msix_enable(struct vfio_pci_device *device,
+ u32 vector, int count)
+{
+ vfio_pci_irq_enable(device, VFIO_PCI_MSIX_IRQ_INDEX, vector, count);
+}
+
+static inline void vfio_pci_msix_disable(struct vfio_pci_device *device)
+{
+ vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
+}
+
+static inline int __to_iova(struct vfio_pci_device *device, void *vaddr, iova_t *iova)
+{
+ return __iommu_hva2iova(device->iommu, vaddr, iova);
+}
+
+static inline iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
+{
+ return iommu_hva2iova(device->iommu, vaddr);
+}
+
+static inline bool vfio_pci_device_match(struct vfio_pci_device *device,
+ u16 vendor_id, u16 device_id)
+{
+ return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) &&
+ (device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID));
+}
+
+const char *vfio_pci_get_cdev_path(const char *bdf);
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DEVICE_H */
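
A sketch of the minimal open/query/close flow with the split iommu and device objects (hypothetical test main(); assumes the usual libvfio link setup and a BDF supplied on the command line or via $VFIO_SELFTESTS_BDF):

#include <libvfio.h>

int main(int argc, char *argv[])
{
	const char *bdf = vfio_selftests_get_bdf(&argc, argv);
	struct iommu *iommu = iommu_init(default_iommu_mode);
	struct vfio_pci_device *device = vfio_pci_device_init(bdf, iommu);

	/* Read the vendor/device IDs through the VFIO config space region. */
	dev_info(device, "vendor 0x%04x device 0x%04x\n",
		 vfio_pci_config_readw(device, PCI_VENDOR_ID),
		 vfio_pci_config_readw(device, PCI_DEVICE_ID));

	vfio_pci_device_cleanup(device);
	iommu_cleanup(iommu);
	return 0;
}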
diff --git a/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h b/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h
new file mode 100644
index 000000000000..e5ada209b1d1
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/include/libvfio/vfio_pci_driver.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DRIVER_H
+#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DRIVER_H
+
+#include <libvfio/iommu.h>
+
+struct vfio_pci_device;
+
+struct vfio_pci_driver_ops {
+ const char *name;
+
+ /**
+ * @probe() - Check if the driver supports the given device.
+ *
+ * Return: 0 on success, non-0 on failure.
+ */
+ int (*probe)(struct vfio_pci_device *device);
+
+ /**
+ * @init() - Initialize the driver for @device.
+ *
+ * Must be called after device->driver.region has been initialized.
+ */
+ void (*init)(struct vfio_pci_device *device);
+
+ /**
+ * remove() - Deinitialize the driver for @device.
+ */
+ void (*remove)(struct vfio_pci_device *device);
+
+ /**
+ * memcpy_start() - Kick off @count repeated memcpy operations from
+ * [@src, @src + @size) to [@dst, @dst + @size).
+ *
+ * Guarantees:
+ * - The device will attempt DMA reads on [src, src + size).
+ * - The device will attempt DMA writes on [dst, dst + size).
+ * - The device will not generate any interrupts.
+ *
+ * memcpy_start() returns immediately, it does not wait for the
+ * copies to complete.
+ */
+ void (*memcpy_start)(struct vfio_pci_device *device,
+ iova_t src, iova_t dst, u64 size, u64 count);
+
+ /**
+ * memcpy_wait() - Wait until the memcpy operations started by
+ * memcpy_start() have finished.
+ *
+ * Guarantees:
+ * - All in-flight DMAs initiated by memcpy_start() are fully complete
+ * before memcpy_wait() returns.
+ *
+ * Returns non-0 if the driver detects that an error occurred during the
+ * memcpy, 0 otherwise.
+ */
+ int (*memcpy_wait)(struct vfio_pci_device *device);
+
+ /**
+ * send_msi() - Make the device send the MSI device->driver.msi.
+ *
+ * Guarantees:
+ * - The device will send the MSI once.
+ */
+ void (*send_msi)(struct vfio_pci_device *device);
+};
+
+struct vfio_pci_driver {
+ const struct vfio_pci_driver_ops *ops;
+ bool initialized;
+ bool memcpy_in_progress;
+
+ /* Region to be used by the driver (e.g. for in-memory descriptors) */
+ struct dma_region region;
+
+ /* The maximum size that can be passed to memcpy_start(). */
+ u64 max_memcpy_size;
+
+ /* The maximum count that can be passed to memcpy_start(). */
+ u64 max_memcpy_count;
+
+ /* The MSI vector the device will signal in ops->send_msi(). */
+ int msi;
+};
+
+void vfio_pci_driver_probe(struct vfio_pci_device *device);
+void vfio_pci_driver_init(struct vfio_pci_device *device);
+void vfio_pci_driver_remove(struct vfio_pci_device *device);
+int vfio_pci_driver_memcpy(struct vfio_pci_device *device,
+ iova_t src, iova_t dst, u64 size);
+void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device,
+ iova_t src, iova_t dst, u64 size,
+ u64 count);
+int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device);
+void vfio_pci_driver_send_msi(struct vfio_pci_device *device);
+
+#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_VFIO_PCI_DRIVER_H */
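
For the driver layer, a hedged sketch of the intended call sequence; it assumes the device was opened as above and that device->driver.region already points at a mapped dma_region, which ops->init() requires:

#include <libvfio.h>

static void device_memcpy(struct vfio_pci_device *device,
			  iova_t src, iova_t dst, u64 size)
{
	vfio_pci_driver_probe(device);	/* select a matching driver, e.g. dsa or ioat */
	vfio_pci_driver_init(device);

	/* One memcpy of size bytes; non-0 if the driver detected an error. */
	VFIO_ASSERT_EQ(vfio_pci_driver_memcpy(device, src, dst, size), 0);

	vfio_pci_driver_remove(device);
}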
diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h
deleted file mode 100644
index 69ec0c856481..000000000000
--- a/tools/testing/selftests/vfio/lib/include/vfio_util.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
-#define SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
-
-#include <fcntl.h>
-#include <string.h>
-
-#include <uapi/linux/types.h>
-#include <linux/iommufd.h>
-#include <linux/list.h>
-#include <linux/pci_regs.h>
-#include <linux/vfio.h>
-
-#include "../../../kselftest.h"
-
-#define VFIO_LOG_AND_EXIT(...) do { \
- fprintf(stderr, " " __VA_ARGS__); \
- fprintf(stderr, "\n"); \
- exit(KSFT_FAIL); \
-} while (0)
-
-#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do { \
- typeof(_lhs) __lhs = (_lhs); \
- typeof(_rhs) __rhs = (_rhs); \
- \
- if (__lhs _op __rhs) \
- break; \
- \
- fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__); \
- fprintf(stderr, " Expression: " #_lhs " " #_op " " #_rhs "\n"); \
- fprintf(stderr, " Observed: %#lx %s %#lx\n", \
- (u64)__lhs, #_op, (u64)__rhs); \
- fprintf(stderr, " [errno: %d - %s]\n", errno, strerror(errno)); \
- VFIO_LOG_AND_EXIT(__VA_ARGS__); \
-} while (0)
-
-#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__)
-#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__)
-#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__)
-#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__)
-#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__)
-#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__)
-#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__)
-#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__)
-#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, _a, ##__VA_ARGS__)
-#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, _a, ##__VA_ARGS__)
-
-#define VFIO_FAIL(_fmt, ...) do { \
- fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__); \
- VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__); \
-} while (0)
-
-struct vfio_iommu_mode {
- const char *name;
- const char *container_path;
- unsigned long iommu_type;
-};
-
-/*
- * Generator for VFIO selftests fixture variants that replicate across all
- * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE()
- * which should then use FIXTURE_VARIANT_ADD() to create the variant.
- */
-#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \
-FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \
-FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \
-FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \
-FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \
-FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__)
-
-struct vfio_pci_bar {
- struct vfio_region_info info;
- void *vaddr;
-};
-
-typedef u64 iova_t;
-
-#define INVALID_IOVA UINT64_MAX
-
-struct vfio_dma_region {
- struct list_head link;
- void *vaddr;
- iova_t iova;
- u64 size;
-};
-
-struct vfio_pci_device;
-
-struct vfio_pci_driver_ops {
- const char *name;
-
- /**
- * @probe() - Check if the driver supports the given device.
- *
- * Return: 0 on success, non-0 on failure.
- */
- int (*probe)(struct vfio_pci_device *device);
-
- /**
- * @init() - Initialize the driver for @device.
- *
- * Must be called after device->driver.region has been initialized.
- */
- void (*init)(struct vfio_pci_device *device);
-
- /**
- * remove() - Deinitialize the driver for @device.
- */
- void (*remove)(struct vfio_pci_device *device);
-
- /**
- * memcpy_start() - Kick off @count repeated memcpy operations from
- * [@src, @src + @size) to [@dst, @dst + @size).
- *
- * Guarantees:
- * - The device will attempt DMA reads on [src, src + size).
- * - The device will attempt DMA writes on [dst, dst + size).
- * - The device will not generate any interrupts.
- *
- * memcpy_start() returns immediately, it does not wait for the
- * copies to complete.
- */
- void (*memcpy_start)(struct vfio_pci_device *device,
- iova_t src, iova_t dst, u64 size, u64 count);
-
- /**
- * memcpy_wait() - Wait until the memcpy operations started by
- * memcpy_start() have finished.
- *
- * Guarantees:
- * - All in-flight DMAs initiated by memcpy_start() are fully complete
- * before memcpy_wait() returns.
- *
- * Returns non-0 if the driver detects that an error occurred during the
- * memcpy, 0 otherwise.
- */
- int (*memcpy_wait)(struct vfio_pci_device *device);
-
- /**
- * send_msi() - Make the device send the MSI device->driver.msi.
- *
- * Guarantees:
- * - The device will send the MSI once.
- */
- void (*send_msi)(struct vfio_pci_device *device);
-};
-
-struct vfio_pci_driver {
- const struct vfio_pci_driver_ops *ops;
- bool initialized;
- bool memcpy_in_progress;
-
- /* Region to be used by the driver (e.g. for in-memory descriptors) */
- struct vfio_dma_region region;
-
- /* The maximum size that can be passed to memcpy_start(). */
- u64 max_memcpy_size;
-
- /* The maximum count that can be passed to memcpy_start(). */
- u64 max_memcpy_count;
-
- /* The MSI vector the device will signal in ops->send_msi(). */
- int msi;
-};
-
-struct vfio_pci_device {
- int fd;
-
- const struct vfio_iommu_mode *iommu_mode;
- int group_fd;
- int container_fd;
-
- int iommufd;
- u32 ioas_id;
-
- struct vfio_device_info info;
- struct vfio_region_info config_space;
- struct vfio_pci_bar bars[PCI_STD_NUM_BARS];
-
- struct vfio_irq_info msi_info;
- struct vfio_irq_info msix_info;
-
- struct list_head dma_regions;
-
- /* eventfds for MSI and MSI-x interrupts */
- int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];
-
- struct vfio_pci_driver driver;
-};
-
-struct iova_allocator {
- struct iommu_iova_range *ranges;
- u32 nranges;
- u32 range_idx;
- u64 range_offset;
-};
-
-/*
- * Return the BDF string of the device that the test should use.
- *
- * If a BDF string is provided by the user on the command line (as the last
- * element of argv[]), then this function will return that and decrement argc
- * by 1.
- *
- * Otherwise this function will attempt to use the environment variable
- * $VFIO_SELFTESTS_BDF.
- *
- * If BDF cannot be determined then the test will exit with KSFT_SKIP.
- */
-const char *vfio_selftests_get_bdf(int *argc, char *argv[]);
-const char *vfio_pci_get_cdev_path(const char *bdf);
-
-extern const char *default_iommu_mode;
-
-struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode);
-void vfio_pci_device_cleanup(struct vfio_pci_device *device);
-void vfio_pci_device_reset(struct vfio_pci_device *device);
-
-struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
- u32 *nranges);
-
-struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
-void iova_allocator_cleanup(struct iova_allocator *allocator);
-iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
-
-int __vfio_pci_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region);
-int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region,
- u64 *unmapped);
-int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped);
-
-static inline void vfio_pci_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
-{
- VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0);
-}
-
-static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
-{
- VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0);
-}
-
-static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device)
-{
- VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0);
-}
-
-void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
- size_t config, size_t size, void *data);
-
-#define vfio_pci_config_read(_device, _offset, _type) ({ \
- _type __data; \
- vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data); \
- __data; \
-})
-
-#define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8)
-#define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16)
-#define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32)
-
-#define vfio_pci_config_write(_device, _offset, _value, _type) do { \
- _type __data = (_value); \
- vfio_pci_config_access((_device), true, _offset, sizeof(_type), &__data); \
-} while (0)
-
-#define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8)
-#define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16)
-#define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32)
-
-void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index,
- u32 vector, int count);
-void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index);
-void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector);
-
-static inline void fcntl_set_nonblock(int fd)
-{
- int r;
-
- r = fcntl(fd, F_GETFL, 0);
- VFIO_ASSERT_NE(r, -1, "F_GETFL failed for fd %d\n", fd);
-
- r = fcntl(fd, F_SETFL, r | O_NONBLOCK);
- VFIO_ASSERT_NE(r, -1, "F_SETFL O_NONBLOCK failed for fd %d\n", fd);
-}
-
-static inline void vfio_pci_msi_enable(struct vfio_pci_device *device,
- u32 vector, int count)
-{
- vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, vector, count);
-}
-
-static inline void vfio_pci_msi_disable(struct vfio_pci_device *device)
-{
- vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX);
-}
-
-static inline void vfio_pci_msix_enable(struct vfio_pci_device *device,
- u32 vector, int count)
-{
- vfio_pci_irq_enable(device, VFIO_PCI_MSIX_IRQ_INDEX, vector, count);
-}
-
-static inline void vfio_pci_msix_disable(struct vfio_pci_device *device)
-{
- vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
-}
-
-iova_t __to_iova(struct vfio_pci_device *device, void *vaddr);
-iova_t to_iova(struct vfio_pci_device *device, void *vaddr);
-
-static inline bool vfio_pci_device_match(struct vfio_pci_device *device,
- u16 vendor_id, u16 device_id)
-{
- return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) &&
- (device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID));
-}
-
-void vfio_pci_driver_probe(struct vfio_pci_device *device);
-void vfio_pci_driver_init(struct vfio_pci_device *device);
-void vfio_pci_driver_remove(struct vfio_pci_device *device);
-int vfio_pci_driver_memcpy(struct vfio_pci_device *device,
- iova_t src, iova_t dst, u64 size);
-void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device,
- iova_t src, iova_t dst, u64 size,
- u64 count);
-int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device);
-void vfio_pci_driver_send_msi(struct vfio_pci_device *device);
-
-#endif /* SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H */
diff --git a/tools/testing/selftests/vfio/lib/iommu.c b/tools/testing/selftests/vfio/lib/iommu.c
new file mode 100644
index 000000000000..8079d43523f3
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/iommu.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <dirent.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <uapi/linux/types.h>
+#include <linux/limits.h>
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+#include <linux/iommufd.h>
+
+#include "../../../kselftest.h"
+#include <libvfio.h>
+
+const char *default_iommu_mode = "iommufd";
+
+/* Reminder: Keep in sync with FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(). */
+static const struct iommu_mode iommu_modes[] = {
+ {
+ .name = "vfio_type1_iommu",
+ .container_path = "/dev/vfio/vfio",
+ .iommu_type = VFIO_TYPE1_IOMMU,
+ },
+ {
+ .name = "vfio_type1v2_iommu",
+ .container_path = "/dev/vfio/vfio",
+ .iommu_type = VFIO_TYPE1v2_IOMMU,
+ },
+ {
+ .name = "iommufd_compat_type1",
+ .container_path = "/dev/iommu",
+ .iommu_type = VFIO_TYPE1_IOMMU,
+ },
+ {
+ .name = "iommufd_compat_type1v2",
+ .container_path = "/dev/iommu",
+ .iommu_type = VFIO_TYPE1v2_IOMMU,
+ },
+ {
+ .name = "iommufd",
+ },
+};
+
+static const struct iommu_mode *lookup_iommu_mode(const char *iommu_mode)
+{
+ int i;
+
+ if (!iommu_mode)
+ iommu_mode = default_iommu_mode;
+
+ for (i = 0; i < ARRAY_SIZE(iommu_modes); i++) {
+ if (strcmp(iommu_mode, iommu_modes[i].name))
+ continue;
+
+ return &iommu_modes[i];
+ }
+
+ VFIO_FAIL("Unrecognized IOMMU mode: %s\n", iommu_mode);
+}
+
+int __iommu_hva2iova(struct iommu *iommu, void *vaddr, iova_t *iova)
+{
+ struct dma_region *region;
+
+ list_for_each_entry(region, &iommu->dma_regions, link) {
+ if (vaddr < region->vaddr)
+ continue;
+
+ if (vaddr >= region->vaddr + region->size)
+ continue;
+
+ if (iova)
+ *iova = region->iova + (vaddr - region->vaddr);
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr)
+{
+ iova_t iova;
+ int ret;
+
+ ret = __iommu_hva2iova(iommu, vaddr, &iova);
+ VFIO_ASSERT_EQ(ret, 0, "%p is not mapped into the iommu\n", vaddr);
+
+ return iova;
+}
+
+static int vfio_iommu_map(struct iommu *iommu, struct dma_region *region)
+{
+ struct vfio_iommu_type1_dma_map args = {
+ .argsz = sizeof(args),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = (u64)region->vaddr,
+ .iova = region->iova,
+ .size = region->size,
+ };
+
+ if (ioctl(iommu->container_fd, VFIO_IOMMU_MAP_DMA, &args))
+ return -errno;
+
+ return 0;
+}
+
+static int iommufd_map(struct iommu *iommu, struct dma_region *region)
+{
+ struct iommu_ioas_map args = {
+ .size = sizeof(args),
+ .flags = IOMMU_IOAS_MAP_READABLE |
+ IOMMU_IOAS_MAP_WRITEABLE |
+ IOMMU_IOAS_MAP_FIXED_IOVA,
+ .user_va = (u64)region->vaddr,
+ .iova = region->iova,
+ .length = region->size,
+ .ioas_id = iommu->ioas_id,
+ };
+
+ if (ioctl(iommu->iommufd, IOMMU_IOAS_MAP, &args))
+ return -errno;
+
+ return 0;
+}
+
+int __iommu_map(struct iommu *iommu, struct dma_region *region)
+{
+ int ret;
+
+ if (iommu->iommufd)
+ ret = iommufd_map(iommu, region);
+ else
+ ret = vfio_iommu_map(iommu, region);
+
+ if (ret)
+ return ret;
+
+ list_add(&region->link, &iommu->dma_regions);
+
+ return 0;
+}
+
+static int __vfio_iommu_unmap(int fd, u64 iova, u64 size, u32 flags, u64 *unmapped)
+{
+ struct vfio_iommu_type1_dma_unmap args = {
+ .argsz = sizeof(args),
+ .iova = iova,
+ .size = size,
+ .flags = flags,
+ };
+
+ if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args))
+ return -errno;
+
+ if (unmapped)
+ *unmapped = args.size;
+
+ return 0;
+}
+
+static int vfio_iommu_unmap(struct iommu *iommu, struct dma_region *region,
+ u64 *unmapped)
+{
+ return __vfio_iommu_unmap(iommu->container_fd, region->iova,
+ region->size, 0, unmapped);
+}
+
+static int __iommufd_unmap(int fd, u64 iova, u64 length, u32 ioas_id, u64 *unmapped)
+{
+ struct iommu_ioas_unmap args = {
+ .size = sizeof(args),
+ .iova = iova,
+ .length = length,
+ .ioas_id = ioas_id,
+ };
+
+ if (ioctl(fd, IOMMU_IOAS_UNMAP, &args))
+ return -errno;
+
+ if (unmapped)
+ *unmapped = args.length;
+
+ return 0;
+}
+
+static int iommufd_unmap(struct iommu *iommu, struct dma_region *region,
+ u64 *unmapped)
+{
+ return __iommufd_unmap(iommu->iommufd, region->iova, region->size,
+ iommu->ioas_id, unmapped);
+}
+
+int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped)
+{
+ int ret;
+
+ if (iommu->iommufd)
+ ret = iommufd_unmap(iommu, region, unmapped);
+ else
+ ret = vfio_iommu_unmap(iommu, region, unmapped);
+
+ if (ret)
+ return ret;
+
+ list_del_init(&region->link);
+
+ return 0;
+}
+
+int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped)
+{
+ int ret;
+ struct dma_region *curr, *next;
+
+ if (iommu->iommufd)
+ ret = __iommufd_unmap(iommu->iommufd, 0, UINT64_MAX,
+ iommu->ioas_id, unmapped);
+ else
+ ret = __vfio_iommu_unmap(iommu->container_fd, 0, 0,
+ VFIO_DMA_UNMAP_FLAG_ALL, unmapped);
+
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(curr, next, &iommu->dma_regions, link)
+ list_del_init(&curr->link);
+
+ return 0;
+}
+
+static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
+ u32 *cap_offset)
+{
+ struct vfio_info_cap_header *hdr;
+
+ if (!*cap_offset)
+ return NULL;
+
+ VFIO_ASSERT_LT(*cap_offset, bufsz);
+ VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
+
+ hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
+ *cap_offset = hdr->next;
+
+ return hdr;
+}
+
+static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
+ u16 cap_id)
+{
+ struct vfio_info_cap_header *hdr;
+ u32 cap_offset = info->cap_offset;
+ u32 max_depth;
+ u32 depth = 0;
+
+ if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
+ return NULL;
+
+ if (cap_offset)
+ VFIO_ASSERT_GE(cap_offset, sizeof(*info));
+
+ max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);
+
+ while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
+ depth++;
+ VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");
+
+ if (hdr->id == cap_id)
+ return hdr;
+ }
+
+ return NULL;
+}
+
+/* Return buffer including capability chain, if present. Free with free() */
+static struct vfio_iommu_type1_info *vfio_iommu_get_info(int container_fd)
+{
+ struct vfio_iommu_type1_info *info;
+
+ info = malloc(sizeof(*info));
+ VFIO_ASSERT_NOT_NULL(info);
+
+ *info = (struct vfio_iommu_type1_info) {
+ .argsz = sizeof(*info),
+ };
+
+ ioctl_assert(container_fd, VFIO_IOMMU_GET_INFO, info);
+ VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+ info = realloc(info, info->argsz);
+ VFIO_ASSERT_NOT_NULL(info);
+
+ ioctl_assert(container_fd, VFIO_IOMMU_GET_INFO, info);
+ VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+ return info;
+}
+
+/*
+ * Return iova ranges for the iommu's container. Normalize vfio_iommu_type1 to
+ * report iommufd's iommu_iova_range. Free with free().
+ */
+static struct iommu_iova_range *vfio_iommu_iova_ranges(struct iommu *iommu,
+ u32 *nranges)
+{
+ struct vfio_iommu_type1_info_cap_iova_range *cap_range;
+ struct vfio_iommu_type1_info *info;
+ struct vfio_info_cap_header *hdr;
+ struct iommu_iova_range *ranges = NULL;
+
+ info = vfio_iommu_get_info(iommu->container_fd);
+ hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
+ VFIO_ASSERT_NOT_NULL(hdr);
+
+ cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
+ VFIO_ASSERT_GT(cap_range->nr_iovas, 0);
+
+ ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ for (u32 i = 0; i < cap_range->nr_iovas; i++) {
+ ranges[i] = (struct iommu_iova_range){
+ .start = cap_range->iova_ranges[i].start,
+ .last = cap_range->iova_ranges[i].end,
+ };
+ }
+
+ *nranges = cap_range->nr_iovas;
+
+ free(info);
+ return ranges;
+}
+
+/* Return iova ranges of the iommu's IOAS. Free with free() */
+static struct iommu_iova_range *iommufd_iova_ranges(struct iommu *iommu,
+ u32 *nranges)
+{
+ struct iommu_iova_range *ranges;
+ int ret;
+
+ struct iommu_ioas_iova_ranges query = {
+ .size = sizeof(query),
+ .ioas_id = iommu->ioas_id,
+ };
+
+ ret = ioctl(iommu->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+ VFIO_ASSERT_EQ(ret, -1);
+ VFIO_ASSERT_EQ(errno, EMSGSIZE);
+ VFIO_ASSERT_GT(query.num_iovas, 0);
+
+ ranges = calloc(query.num_iovas, sizeof(*ranges));
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ query.allowed_iovas = (uintptr_t)ranges;
+
+ ioctl_assert(iommu->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+ *nranges = query.num_iovas;
+
+ return ranges;
+}
+
+static int iova_range_comp(const void *a, const void *b)
+{
+ const struct iommu_iova_range *ra = a, *rb = b;
+
+ if (ra->start < rb->start)
+ return -1;
+
+ if (ra->start > rb->start)
+ return 1;
+
+ return 0;
+}
+
+/* Return sorted IOVA ranges of the iommu. Free with free(). */
+struct iommu_iova_range *iommu_iova_ranges(struct iommu *iommu, u32 *nranges)
+{
+ struct iommu_iova_range *ranges;
+
+ if (iommu->iommufd)
+ ranges = iommufd_iova_ranges(iommu, nranges);
+ else
+ ranges = vfio_iommu_iova_ranges(iommu, nranges);
+
+ if (!ranges)
+ return NULL;
+
+ VFIO_ASSERT_GT(*nranges, 0);
+
+ /* Sort and check that ranges are sane and non-overlapping */
+ qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
+ VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);
+
+ for (u32 i = 1; i < *nranges; i++) {
+ VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
+ VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
+ }
+
+ return ranges;
+}
+
+static u32 iommufd_ioas_alloc(int iommufd)
+{
+ struct iommu_ioas_alloc args = {
+ .size = sizeof(args),
+ };
+
+ ioctl_assert(iommufd, IOMMU_IOAS_ALLOC, &args);
+ return args.out_ioas_id;
+}
+
+struct iommu *iommu_init(const char *iommu_mode)
+{
+ const char *container_path;
+ struct iommu *iommu;
+ int version;
+
+ iommu = calloc(1, sizeof(*iommu));
+ VFIO_ASSERT_NOT_NULL(iommu);
+
+ INIT_LIST_HEAD(&iommu->dma_regions);
+
+ iommu->mode = lookup_iommu_mode(iommu_mode);
+
+ container_path = iommu->mode->container_path;
+ if (container_path) {
+ iommu->container_fd = open(container_path, O_RDWR);
+ VFIO_ASSERT_GE(iommu->container_fd, 0, "open(%s) failed\n", container_path);
+
+ version = ioctl(iommu->container_fd, VFIO_GET_API_VERSION);
+ VFIO_ASSERT_EQ(version, VFIO_API_VERSION, "Unsupported version: %d\n", version);
+ } else {
+ /*
+		 * Require iommu->iommufd to be >0 so that a simple non-0 check can be
+ * used to check if iommufd is enabled. In practice open() will never
+ * return 0 unless stdin is closed.
+ */
+ iommu->iommufd = open("/dev/iommu", O_RDWR);
+ VFIO_ASSERT_GT(iommu->iommufd, 0);
+
+ iommu->ioas_id = iommufd_ioas_alloc(iommu->iommufd);
+ }
+
+ return iommu;
+}
+
+void iommu_cleanup(struct iommu *iommu)
+{
+ if (iommu->iommufd)
+ VFIO_ASSERT_EQ(close(iommu->iommufd), 0);
+ else
+ VFIO_ASSERT_EQ(close(iommu->container_fd), 0);
+
+ free(iommu);
+}
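Below, a hedged usage sketch that is not part of the patch: driving the new standalone iommu object without any vfio_pci_device. It assumes iommu_map() is the asserting wrapper around __iommu_map() used by the tests, that <sys/mman.h> and <unistd.h> are available, and that the fixed IOVA is illustrative (real tests take one from an iova_allocator):

static void iommu_usage_example(void)
{
	size_t size = getpagesize();
	struct iommu *iommu = iommu_init("iommufd");
	struct dma_region region = {
		.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0),
		.iova = 0x10000,	/* illustrative; not guaranteed valid */
		.size = size,
	};

	VFIO_ASSERT_NE(region.vaddr, MAP_FAILED);

	iommu_map(iommu, &region);
	VFIO_ASSERT_EQ(iommu_hva2iova(iommu, region.vaddr), region.iova);

	VFIO_ASSERT_EQ(__iommu_unmap_all(iommu, NULL), 0);
	iommu_cleanup(iommu);
	VFIO_ASSERT_EQ(munmap(region.vaddr, size), 0);
}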
diff --git a/tools/testing/selftests/vfio/lib/iova_allocator.c b/tools/testing/selftests/vfio/lib/iova_allocator.c
new file mode 100644
index 000000000000..a12b0a51e9e6
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/iova_allocator.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <dirent.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
+#include <linux/limits.h>
+#include <linux/mman.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+
+#include <libvfio.h>
+
+struct iova_allocator *iova_allocator_init(struct iommu *iommu)
+{
+ struct iova_allocator *allocator;
+ struct iommu_iova_range *ranges;
+ u32 nranges;
+
+ ranges = iommu_iova_ranges(iommu, &nranges);
+ VFIO_ASSERT_NOT_NULL(ranges);
+
+ allocator = malloc(sizeof(*allocator));
+ VFIO_ASSERT_NOT_NULL(allocator);
+
+ *allocator = (struct iova_allocator){
+ .ranges = ranges,
+ .nranges = nranges,
+ .range_idx = 0,
+ .range_offset = 0,
+ };
+
+ return allocator;
+}
+
+void iova_allocator_cleanup(struct iova_allocator *allocator)
+{
+ free(allocator->ranges);
+ free(allocator);
+}
+
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size)
+{
+ VFIO_ASSERT_GT(size, 0, "Invalid size arg, zero\n");
+ VFIO_ASSERT_EQ(size & (size - 1), 0, "Invalid size arg, non-power-of-2\n");
+
+ for (;;) {
+ struct iommu_iova_range *range;
+ iova_t iova, last;
+
+ VFIO_ASSERT_LT(allocator->range_idx, allocator->nranges,
+ "IOVA allocator out of space\n");
+
+ range = &allocator->ranges[allocator->range_idx];
+ iova = range->start + allocator->range_offset;
+
+ /* Check for sufficient space at the current offset */
+ if (check_add_overflow(iova, size - 1, &last) ||
+ last > range->last)
+ goto next_range;
+
+ /* Align iova to size */
+ iova = last & ~(size - 1);
+
+ /* Check for sufficient space at the aligned iova */
+ if (check_add_overflow(iova, size - 1, &last) ||
+ last > range->last)
+ goto next_range;
+
+ if (last == range->last) {
+ allocator->range_idx++;
+ allocator->range_offset = 0;
+ } else {
+ allocator->range_offset = last - range->start + 1;
+ }
+
+ return iova;
+
+next_range:
+ allocator->range_idx++;
+ allocator->range_offset = 0;
+ }
+}
+
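A hedged worked example, not in the patch, of the alignment step above: because size is a power of two, `last & ~(size - 1)` is equivalent to rounding the candidate IOVA up to the next size-aligned boundary within the range.

static void iova_align_example(void)
{
	iova_t iova = 0x10002000;	/* range start + current offset */
	u64 size = 0x4000;
	iova_t last = iova + size - 1;	/* 0x10005fff */

	iova = last & ~(size - 1);	/* 0x10004000: aligned up to size */
	last = iova + size - 1;		/* 0x10007fff */

	VFIO_ASSERT_EQ(iova, 0x10004000);
	VFIO_ASSERT_EQ(last, 0x10007fff);
}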
diff --git a/tools/testing/selftests/vfio/lib/libvfio.c b/tools/testing/selftests/vfio/lib/libvfio.c
new file mode 100644
index 000000000000..a23a3cc5be69
--- /dev/null
+++ b/tools/testing/selftests/vfio/lib/libvfio.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../../kselftest.h"
+#include <libvfio.h>
+
+static bool is_bdf(const char *str)
+{
+ unsigned int s, b, d, f;
+ int length, count;
+
+ count = sscanf(str, "%4x:%2x:%2x.%2x%n", &s, &b, &d, &f, &length);
+ return count == 4 && length == strlen(str);
+}
+
+static char **get_bdfs_cmdline(int *argc, char *argv[], int *nr_bdfs)
+{
+ int i;
+
+ for (i = *argc - 1; i > 0 && is_bdf(argv[i]); i--)
+ continue;
+
+ i++;
+ *nr_bdfs = *argc - i;
+ *argc -= *nr_bdfs;
+
+ return *nr_bdfs ? &argv[i] : NULL;
+}
+
+static char *get_bdf_env(void)
+{
+ char *bdf;
+
+ bdf = getenv("VFIO_SELFTESTS_BDF");
+ if (!bdf)
+ return NULL;
+
+ VFIO_ASSERT_TRUE(is_bdf(bdf), "Invalid BDF: %s\n", bdf);
+ return bdf;
+}
+
+char **vfio_selftests_get_bdfs(int *argc, char *argv[], int *nr_bdfs)
+{
+ static char *env_bdf;
+ char **bdfs;
+
+ bdfs = get_bdfs_cmdline(argc, argv, nr_bdfs);
+ if (bdfs)
+ return bdfs;
+
+ env_bdf = get_bdf_env();
+ if (env_bdf) {
+ *nr_bdfs = 1;
+ return &env_bdf;
+ }
+
+ fprintf(stderr, "Unable to determine which device(s) to use, skipping test.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, "To pass the device address via environment variable:\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " export VFIO_SELFTESTS_BDF=\"segment:bus:device.function\"\n");
+ fprintf(stderr, " %s [options]\n", argv[0]);
+ fprintf(stderr, "\n");
+ fprintf(stderr, "To pass the device address(es) via argv:\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " %s [options] segment:bus:device.function ...\n", argv[0]);
+ fprintf(stderr, "\n");
+ exit(KSFT_SKIP);
+}
+
+const char *vfio_selftests_get_bdf(int *argc, char *argv[])
+{
+ int nr_bdfs;
+
+ return vfio_selftests_get_bdfs(argc, argv, &nr_bdfs)[0];
+}
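For reference, a hedged illustration (not in the patch) of the strings is_bdf() accepts; it would have to live in this file since is_bdf() is static. The %n conversion must consume the entire string, so truncated or suffixed addresses are rejected:

static void bdf_format_example(void)
{
	VFIO_ASSERT_TRUE(is_bdf("0000:00:04.0"));	/* segment:bus:device.function */
	VFIO_ASSERT_FALSE(is_bdf("00:04.0"));		/* missing segment */
	VFIO_ASSERT_FALSE(is_bdf("0000:00:04.0 "));	/* trailing character */
}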
diff --git a/tools/testing/selftests/vfio/lib/libvfio.mk b/tools/testing/selftests/vfio/lib/libvfio.mk
index 5d11c3a89a28..9f47bceed16f 100644
--- a/tools/testing/selftests/vfio/lib/libvfio.mk
+++ b/tools/testing/selftests/vfio/lib/libvfio.mk
@@ -1,24 +1,29 @@
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
-VFIO_DIR := $(selfdir)/vfio
+LIBVFIO_SRCDIR := $(selfdir)/vfio/lib
-LIBVFIO_C := lib/vfio_pci_device.c
-LIBVFIO_C += lib/vfio_pci_driver.c
+LIBVFIO_C := iommu.c
+LIBVFIO_C += iova_allocator.c
+LIBVFIO_C += libvfio.c
+LIBVFIO_C += vfio_pci_device.c
+LIBVFIO_C += vfio_pci_driver.c
ifeq ($(ARCH:x86_64=x86),x86)
-LIBVFIO_C += lib/drivers/ioat/ioat.c
-LIBVFIO_C += lib/drivers/dsa/dsa.c
+LIBVFIO_C += drivers/ioat/ioat.c
+LIBVFIO_C += drivers/dsa/dsa.c
endif
-LIBVFIO_O := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBVFIO_C))
+LIBVFIO_OUTPUT := $(OUTPUT)/libvfio
+
+LIBVFIO_O := $(patsubst %.c, $(LIBVFIO_OUTPUT)/%.o, $(LIBVFIO_C))
LIBVFIO_O_DIRS := $(shell dirname $(LIBVFIO_O) | uniq)
$(shell mkdir -p $(LIBVFIO_O_DIRS))
-CFLAGS += -I$(VFIO_DIR)/lib/include
+CFLAGS += -I$(LIBVFIO_SRCDIR)/include
-$(LIBVFIO_O): $(OUTPUT)/%.o : $(VFIO_DIR)/%.c
+$(LIBVFIO_O): $(LIBVFIO_OUTPUT)/%.o : $(LIBVFIO_SRCDIR)/%.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
-EXTRA_CLEAN += $(LIBVFIO_O)
+EXTRA_CLEAN += $(LIBVFIO_OUTPUT)
diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
index b479a359da12..13fdb4b0b10f 100644
--- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c
+++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
@@ -20,286 +20,10 @@
#include <linux/vfio.h>
#include "../../../kselftest.h"
-#include <vfio_util.h>
+#include <libvfio.h>
#define PCI_SYSFS_PATH "/sys/bus/pci/devices"
-#define ioctl_assert(_fd, _op, _arg) do { \
- void *__arg = (_arg); \
- int __ret = ioctl((_fd), (_op), (__arg)); \
- VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
-} while (0)
-
-static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
- u32 *cap_offset)
-{
- struct vfio_info_cap_header *hdr;
-
- if (!*cap_offset)
- return NULL;
-
- VFIO_ASSERT_LT(*cap_offset, bufsz);
- VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
-
- hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
- *cap_offset = hdr->next;
-
- return hdr;
-}
-
-static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
- u16 cap_id)
-{
- struct vfio_info_cap_header *hdr;
- u32 cap_offset = info->cap_offset;
- u32 max_depth;
- u32 depth = 0;
-
- if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
- return NULL;
-
- if (cap_offset)
- VFIO_ASSERT_GE(cap_offset, sizeof(*info));
-
- max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);
-
- while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
- depth++;
- VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");
-
- if (hdr->id == cap_id)
- return hdr;
- }
-
- return NULL;
-}
-
-/* Return buffer including capability chain, if present. Free with free() */
-static struct vfio_iommu_type1_info *vfio_iommu_get_info(struct vfio_pci_device *device)
-{
- struct vfio_iommu_type1_info *info;
-
- info = malloc(sizeof(*info));
- VFIO_ASSERT_NOT_NULL(info);
-
- *info = (struct vfio_iommu_type1_info) {
- .argsz = sizeof(*info),
- };
-
- ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
- VFIO_ASSERT_GE(info->argsz, sizeof(*info));
-
- info = realloc(info, info->argsz);
- VFIO_ASSERT_NOT_NULL(info);
-
- ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
- VFIO_ASSERT_GE(info->argsz, sizeof(*info));
-
- return info;
-}
-
-/*
- * Return iova ranges for the device's container. Normalize vfio_iommu_type1 to
- * report iommufd's iommu_iova_range. Free with free().
- */
-static struct iommu_iova_range *vfio_iommu_iova_ranges(struct vfio_pci_device *device,
- u32 *nranges)
-{
- struct vfio_iommu_type1_info_cap_iova_range *cap_range;
- struct vfio_iommu_type1_info *info;
- struct vfio_info_cap_header *hdr;
- struct iommu_iova_range *ranges = NULL;
-
- info = vfio_iommu_get_info(device);
- hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
- VFIO_ASSERT_NOT_NULL(hdr);
-
- cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
- VFIO_ASSERT_GT(cap_range->nr_iovas, 0);
-
- ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
- VFIO_ASSERT_NOT_NULL(ranges);
-
- for (u32 i = 0; i < cap_range->nr_iovas; i++) {
- ranges[i] = (struct iommu_iova_range){
- .start = cap_range->iova_ranges[i].start,
- .last = cap_range->iova_ranges[i].end,
- };
- }
-
- *nranges = cap_range->nr_iovas;
-
- free(info);
- return ranges;
-}
-
-/* Return iova ranges of the device's IOAS. Free with free() */
-static struct iommu_iova_range *iommufd_iova_ranges(struct vfio_pci_device *device,
- u32 *nranges)
-{
- struct iommu_iova_range *ranges;
- int ret;
-
- struct iommu_ioas_iova_ranges query = {
- .size = sizeof(query),
- .ioas_id = device->ioas_id,
- };
-
- ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
- VFIO_ASSERT_EQ(ret, -1);
- VFIO_ASSERT_EQ(errno, EMSGSIZE);
- VFIO_ASSERT_GT(query.num_iovas, 0);
-
- ranges = calloc(query.num_iovas, sizeof(*ranges));
- VFIO_ASSERT_NOT_NULL(ranges);
-
- query.allowed_iovas = (uintptr_t)ranges;
-
- ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
- *nranges = query.num_iovas;
-
- return ranges;
-}
-
-static int iova_range_comp(const void *a, const void *b)
-{
- const struct iommu_iova_range *ra = a, *rb = b;
-
- if (ra->start < rb->start)
- return -1;
-
- if (ra->start > rb->start)
- return 1;
-
- return 0;
-}
-
-/* Return sorted IOVA ranges of the device. Free with free(). */
-struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
- u32 *nranges)
-{
- struct iommu_iova_range *ranges;
-
- if (device->iommufd)
- ranges = iommufd_iova_ranges(device, nranges);
- else
- ranges = vfio_iommu_iova_ranges(device, nranges);
-
- if (!ranges)
- return NULL;
-
- VFIO_ASSERT_GT(*nranges, 0);
-
- /* Sort and check that ranges are sane and non-overlapping */
- qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
- VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);
-
- for (u32 i = 1; i < *nranges; i++) {
- VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
- VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
- }
-
- return ranges;
-}
-
-struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device)
-{
- struct iova_allocator *allocator;
- struct iommu_iova_range *ranges;
- u32 nranges;
-
- ranges = vfio_pci_iova_ranges(device, &nranges);
- VFIO_ASSERT_NOT_NULL(ranges);
-
- allocator = malloc(sizeof(*allocator));
- VFIO_ASSERT_NOT_NULL(allocator);
-
- *allocator = (struct iova_allocator){
- .ranges = ranges,
- .nranges = nranges,
- .range_idx = 0,
- .range_offset = 0,
- };
-
- return allocator;
-}
-
-void iova_allocator_cleanup(struct iova_allocator *allocator)
-{
- free(allocator->ranges);
- free(allocator);
-}
-
-iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size)
-{
- VFIO_ASSERT_GT(size, 0, "Invalid size arg, zero\n");
- VFIO_ASSERT_EQ(size & (size - 1), 0, "Invalid size arg, non-power-of-2\n");
-
- for (;;) {
- struct iommu_iova_range *range;
- iova_t iova, last;
-
- VFIO_ASSERT_LT(allocator->range_idx, allocator->nranges,
- "IOVA allocator out of space\n");
-
- range = &allocator->ranges[allocator->range_idx];
- iova = range->start + allocator->range_offset;
-
- /* Check for sufficient space at the current offset */
- if (check_add_overflow(iova, size - 1, &last) ||
- last > range->last)
- goto next_range;
-
- /* Align iova to size */
- iova = last & ~(size - 1);
-
- /* Check for sufficient space at the aligned iova */
- if (check_add_overflow(iova, size - 1, &last) ||
- last > range->last)
- goto next_range;
-
- if (last == range->last) {
- allocator->range_idx++;
- allocator->range_offset = 0;
- } else {
- allocator->range_offset = last - range->start + 1;
- }
-
- return iova;
-
-next_range:
- allocator->range_idx++;
- allocator->range_offset = 0;
- }
-}
-
-iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
-{
- struct vfio_dma_region *region;
-
- list_for_each_entry(region, &device->dma_regions, link) {
- if (vaddr < region->vaddr)
- continue;
-
- if (vaddr >= region->vaddr + region->size)
- continue;
-
- return region->iova + (vaddr - region->vaddr);
- }
-
- return INVALID_IOVA;
-}
-
-iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
-{
- iova_t iova;
-
- iova = __to_iova(device, vaddr);
- VFIO_ASSERT_NE(iova, INVALID_IOVA, "%p is not mapped into device.\n", vaddr);
-
- return iova;
-}
-
static void vfio_pci_irq_set(struct vfio_pci_device *device,
u32 index, u32 vector, u32 count, int *fds)
{
@@ -386,141 +110,6 @@ static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,
ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
}
-static int vfio_iommu_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
-{
- struct vfio_iommu_type1_dma_map args = {
- .argsz = sizeof(args),
- .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
- .vaddr = (u64)region->vaddr,
- .iova = region->iova,
- .size = region->size,
- };
-
- if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args))
- return -errno;
-
- return 0;
-}
-
-static int iommufd_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
-{
- struct iommu_ioas_map args = {
- .size = sizeof(args),
- .flags = IOMMU_IOAS_MAP_READABLE |
- IOMMU_IOAS_MAP_WRITEABLE |
- IOMMU_IOAS_MAP_FIXED_IOVA,
- .user_va = (u64)region->vaddr,
- .iova = region->iova,
- .length = region->size,
- .ioas_id = device->ioas_id,
- };
-
- if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args))
- return -errno;
-
- return 0;
-}
-
-int __vfio_pci_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
-{
- int ret;
-
- if (device->iommufd)
- ret = iommufd_dma_map(device, region);
- else
- ret = vfio_iommu_dma_map(device, region);
-
- if (ret)
- return ret;
-
- list_add(&region->link, &device->dma_regions);
-
- return 0;
-}
-
-static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags,
- u64 *unmapped)
-{
- struct vfio_iommu_type1_dma_unmap args = {
- .argsz = sizeof(args),
- .iova = iova,
- .size = size,
- .flags = flags,
- };
-
- if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args))
- return -errno;
-
- if (unmapped)
- *unmapped = args.size;
-
- return 0;
-}
-
-static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id,
- u64 *unmapped)
-{
- struct iommu_ioas_unmap args = {
- .size = sizeof(args),
- .iova = iova,
- .length = length,
- .ioas_id = ioas_id,
- };
-
- if (ioctl(fd, IOMMU_IOAS_UNMAP, &args))
- return -errno;
-
- if (unmapped)
- *unmapped = args.length;
-
- return 0;
-}
-
-int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region, u64 *unmapped)
-{
- int ret;
-
- if (device->iommufd)
- ret = iommufd_dma_unmap(device->iommufd, region->iova,
- region->size, device->ioas_id,
- unmapped);
- else
- ret = vfio_iommu_dma_unmap(device->container_fd, region->iova,
- region->size, 0, unmapped);
-
- if (ret)
- return ret;
-
- list_del_init(&region->link);
-
- return 0;
-}
-
-int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped)
-{
- int ret;
- struct vfio_dma_region *curr, *next;
-
- if (device->iommufd)
- ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX,
- device->ioas_id, unmapped);
- else
- ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0,
- VFIO_DMA_UNMAP_FLAG_ALL, unmapped);
-
- if (ret)
- return ret;
-
- list_for_each_entry_safe(curr, next, &device->dma_regions, link)
- list_del_init(&curr->link);
-
- return 0;
-}
-
static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
struct vfio_region_info *info)
{
@@ -627,28 +216,26 @@ static void vfio_pci_group_setup(struct vfio_pci_device *device, const char *bdf
ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status);
VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE);
- ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->container_fd);
+ ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->iommu->container_fd);
}
static void vfio_pci_container_setup(struct vfio_pci_device *device, const char *bdf)
{
- unsigned long iommu_type = device->iommu_mode->iommu_type;
- const char *path = device->iommu_mode->container_path;
- int version;
+ struct iommu *iommu = device->iommu;
+ unsigned long iommu_type = iommu->mode->iommu_type;
int ret;
- device->container_fd = open(path, O_RDWR);
- VFIO_ASSERT_GE(device->container_fd, 0, "open(%s) failed\n", path);
-
- version = ioctl(device->container_fd, VFIO_GET_API_VERSION);
- VFIO_ASSERT_EQ(version, VFIO_API_VERSION, "Unsupported version: %d\n", version);
-
vfio_pci_group_setup(device, bdf);
- ret = ioctl(device->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
+ ret = ioctl(iommu->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);
- ioctl_assert(device->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);
+ /*
+ * Allow multiple threads to race to set the IOMMU type on the
+ * container. The first will succeed and the rest should fail
+ * because the IOMMU type is already set.
+ */
+ (void)ioctl(iommu->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);
device->fd = ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf);
VFIO_ASSERT_GE(device->fd, 0);
@@ -712,52 +299,6 @@ const char *vfio_pci_get_cdev_path(const char *bdf)
return cdev_path;
}
-/* Reminder: Keep in sync with FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(). */
-static const struct vfio_iommu_mode iommu_modes[] = {
- {
- .name = "vfio_type1_iommu",
- .container_path = "/dev/vfio/vfio",
- .iommu_type = VFIO_TYPE1_IOMMU,
- },
- {
- .name = "vfio_type1v2_iommu",
- .container_path = "/dev/vfio/vfio",
- .iommu_type = VFIO_TYPE1v2_IOMMU,
- },
- {
- .name = "iommufd_compat_type1",
- .container_path = "/dev/iommu",
- .iommu_type = VFIO_TYPE1_IOMMU,
- },
- {
- .name = "iommufd_compat_type1v2",
- .container_path = "/dev/iommu",
- .iommu_type = VFIO_TYPE1v2_IOMMU,
- },
- {
- .name = "iommufd",
- },
-};
-
-const char *default_iommu_mode = "iommufd";
-
-static const struct vfio_iommu_mode *lookup_iommu_mode(const char *iommu_mode)
-{
- int i;
-
- if (!iommu_mode)
- iommu_mode = default_iommu_mode;
-
- for (i = 0; i < ARRAY_SIZE(iommu_modes); i++) {
- if (strcmp(iommu_mode, iommu_modes[i].name))
- continue;
-
- return &iommu_modes[i];
- }
-
- VFIO_FAIL("Unrecognized IOMMU mode: %s\n", iommu_mode);
-}
-
static void vfio_device_bind_iommufd(int device_fd, int iommufd)
{
struct vfio_device_bind_iommufd args = {
@@ -768,16 +309,6 @@ static void vfio_device_bind_iommufd(int device_fd, int iommufd)
ioctl_assert(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &args);
}
-static u32 iommufd_ioas_alloc(int iommufd)
-{
- struct iommu_ioas_alloc args = {
- .size = sizeof(args),
- };
-
- ioctl_assert(iommufd, IOMMU_IOAS_ALLOC, &args);
- return args.out_ioas_id;
-}
-
static void vfio_device_attach_iommufd_pt(int device_fd, u32 pt_id)
{
struct vfio_device_attach_iommufd_pt args = {
@@ -796,31 +327,22 @@ static void vfio_pci_iommufd_setup(struct vfio_pci_device *device, const char *b
VFIO_ASSERT_GE(device->fd, 0);
free((void *)cdev_path);
- /*
- * Require device->iommufd to be >0 so that a simple non-0 check can be
- * used to check if iommufd is enabled. In practice open() will never
- * return 0 unless stdin is closed.
- */
- device->iommufd = open("/dev/iommu", O_RDWR);
- VFIO_ASSERT_GT(device->iommufd, 0);
-
- vfio_device_bind_iommufd(device->fd, device->iommufd);
- device->ioas_id = iommufd_ioas_alloc(device->iommufd);
- vfio_device_attach_iommufd_pt(device->fd, device->ioas_id);
+ vfio_device_bind_iommufd(device->fd, device->iommu->iommufd);
+ vfio_device_attach_iommufd_pt(device->fd, device->iommu->ioas_id);
}
-struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode)
+struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu)
{
struct vfio_pci_device *device;
device = calloc(1, sizeof(*device));
VFIO_ASSERT_NOT_NULL(device);
- INIT_LIST_HEAD(&device->dma_regions);
-
- device->iommu_mode = lookup_iommu_mode(iommu_mode);
+ VFIO_ASSERT_NOT_NULL(iommu);
+ device->iommu = iommu;
+ device->bdf = bdf;
- if (device->iommu_mode->container_path)
+ if (iommu->mode->container_path)
vfio_pci_container_setup(device, bdf);
else
vfio_pci_iommufd_setup(device, bdf);
@@ -849,48 +371,8 @@ void vfio_pci_device_cleanup(struct vfio_pci_device *device)
VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
}
- if (device->iommufd) {
- VFIO_ASSERT_EQ(close(device->iommufd), 0);
- } else {
+ if (device->group_fd)
VFIO_ASSERT_EQ(close(device->group_fd), 0);
- VFIO_ASSERT_EQ(close(device->container_fd), 0);
- }
free(device);
}
-
-static bool is_bdf(const char *str)
-{
- unsigned int s, b, d, f;
- int length, count;
-
- count = sscanf(str, "%4x:%2x:%2x.%2x%n", &s, &b, &d, &f, &length);
- return count == 4 && length == strlen(str);
-}
-
-const char *vfio_selftests_get_bdf(int *argc, char *argv[])
-{
- char *bdf;
-
- if (*argc > 1 && is_bdf(argv[*argc - 1]))
- return argv[--(*argc)];
-
- bdf = getenv("VFIO_SELFTESTS_BDF");
- if (bdf) {
- VFIO_ASSERT_TRUE(is_bdf(bdf), "Invalid BDF: %s\n", bdf);
- return bdf;
- }
-
- fprintf(stderr, "Unable to determine which device to use, skipping test.\n");
- fprintf(stderr, "\n");
- fprintf(stderr, "To pass the device address via environment variable:\n");
- fprintf(stderr, "\n");
- fprintf(stderr, " export VFIO_SELFTESTS_BDF=segment:bus:device.function\n");
- fprintf(stderr, " %s [options]\n", argv[0]);
- fprintf(stderr, "\n");
- fprintf(stderr, "To pass the device address via argv:\n");
- fprintf(stderr, "\n");
- fprintf(stderr, " %s [options] segment:bus:device.function\n", argv[0]);
- fprintf(stderr, "\n");
- exit(KSFT_SKIP);
-}
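A hedged sketch, not part of the patch, of the flow this refactor enables: several devices attached to one shared iommu. This is also why the container setup above tolerates losing the VFIO_SET_IOMMU race; with a shared container only the first caller can succeed. The BDF arguments are illustrative.

static void shared_iommu_example(const char *bdf_a, const char *bdf_b)
{
	struct iommu *iommu = iommu_init("vfio_type1v2_iommu");
	struct vfio_pci_device *a = vfio_pci_device_init(bdf_a, iommu);
	struct vfio_pci_device *b = vfio_pci_device_init(bdf_b, iommu);

	/* Mappings made with iommu_map(iommu, ...) are visible to both devices. */

	vfio_pci_device_cleanup(a);
	vfio_pci_device_cleanup(b);
	iommu_cleanup(iommu);
}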
diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_driver.c b/tools/testing/selftests/vfio/lib/vfio_pci_driver.c
index e5e8723ecb41..ca0e25efbfa1 100644
--- a/tools/testing/selftests/vfio/lib/vfio_pci_driver.c
+++ b/tools/testing/selftests/vfio/lib/vfio_pci_driver.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <stdio.h>
-
#include "../../../kselftest.h"
-#include <vfio_util.h>
+#include <libvfio.h>
#ifdef __x86_64__
extern struct vfio_pci_driver_ops dsa_ops;
@@ -29,7 +27,6 @@ void vfio_pci_driver_probe(struct vfio_pci_device *device)
if (ops->probe(device))
continue;
- printf("Driver found: %s\n", ops->name);
device->driver.ops = ops;
}
}
@@ -58,17 +55,6 @@ void vfio_pci_driver_init(struct vfio_pci_device *device)
driver->ops->init(device);
driver->initialized = true;
-
- printf("%s: region: vaddr %p, iova 0x%lx, size 0x%lx\n",
- driver->ops->name,
- driver->region.vaddr,
- driver->region.iova,
- driver->region.size);
-
- printf("%s: max_memcpy_size 0x%lx, max_memcpy_count 0x%lx\n",
- driver->ops->name,
- driver->max_memcpy_size,
- driver->max_memcpy_count);
}
void vfio_pci_driver_remove(struct vfio_pci_device *device)
diff --git a/tools/testing/selftests/vfio/run.sh b/tools/testing/selftests/vfio/run.sh
deleted file mode 100755
index 0476b6d7adc3..000000000000
--- a/tools/testing/selftests/vfio/run.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-
-# Global variables initialized in main() and then used during cleanup() when
-# the script exits.
-declare DEVICE_BDF
-declare NEW_DRIVER
-declare OLD_DRIVER
-declare OLD_NUMVFS
-declare DRIVER_OVERRIDE
-
-function write_to() {
- # Unfortunately set -x does not show redirects so use echo to manually
- # tell the user what commands are being run.
- echo "+ echo \"${2}\" > ${1}"
- echo "${2}" > ${1}
-}
-
-function bind() {
- write_to /sys/bus/pci/drivers/${2}/bind ${1}
-}
-
-function unbind() {
- write_to /sys/bus/pci/drivers/${2}/unbind ${1}
-}
-
-function set_sriov_numvfs() {
- write_to /sys/bus/pci/devices/${1}/sriov_numvfs ${2}
-}
-
-function set_driver_override() {
- write_to /sys/bus/pci/devices/${1}/driver_override ${2}
-}
-
-function clear_driver_override() {
- set_driver_override ${1} ""
-}
-
-function cleanup() {
- if [ "${NEW_DRIVER}" ]; then unbind ${DEVICE_BDF} ${NEW_DRIVER} ; fi
- if [ "${DRIVER_OVERRIDE}" ]; then clear_driver_override ${DEVICE_BDF} ; fi
- if [ "${OLD_DRIVER}" ]; then bind ${DEVICE_BDF} ${OLD_DRIVER} ; fi
- if [ "${OLD_NUMVFS}" ]; then set_sriov_numvfs ${DEVICE_BDF} ${OLD_NUMVFS} ; fi
-}
-
-function usage() {
- echo "usage: $0 [-d segment:bus:device.function] [-s] [-h] [cmd ...]" >&2
- echo >&2
- echo " -d: The BDF of the device to use for the test (required)" >&2
- echo " -h: Show this help message" >&2
- echo " -s: Drop into a shell rather than running a command" >&2
- echo >&2
- echo " cmd: The command to run and arguments to pass to it." >&2
- echo " Required when not using -s. The SBDF will be " >&2
- echo " appended to the argument list." >&2
- exit 1
-}
-
-function main() {
- local shell
-
- while getopts "d:hs" opt; do
- case $opt in
- d) DEVICE_BDF="$OPTARG" ;;
- s) shell=true ;;
- *) usage ;;
- esac
- done
-
- # Shift past all optional arguments.
- shift $((OPTIND - 1))
-
- # Check that the user passed in the command to run.
- [ ! "${shell}" ] && [ $# = 0 ] && usage
-
- # Check that the user passed in a BDF.
- [ "${DEVICE_BDF}" ] || usage
-
- trap cleanup EXIT
- set -e
-
- test -d /sys/bus/pci/devices/${DEVICE_BDF}
-
- if [ -f /sys/bus/pci/devices/${DEVICE_BDF}/sriov_numvfs ]; then
- OLD_NUMVFS=$(cat /sys/bus/pci/devices/${DEVICE_BDF}/sriov_numvfs)
- set_sriov_numvfs ${DEVICE_BDF} 0
- fi
-
- if [ -L /sys/bus/pci/devices/${DEVICE_BDF}/driver ]; then
- OLD_DRIVER=$(basename $(readlink -m /sys/bus/pci/devices/${DEVICE_BDF}/driver))
- unbind ${DEVICE_BDF} ${OLD_DRIVER}
- fi
-
- set_driver_override ${DEVICE_BDF} vfio-pci
- DRIVER_OVERRIDE=true
-
- bind ${DEVICE_BDF} vfio-pci
- NEW_DRIVER=vfio-pci
-
- echo
- if [ "${shell}" ]; then
- echo "Dropping into ${SHELL} with VFIO_SELFTESTS_BDF=${DEVICE_BDF}"
- VFIO_SELFTESTS_BDF=${DEVICE_BDF} ${SHELL}
- else
- "$@" ${DEVICE_BDF}
- fi
- echo
-}
-
-main "$@"
diff --git a/tools/testing/selftests/vfio/scripts/cleanup.sh b/tools/testing/selftests/vfio/scripts/cleanup.sh
new file mode 100755
index 000000000000..69c922d8aafb
--- /dev/null
+++ b/tools/testing/selftests/vfio/scripts/cleanup.sh
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+source $(dirname -- "${BASH_SOURCE[0]}")/lib.sh
+
+function cleanup_devices() {
+ local device_bdf
+ local device_dir
+
+ for device_bdf in "$@"; do
+ device_dir=${DEVICES_DIR}/${device_bdf}
+
+ if [ -f ${device_dir}/vfio-pci ]; then
+ unbind ${device_bdf} vfio-pci
+ fi
+
+ if [ -f ${device_dir}/driver_override ]; then
+ clear_driver_override ${device_bdf}
+ fi
+
+ if [ -f ${device_dir}/driver ]; then
+ bind ${device_bdf} $(cat ${device_dir}/driver)
+ fi
+
+ if [ -f ${device_dir}/sriov_numvfs ]; then
+ set_sriov_numvfs ${device_bdf} $(cat ${device_dir}/sriov_numvfs)
+ fi
+
+ rm -rf ${device_dir}
+ done
+}
+
+function main() {
+ if [ $# = 0 ]; then
+ cleanup_devices $(ls ${DEVICES_DIR})
+ rmdir ${DEVICES_DIR}
+ else
+ cleanup_devices "$@"
+ fi
+}
+
+main "$@"
diff --git a/tools/testing/selftests/vfio/scripts/lib.sh b/tools/testing/selftests/vfio/scripts/lib.sh
new file mode 100755
index 000000000000..9f05f29c7b86
--- /dev/null
+++ b/tools/testing/selftests/vfio/scripts/lib.sh
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+readonly DEVICES_DIR="${TMPDIR:-/tmp}/vfio-selftests-devices"
+
+function write_to() {
+ # Unfortunately set -x does not show redirects so use echo to manually
+ # tell the user what commands are being run.
+ echo "+ echo \"${2}\" > ${1}"
+ echo "${2}" > ${1}
+}
+
+function get_driver() {
+ if [ -L /sys/bus/pci/devices/${1}/driver ]; then
+ basename $(readlink -m /sys/bus/pci/devices/${1}/driver)
+ fi
+}
+
+function bind() {
+ write_to /sys/bus/pci/drivers/${2}/bind ${1}
+}
+
+function unbind() {
+ write_to /sys/bus/pci/drivers/${2}/unbind ${1}
+}
+
+function set_sriov_numvfs() {
+ write_to /sys/bus/pci/devices/${1}/sriov_numvfs ${2}
+}
+
+function get_sriov_numvfs() {
+ if [ -f /sys/bus/pci/devices/${1}/sriov_numvfs ]; then
+ cat /sys/bus/pci/devices/${1}/sriov_numvfs
+ fi
+}
+
+function set_driver_override() {
+ write_to /sys/bus/pci/devices/${1}/driver_override ${2}
+}
+
+function clear_driver_override() {
+ set_driver_override ${1} ""
+}
diff --git a/tools/testing/selftests/vfio/scripts/run.sh b/tools/testing/selftests/vfio/scripts/run.sh
new file mode 100755
index 000000000000..91fd38f9f6f6
--- /dev/null
+++ b/tools/testing/selftests/vfio/scripts/run.sh
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+source $(dirname -- "${BASH_SOURCE[0]}")/lib.sh
+
+function main() {
+ local device_bdfs=$(ls ${DEVICES_DIR})
+
+ if [ -z "${device_bdfs}" ]; then
+ echo "No devices found, skipping."
+ exit 4
+ fi
+
+ "$@" ${device_bdfs}
+}
+
+main "$@"
diff --git a/tools/testing/selftests/vfio/scripts/setup.sh b/tools/testing/selftests/vfio/scripts/setup.sh
new file mode 100755
index 000000000000..49a499e51cbe
--- /dev/null
+++ b/tools/testing/selftests/vfio/scripts/setup.sh
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+set -e
+
+source $(dirname -- "${BASH_SOURCE[0]}")/lib.sh
+
+function main() {
+ local device_bdf
+ local device_dir
+ local numvfs
+ local driver
+
+ if [ $# = 0 ]; then
+ echo "usage: $0 segment:bus:device.function ..." >&2
+ exit 1
+ fi
+
+ for device_bdf in "$@"; do
+ test -d /sys/bus/pci/devices/${device_bdf}
+
+ device_dir=${DEVICES_DIR}/${device_bdf}
+ if [ -d "${device_dir}" ]; then
+ echo "${device_bdf} has already been set up, exiting."
+ exit 0
+ fi
+
+ mkdir -p ${device_dir}
+
+ numvfs=$(get_sriov_numvfs ${device_bdf})
+ if [ "${numvfs}" ]; then
+ set_sriov_numvfs ${device_bdf} 0
+ echo ${numvfs} > ${device_dir}/sriov_numvfs
+ fi
+
+ driver=$(get_driver ${device_bdf})
+ if [ "${driver}" ]; then
+ unbind ${device_bdf} ${driver}
+ echo ${driver} > ${device_dir}/driver
+ fi
+
+ set_driver_override ${device_bdf} vfio-pci
+ touch ${device_dir}/driver_override
+
+ bind ${device_bdf} vfio-pci
+ touch ${device_dir}/vfio-pci
+ done
+}
+
+main "$@"
diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
index 102603d4407d..5397822c3dd4 100644
--- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
+++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
@@ -10,7 +10,7 @@
#include <linux/sizes.h>
#include <linux/vfio.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "../kselftest_harness.h"
@@ -94,6 +94,7 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
}
FIXTURE(vfio_dma_mapping_test) {
+ struct iommu *iommu;
struct vfio_pci_device *device;
struct iova_allocator *iova_allocator;
};
@@ -119,21 +120,23 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB |
FIXTURE_SETUP(vfio_dma_mapping_test)
{
- self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
- self->iova_allocator = iova_allocator_init(self->device);
+ self->iommu = iommu_init(variant->iommu_mode);
+ self->device = vfio_pci_device_init(device_bdf, self->iommu);
+ self->iova_allocator = iova_allocator_init(self->iommu);
}
FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
iova_allocator_cleanup(self->iova_allocator);
vfio_pci_device_cleanup(self->device);
+ iommu_cleanup(self->iommu);
}
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
const u64 size = variant->size ?: getpagesize();
const int flags = variant->mmap_flags;
- struct vfio_dma_region region;
+ struct dma_region region;
struct iommu_mapping mapping;
u64 mapping_size = size;
u64 unmapped;
@@ -150,7 +153,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
region.iova = iova_allocator_alloc(self->iova_allocator, size);
region.size = size;
- vfio_pci_dma_map(self->device, &region);
+ iommu_map(self->iommu, &region);
printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);
ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));
@@ -192,19 +195,20 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
}
unmap:
- rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped);
+ rc = __iommu_unmap(self->iommu, &region, &unmapped);
ASSERT_EQ(rc, 0);
ASSERT_EQ(unmapped, region.size);
printf("Unmapped IOVA 0x%lx\n", region.iova);
- ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
+ ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));
ASSERT_TRUE(!munmap(region.vaddr, size));
}
FIXTURE(vfio_dma_map_limit_test) {
+ struct iommu *iommu;
struct vfio_pci_device *device;
- struct vfio_dma_region region;
+ struct dma_region region;
size_t mmap_size;
};
@@ -223,7 +227,7 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
FIXTURE_SETUP(vfio_dma_map_limit_test)
{
- struct vfio_dma_region *region = &self->region;
+ struct dma_region *region = &self->region;
struct iommu_iova_range *ranges;
u64 region_size = getpagesize();
iova_t last_iova;
@@ -235,12 +239,13 @@ FIXTURE_SETUP(vfio_dma_map_limit_test)
*/
self->mmap_size = 2 * region_size;
- self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+ self->iommu = iommu_init(variant->iommu_mode);
+ self->device = vfio_pci_device_init(device_bdf, self->iommu);
region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT_NE(region->vaddr, MAP_FAILED);
- ranges = vfio_pci_iova_ranges(self->device, &nranges);
+ ranges = iommu_iova_ranges(self->iommu, &nranges);
VFIO_ASSERT_NOT_NULL(ranges);
last_iova = ranges[nranges - 1].last;
free(ranges);
@@ -253,49 +258,50 @@ FIXTURE_SETUP(vfio_dma_map_limit_test)
FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
vfio_pci_device_cleanup(self->device);
+ iommu_cleanup(self->iommu);
ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}
TEST_F(vfio_dma_map_limit_test, unmap_range)
{
- struct vfio_dma_region *region = &self->region;
+ struct dma_region *region = &self->region;
u64 unmapped;
int rc;
- vfio_pci_dma_map(self->device, region);
+ iommu_map(self->iommu, region);
ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
- rc = __vfio_pci_dma_unmap(self->device, region, &unmapped);
+ rc = __iommu_unmap(self->iommu, region, &unmapped);
ASSERT_EQ(rc, 0);
ASSERT_EQ(unmapped, region->size);
}
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
- struct vfio_dma_region *region = &self->region;
+ struct dma_region *region = &self->region;
u64 unmapped;
int rc;
- vfio_pci_dma_map(self->device, region);
+ iommu_map(self->iommu, region);
ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
- rc = __vfio_pci_dma_unmap_all(self->device, &unmapped);
+ rc = __iommu_unmap_all(self->iommu, &unmapped);
ASSERT_EQ(rc, 0);
ASSERT_EQ(unmapped, region->size);
}
TEST_F(vfio_dma_map_limit_test, overflow)
{
- struct vfio_dma_region *region = &self->region;
+ struct dma_region *region = &self->region;
int rc;
region->iova = ~(iova_t)0 & ~(region->size - 1);
region->size = self->mmap_size;
- rc = __vfio_pci_dma_map(self->device, region);
+ rc = __iommu_map(self->iommu, region);
ASSERT_EQ(rc, -EOVERFLOW);
- rc = __vfio_pci_dma_unmap(self->device, region, NULL);
+ rc = __iommu_unmap(self->iommu, region, NULL);
ASSERT_EQ(rc, -EOVERFLOW);
}
diff --git a/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c b/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c
index 3655106b912d..caf1c6291f3d 100644
--- a/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c
+++ b/tools/testing/selftests/vfio/vfio_iommufd_setup_test.c
@@ -10,7 +10,7 @@
#include <sys/ioctl.h>
#include <unistd.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "../kselftest_harness.h"
static const char iommu_dev_path[] = "/dev/iommu";
diff --git a/tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c b/tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c
new file mode 100644
index 000000000000..33b0c31fe2ed
--- /dev/null
+++ b/tools/testing/selftests/vfio/vfio_pci_device_init_perf_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/vfio.h>
+
+#include <libvfio.h>
+
+#include "../kselftest_harness.h"
+
+static char **device_bdfs;
+static int nr_devices;
+
+struct thread_args {
+ struct iommu *iommu;
+ int device_index;
+ struct timespec start;
+ struct timespec end;
+ pthread_barrier_t *barrier;
+};
+
+FIXTURE(vfio_pci_device_init_perf_test) {
+ pthread_t *threads;
+ pthread_barrier_t barrier;
+ struct thread_args *thread_args;
+ struct iommu *iommu;
+};
+
+FIXTURE_VARIANT(vfio_pci_device_init_perf_test) {
+ const char *iommu_mode;
+};
+
+#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
+FIXTURE_VARIANT_ADD(vfio_pci_device_init_perf_test, _iommu_mode) { \
+ .iommu_mode = #_iommu_mode, \
+}
+
+FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
+
+FIXTURE_SETUP(vfio_pci_device_init_perf_test)
+{
+ int i;
+
+ self->iommu = iommu_init(variant->iommu_mode);
+ self->threads = calloc(nr_devices, sizeof(self->threads[0]));
+ self->thread_args = calloc(nr_devices, sizeof(self->thread_args[0]));
+
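+	/* One barrier slot per device thread; threads rendezvous before the timed init. */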
+ pthread_barrier_init(&self->barrier, NULL, nr_devices);
+
+ for (i = 0; i < nr_devices; i++) {
+ self->thread_args[i].iommu = self->iommu;
+ self->thread_args[i].barrier = &self->barrier;
+ self->thread_args[i].device_index = i;
+ }
+}
+
+FIXTURE_TEARDOWN(vfio_pci_device_init_perf_test)
+{
+	iommu_cleanup(self->iommu);
+	pthread_barrier_destroy(&self->barrier);
+	free(self->threads);
+	free(self->thread_args);
+}
+
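+/* timespec helpers: convert to and from a flat nanosecond count for arithmetic. */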
+static s64 to_ns(struct timespec ts)
+{
+ return (s64)ts.tv_nsec + NSEC_PER_SEC * (s64)ts.tv_sec;
+}
+
+static struct timespec to_timespec(s64 ns)
+{
+ struct timespec ts = {
+ .tv_nsec = ns % NSEC_PER_SEC,
+ .tv_sec = ns / NSEC_PER_SEC,
+ };
+
+ return ts;
+}
+
+static struct timespec timespec_sub(struct timespec a, struct timespec b)
+{
+ return to_timespec(to_ns(a) - to_ns(b));
+}
+
+static struct timespec timespec_min(struct timespec a, struct timespec b)
+{
+ return to_ns(a) < to_ns(b) ? a : b;
+}
+
+static struct timespec timespec_max(struct timespec a, struct timespec b)
+{
+ return to_ns(a) > to_ns(b) ? a : b;
+}
+
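+/*
+ * Each thread initializes one device. The first barrier lines the threads up
+ * so the timed inits run concurrently; the second ensures every init has
+ * completed before any thread begins cleanup.
+ */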
+static void *thread_main(void *__args)
+{
+ struct thread_args *args = __args;
+ struct vfio_pci_device *device;
+
+ pthread_barrier_wait(args->barrier);
+
+ clock_gettime(CLOCK_MONOTONIC, &args->start);
+ device = vfio_pci_device_init(device_bdfs[args->device_index], args->iommu);
+ clock_gettime(CLOCK_MONOTONIC, &args->end);
+
+ pthread_barrier_wait(args->barrier);
+
+ vfio_pci_device_cleanup(device);
+ return NULL;
+}
+
+TEST_F(vfio_pci_device_init_perf_test, init)
+{
+ struct timespec start = to_timespec(INT64_MAX), end = {};
+ struct timespec min = to_timespec(INT64_MAX);
+ struct timespec max = {};
+ struct timespec avg = {};
+ struct timespec wall_time;
+ s64 thread_ns = 0;
+ int i;
+
+ for (i = 0; i < nr_devices; i++) {
+ pthread_create(&self->threads[i], NULL, thread_main,
+ &self->thread_args[i]);
+ }
+
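+	/* Join the threads and fold per-device init times into min/max/avg and wall time. */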
+ for (i = 0; i < nr_devices; i++) {
+ struct thread_args *args = &self->thread_args[i];
+ struct timespec init_time;
+
+ pthread_join(self->threads[i], NULL);
+
+ start = timespec_min(start, args->start);
+ end = timespec_max(end, args->end);
+
+ init_time = timespec_sub(args->end, args->start);
+ min = timespec_min(min, init_time);
+ max = timespec_max(max, init_time);
+ thread_ns += to_ns(init_time);
+ }
+
+ avg = to_timespec(thread_ns / nr_devices);
+ wall_time = timespec_sub(end, start);
+
+ printf("Wall time: %lu.%09lus\n",
+ wall_time.tv_sec, wall_time.tv_nsec);
+ printf("Min init time (per device): %lu.%09lus\n",
+ min.tv_sec, min.tv_nsec);
+ printf("Max init time (per device): %lu.%09lus\n",
+ max.tv_sec, max.tv_nsec);
+ printf("Avg init time (per device): %lu.%09lus\n",
+ avg.tv_sec, avg.tv_nsec);
+}
+
+int main(int argc, char *argv[])
+{
+ int i;
+
+ device_bdfs = vfio_selftests_get_bdfs(&argc, argv, &nr_devices);
+
+ printf("Testing parallel initialization of %d devices:\n", nr_devices);
+ for (i = 0; i < nr_devices; i++)
+ printf(" %s\n", device_bdfs[i]);
+
+ return test_harness_run(argc, argv);
+}
diff --git a/tools/testing/selftests/vfio/vfio_pci_device_test.c b/tools/testing/selftests/vfio/vfio_pci_device_test.c
index 7a270698e4d2..ecbb669b3765 100644
--- a/tools/testing/selftests/vfio/vfio_pci_device_test.c
+++ b/tools/testing/selftests/vfio/vfio_pci_device_test.c
@@ -10,7 +10,7 @@
#include <linux/sizes.h>
#include <linux/vfio.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "../kselftest_harness.h"
@@ -23,17 +23,20 @@ static const char *device_bdf;
#define MAX_TEST_MSI 16U
FIXTURE(vfio_pci_device_test) {
+ struct iommu *iommu;
struct vfio_pci_device *device;
};
FIXTURE_SETUP(vfio_pci_device_test)
{
- self->device = vfio_pci_device_init(device_bdf, default_iommu_mode);
+ self->iommu = iommu_init(default_iommu_mode);
+ self->device = vfio_pci_device_init(device_bdf, self->iommu);
}
FIXTURE_TEARDOWN(vfio_pci_device_test)
{
vfio_pci_device_cleanup(self->device);
+ iommu_cleanup(self->iommu);
}
#define read_pci_id_from_sysfs(_file) ({ \
@@ -99,6 +102,7 @@ TEST_F(vfio_pci_device_test, validate_bars)
}
FIXTURE(vfio_pci_irq_test) {
+ struct iommu *iommu;
struct vfio_pci_device *device;
};
@@ -116,12 +120,14 @@ FIXTURE_VARIANT_ADD(vfio_pci_irq_test, msix) {
FIXTURE_SETUP(vfio_pci_irq_test)
{
- self->device = vfio_pci_device_init(device_bdf, default_iommu_mode);
+ self->iommu = iommu_init(default_iommu_mode);
+ self->device = vfio_pci_device_init(device_bdf, self->iommu);
}
FIXTURE_TEARDOWN(vfio_pci_irq_test)
{
vfio_pci_device_cleanup(self->device);
+ iommu_cleanup(self->iommu);
}
TEST_F(vfio_pci_irq_test, enable_trigger_disable)
diff --git a/tools/testing/selftests/vfio/vfio_pci_driver_test.c b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
index f69eec8b928d..f0ca8310d6a8 100644
--- a/tools/testing/selftests/vfio/vfio_pci_driver_test.c
+++ b/tools/testing/selftests/vfio/vfio_pci_driver_test.c
@@ -5,7 +5,7 @@
#include <linux/sizes.h>
#include <linux/vfio.h>
-#include <vfio_util.h>
+#include <libvfio.h>
#include "../kselftest_harness.h"
@@ -18,9 +18,9 @@ static const char *device_bdf;
ASSERT_EQ(EAGAIN, errno); \
} while (0)
-static void region_setup(struct vfio_pci_device *device,
+static void region_setup(struct iommu *iommu,
struct iova_allocator *iova_allocator,
- struct vfio_dma_region *region, u64 size)
+ struct dma_region *region, u64 size)
{
const int flags = MAP_SHARED | MAP_ANONYMOUS;
const int prot = PROT_READ | PROT_WRITE;
@@ -33,20 +33,20 @@ static void region_setup(struct vfio_pci_device *device,
region->iova = iova_allocator_alloc(iova_allocator, size);
region->size = size;
- vfio_pci_dma_map(device, region);
+ iommu_map(iommu, region);
}
-static void region_teardown(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
+static void region_teardown(struct iommu *iommu, struct dma_region *region)
{
- vfio_pci_dma_unmap(device, region);
+ iommu_unmap(iommu, region);
VFIO_ASSERT_EQ(munmap(region->vaddr, region->size), 0);
}
FIXTURE(vfio_pci_driver_test) {
+ struct iommu *iommu;
struct vfio_pci_device *device;
struct iova_allocator *iova_allocator;
- struct vfio_dma_region memcpy_region;
+ struct dma_region memcpy_region;
void *vaddr;
int msi_fd;
@@ -73,13 +73,14 @@ FIXTURE_SETUP(vfio_pci_driver_test)
{
struct vfio_pci_driver *driver;
- self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
- self->iova_allocator = iova_allocator_init(self->device);
+ self->iommu = iommu_init(variant->iommu_mode);
+ self->device = vfio_pci_device_init(device_bdf, self->iommu);
+ self->iova_allocator = iova_allocator_init(self->iommu);
driver = &self->device->driver;
- region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
- region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);
+ region_setup(self->iommu, self->iova_allocator, &self->memcpy_region, SZ_1G);
+ region_setup(self->iommu, self->iova_allocator, &driver->region, SZ_2M);
/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);
@@ -108,11 +109,12 @@ FIXTURE_TEARDOWN(vfio_pci_driver_test)
vfio_pci_driver_remove(self->device);
- region_teardown(self->device, &self->memcpy_region);
- region_teardown(self->device, &driver->region);
+ region_teardown(self->iommu, &self->memcpy_region);
+ region_teardown(self->iommu, &driver->region);
iova_allocator_cleanup(self->iova_allocator);
vfio_pci_device_cleanup(self->device);
+ iommu_cleanup(self->iommu);
}
TEST_F(vfio_pci_driver_test, init_remove)
@@ -231,18 +233,31 @@ TEST_F_TIMEOUT(vfio_pci_driver_test, memcpy_storm, 60)
ASSERT_NO_MSI(self->msi_fd);
}
-int main(int argc, char *argv[])
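+/*
+ * Open the device once up front to check whether a selftests driver is
+ * registered for it; the device and IOMMU are torn down again before the
+ * test harness runs.
+ */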
+static bool device_has_selftests_driver(const char *bdf)
{
struct vfio_pci_device *device;
+ struct iommu *iommu;
+ bool has_driver;
+
+ iommu = iommu_init(default_iommu_mode);
+	device = vfio_pci_device_init(bdf, iommu);
+
+ has_driver = !!device->driver.ops;
+
+ vfio_pci_device_cleanup(device);
+ iommu_cleanup(iommu);
+ return has_driver;
+}
+
+int main(int argc, char *argv[])
+{
device_bdf = vfio_selftests_get_bdf(&argc, argv);
- device = vfio_pci_device_init(device_bdf, default_iommu_mode);
- if (!device->driver.ops) {
+ if (!device_has_selftests_driver(device_bdf)) {
fprintf(stderr, "No driver found for device %s\n", device_bdf);
return KSFT_SKIP;
}
- vfio_pci_device_cleanup(device);
return test_harness_run(argc, argv);
}