Diffstat (limited to 'drivers/gpu/drm/xe/tests')
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf.c                   |  17
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c  | 208
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs.c                      |   2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci.c                       |  11
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci_test.c                  |  16
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_rtp_test.c                  |   6
6 files changed, 248 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index a7e548a2bdfb..5df98de5ba3c 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -31,6 +31,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ struct dma_buf_attachment *attach;
u32 mem_type;
int ret;
@@ -46,7 +47,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
(params->mem_mask & XE_BO_FLAG_SYSTEM))
- /* Pin migrated to TT */
+ /* Pin migrated to TT on non-dynamic attachments. */
mem_type = XE_PL_TT;
if (!xe_bo_is_mem_type(exported, mem_type)) {
@@ -88,6 +89,18 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ /* Check that we can pin without migrating. */
+ attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
+ if (attach) {
+ int err = dma_buf_pin(attach);
+
+ if (!err) {
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ dma_buf_unpin(attach);
+ }
+ KUNIT_EXPECT_EQ(test, err, 0);
+ }
+
if (params->force_different_devices)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
else
@@ -150,7 +163,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
xe_bo_lock(import_bo, false);
err = xe_bo_validate(import_bo, NULL, false, exec);
- /* Pinning in VRAM is not allowed. */
+ /* Pinning in VRAM is not allowed for non-dynamic attachments */
if (!is_dynamic(params) &&
params->force_different_devices &&
!(params->mem_mask & XE_BO_FLAG_SYSTEM))
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
new file mode 100644
index 000000000000..42bfc4bcfbcf
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define TEST_MAX_VFS 63
+
+static void pf_set_admin_mode(struct xe_device *xe, bool enable)
+{
+ /* should match logic of xe_sriov_pf_admin_only() */
+ xe->info.probe_display = !enable;
+ KUNIT_EXPECT_EQ(kunit_get_current_test(), enable, xe_sriov_pf_admin_only(xe));
+}
+
+static const void *num_vfs_gen_param(struct kunit *test, const void *prev, char *desc)
+{
+ unsigned long next = 1 + (unsigned long)prev;
+
+ if (next > TEST_MAX_VFS)
+ return NULL;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%lu VF%s",
+ next, str_plural(next));
+ return (void *)next;
+}
+
+static int pf_gt_config_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* any random platform with SR-IOV */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+ struct xe_gt *gt;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_TRUE(test, IS_SRIOV_PF(xe));
+
+ gt = xe_root_mmio_gt(xe);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt);
+ test->priv = gt;
+
+ /* pretend it can support up to 63 VFs */
+ xe->sriov.pf.device_total_vfs = TEST_MAX_VFS;
+ xe->sriov.pf.driver_max_vfs = TEST_MAX_VFS;
+ KUNIT_ASSERT_EQ(test, xe_sriov_pf_get_totalvfs(xe), 63);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ /* more sanity checks */
+ KUNIT_EXPECT_EQ(test, GUC_ID_MAX + 1, SZ_64K);
+ KUNIT_EXPECT_EQ(test, GUC_NUM_DOORBELLS, SZ_256);
+
+ return 0;
+}
+
+static void fair_contexts_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_64K - SZ_1K, pf_profile_fair_ctxs(gt, 1));
+}
+
+static void fair_contexts(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_ctxs(gt, num_vfs)));
+ KUNIT_EXPECT_GT(test, GUC_ID_MAX, num_vfs * pf_profile_fair_ctxs(gt, num_vfs));
+
+ if (num_vfs > 31)
+ KUNIT_ASSERT_EQ(test, SZ_1K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 15)
+ KUNIT_ASSERT_EQ(test, SZ_2K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_4K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_8K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_16K, pf_profile_fair_ctxs(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, num_vfs));
+}
+
+static void fair_doorbells_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, 128, pf_profile_fair_dbs(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, 240, pf_profile_fair_dbs(gt, 1));
+}
+
+static void fair_doorbells(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_dbs(gt, num_vfs)));
+ KUNIT_EXPECT_GE(test, GUC_NUM_DOORBELLS, (num_vfs + 1) * pf_profile_fair_dbs(gt, num_vfs));
+
+ if (num_vfs > 31)
+ KUNIT_ASSERT_EQ(test, SZ_4, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 15)
+ KUNIT_ASSERT_EQ(test, SZ_8, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_16, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_32, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_64, pf_profile_fair_dbs(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_128, pf_profile_fair_dbs(gt, num_vfs));
+}
+
+static void fair_ggtt_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_2G + SZ_1G + SZ_512M, pf_profile_fair_ggtt(gt, 1));
+}
+
+static void fair_ggtt(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 shareable = SZ_2G + SZ_1G + SZ_512M;
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_ggtt(gt, num_vfs), alignment));
+ KUNIT_EXPECT_GE(test, shareable, num_vfs * pf_profile_fair_ggtt(gt, num_vfs));
+
+ if (num_vfs > 56)
+ KUNIT_ASSERT_EQ(test, SZ_64M - SZ_8M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 28)
+ KUNIT_ASSERT_EQ(test, SZ_64M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 14)
+ KUNIT_ASSERT_EQ(test, SZ_128M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_256M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_512M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_1G, pf_profile_fair_ggtt(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, num_vfs));
+}
+
+static struct kunit_case pf_gt_config_test_cases[] = {
+ KUNIT_CASE(fair_contexts_1vf),
+ KUNIT_CASE(fair_doorbells_1vf),
+ KUNIT_CASE(fair_ggtt_1vf),
+ KUNIT_CASE_PARAM(fair_contexts, num_vfs_gen_param),
+ KUNIT_CASE_PARAM(fair_doorbells, num_vfs_gen_param),
+ KUNIT_CASE_PARAM(fair_ggtt, num_vfs_gen_param),
+ {}
+};
+
+static struct kunit_suite pf_gt_config_suite = {
+ .name = "pf_gt_config",
+ .test_cases = pf_gt_config_test_cases,
+ .init = pf_gt_config_test_init,
+};
+
+kunit_test_suite(pf_gt_config_suite);
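
The parameterized cases above encode one invariant: each VF's fair share of contexts and doorbells is the largest power of two that still fits once per VF after reserving a share for the PF, drawn from a 64K context pool (GUC_ID_MAX + 1) and a 256-entry doorbell pool (GUC_NUM_DOORBELLS). A minimal sketch of that bucketing, for illustration only (the helper below is hypothetical and is not the driver's pf_profile_fair_ctxs() or pf_profile_fair_dbs()):

#include <linux/log2.h>	/* rounddown_pow_of_two() */

/*
 * Hypothetical helper: split a shared pool between num_vfs VFs plus the PF,
 * rounding each VF's share down to a power of two. For a 64K context pool
 * this gives 32K/16K/8K/... as asserted in fair_contexts(); for a 256-entry
 * doorbell pool it gives 128/64/32/... as asserted in fair_doorbells().
 */
static u32 example_fair_pow2_share(u32 pool, unsigned int num_vfs)
{
	return rounddown_pow_of_two(pool / (num_vfs + 1));
}

The GGTT cases follow the same shape, but fair_ggtt() only requires the share to be aligned to pf_get_ggtt_alignment() rather than to a power of two, which is why a value such as SZ_64M - SZ_8M is acceptable for the largest VF counts.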
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 0e502feaca81..6bb278167aaf 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+ KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
}
for (i = 0; i < info->num_mocs_regs; i++) {
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 69e2840c7ef0..f3179b31f13e 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -66,6 +66,7 @@ KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);
/**
* xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -242,6 +243,7 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
/**
* xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -266,6 +268,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
* xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -290,6 +293,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
/**
* xe_pci_id_gen_param - Generate struct pci_device_id parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -307,8 +311,8 @@ const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
-static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
- u32 *ver, u32 *revid)
+static int fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
+ u32 *ver, u32 *revid)
{
struct kunit *test = kunit_get_current_test();
struct xe_pci_fake_data *data = test->priv;
@@ -320,6 +324,8 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
*ver = data->graphics_verx100;
*revid = xe_step_to_gmdid(data->step.graphics);
}
+
+ return 0;
}
static void fake_xe_info_probe_tile_count(struct xe_device *xe)
@@ -376,6 +382,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
/**
* xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @test: test context object
* @prev: the previously returned value, or NULL for the first iteration
* @desc: the buffer for a parameter name
*
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 37b344df2dc3..4d10a7e2b570 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -44,21 +44,27 @@ static void check_media_ip(struct kunit *test)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_platform_gt_count(struct kunit *test)
+static void check_platform_desc(struct kunit *test)
{
const struct pci_device_id *pci = test->param_value;
const struct xe_device_desc *desc =
(const struct xe_device_desc *)pci->driver_data;
- int max_gt = desc->max_gt_per_tile;
- KUNIT_ASSERT_GT(test, max_gt, 0);
- KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
+ KUNIT_EXPECT_GT(test, desc->dma_mask_size, 0);
+
+ KUNIT_EXPECT_GT(test, (unsigned int)desc->max_gt_per_tile, 0);
+ KUNIT_EXPECT_LE(test, (unsigned int)desc->max_gt_per_tile, XE_MAX_GT_PER_TILE);
+
+ KUNIT_EXPECT_GT(test, desc->va_bits, 0);
+ KUNIT_EXPECT_LE(test, desc->va_bits, 64);
+
+ KUNIT_EXPECT_GT(test, desc->vm_max_level, 0);
}
static struct kunit_case xe_pci_tests[] = {
KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
- KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
+ KUNIT_CASE_PARAM(check_platform_desc, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index b0254b014fe4..d2255a59e58f 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -48,12 +48,14 @@ struct rtp_test_case {
const struct xe_rtp_entry *entries;
};
-static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_yes(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return true;
}
-static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_no(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return false;
}
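
The hunk above tracks the xe_rtp match-hook signature, which now passes the struct xe_device alongside the GT and engine. As a hedged illustration of what a match function can do with the extra parameter (hypothetical example, not part of this patch; it assumes the device-info layout exposes info.is_dgfx):

static bool match_discrete_only(const struct xe_device *xe, const struct xe_gt *gt,
				const struct xe_hw_engine *hwe)
{
	/* Illustration: match purely on a device-level property. */
	return xe->info.is_dgfx;
}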