author     Matthew Brost <matthew.brost@intel.com>   2025-12-12 10:28:47 -0800
committer  Matthew Brost <matthew.brost@intel.com>   2025-12-15 14:02:56 -0800
commit     2b277b506138f501693f6278e675da259299e8aa (patch)
tree       c69090495beb1495e6a228c300a583f4cf7566b5
parent     ce3d65549c7a4ea4497546f49d18128281258ec5 (diff)
drm/xe: Add more GT stats around pagefault mode switch flows
Add GT stats to measure the time spent switching between pagefault mode
and dma-fence mode. Also add a GT stat to indicate when pagefault
suspend is skipped because the system is idle. These metrics will help
profile pagefault workloads while 3D and display are enabled.

v2:
- Use GT stats helper functions (Francois)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-8-matthew.brost@intel.com
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_stats.c          6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_stats_types.h    3
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_group.c  21
3 files changed, 30 insertions, 0 deletions
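
Note: the timing stats below follow a simple sample-and-accumulate
pattern. As a minimal sketch, assuming the xe_gt_stats_ktime_*()
helpers (added earlier in the series; their definitions are not shown
in this diff) reduce to the standard ktime API, the flow is roughly:

	#include <linux/ktime.h>

	/*
	 * Illustrative stand-ins only; the real helpers come from
	 * xe_gt_stats.h and may compile out when GT stats are disabled.
	 */
	static inline ktime_t sketch_ktime_get(void)
	{
		return ktime_get();
	}

	static inline s64 sketch_ktime_us_delta(ktime_t start)
	{
		return ktime_us_delta(ktime_get(), start);
	}

	static void timed_mode_switch(struct xe_gt *gt)
	{
		ktime_t start = sketch_ktime_get();

		/* ... suspend LR queues or wait on dma-fence jobs ... */

		/* accumulate elapsed microseconds into the matching _US stat */
		xe_gt_stats_incr(gt,
				 XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
				 sketch_ktime_us_delta(start));
	}
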
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 714045ad9354..fb2904bd0abd 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -68,8 +68,14 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
"hw_engine_group_suspend_lr_queue_count"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
+ "hw_engine_group_skip_lr_queue_count"),
DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
"hw_engine_group_wait_dma_queue_count"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ "hw_engine_group_suspend_lr_queue_us"),
+ DEF_STAT_STR(HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+ "hw_engine_group_wait_dma_queue_us"),
};
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index aada5df421e5..b92d013091d5 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -45,7 +45,10 @@ enum xe_gt_stats_id {
XE_GT_STATS_ID_SVM_64K_BIND_US,
XE_GT_STATS_ID_SVM_2M_BIND_US,
XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT,
XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 40ce5d5f543c..f69a32c27458 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -200,7 +200,9 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
{
int err;
struct xe_exec_queue *q;
+ struct xe_gt *gt = NULL;
bool need_resume = false;
+ ktime_t start = xe_gt_stats_ktime_get();
lockdep_assert_held_write(&group->mode_sem);
@@ -215,9 +217,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
return -EAGAIN;
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
+ if (idle_skip_suspend)
+ xe_gt_stats_incr(q->gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SKIP_LR_QUEUE_COUNT, 1);
need_resume |= !idle_skip_suspend;
q->ops->suspend(q);
+ gt = q->gt;
}
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
@@ -229,6 +235,12 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
return err;
}
+ if (gt) {
+ xe_gt_stats_incr(gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_US,
+ xe_gt_stats_ktime_us_delta(start));
+ }
+
if (need_resume)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
@@ -249,7 +261,9 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
{
long timeout;
struct xe_exec_queue *q;
+ struct xe_gt *gt = NULL;
struct dma_fence *fence;
+ ktime_t start = xe_gt_stats_ktime_get();
lockdep_assert_held_write(&group->mode_sem);
@@ -261,11 +275,18 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
timeout = dma_fence_wait(fence, false);
dma_fence_put(fence);
+ gt = q->gt;
if (timeout < 0)
return -ETIME;
}
+ if (gt) {
+ xe_gt_stats_incr(gt,
+ XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_US,
+ xe_gt_stats_ktime_us_delta(start));
+ }
+
return 0;
}
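
Once applied, the new counters appear alongside the existing
hw_engine_group_* entries in the per-GT stats dump. Assuming the usual
xe debugfs layout (the device path and values below are illustrative
only, not captured output):

	$ cat /sys/kernel/debug/dri/<dev>/gt0/stats
	...
	hw_engine_group_suspend_lr_queue_count: 42
	hw_engine_group_skip_lr_queue_count: 7
	hw_engine_group_wait_dma_queue_count: 42
	hw_engine_group_suspend_lr_queue_us: 1830
	hw_engine_group_wait_dma_queue_us: 960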