From e5f3081291eb958b46775edfd29d1f5367078474 Mon Sep 17 00:00:00 2001
From: Jani Nikula
Date: Thu, 23 Jan 2025 17:09:10 +0200
Subject: drm/sched: stop passing non struct drm_device to drm_err() and friends

The expectation is that the struct drm_device based logging helpers get
passed an actual struct drm_device pointer rather than some random
struct pointer where you can dereference the ->dev member.

Convert drm_err(sched, ...) to dev_err(sched->dev, ...) and similar.
This matches current usage, as struct drm_device is not available, but
drops the "[drm]" or "[drm] *ERROR*" prefix from logging.

Unfortunately, there's no dev_WARN_ON(), so the conversion is not
exactly the same.

Reviewed-by: Simona Vetter
Acked-by: Philipp Stanner
Reviewed-by: Louis Chauvet
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/fe441dd1469d2b03e6b2ff247078bdde2011c6e3.1737644530.git.jani.nikula@intel.com
---
 drivers/gpu/drm/scheduler/sched_main.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c634993f1346..c0b9822d6274 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -102,9 +102,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 {
 	u32 credits;
 
-	drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
-					      atomic_read(&sched->credit_count),
-					      &credits));
+	WARN_ON(check_sub_overflow(sched->credit_limit,
+				   atomic_read(&sched->credit_count),
+				   &credits));
 
 	return credits;
 }
@@ -129,9 +129,11 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
 	/* If a job exceeds the credit limit, truncate it to the credit limit
 	 * itself to guarantee forward progress.
 	 */
-	if (drm_WARN(sched, s_job->credits > sched->credit_limit,
-		     "Jobs may not exceed the credit limit, truncate.\n"))
+	if (s_job->credits > sched->credit_limit) {
+		dev_WARN(sched->dev,
+			 "Jobs may not exceed the credit limit, truncate.\n");
 		s_job->credits = sched->credit_limit;
+	}
 
 	return drm_sched_available_credits(sched) >= s_job->credits;
 }
@@ -789,7 +791,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		 * or worse--a blank screen--leave a trail in the
 		 * logs, so this can be debugged easier.
 		 */
-		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
 		return -ENOENT;
 	}
 
@@ -1263,7 +1265,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
 
 	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
 		/* This is a gross violation--tell drivers what the problem is. */
-		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
 			__func__);
 		return -EINVAL;
 	} else if (sched->sched_rq) {
@@ -1271,7 +1273,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
 		 * fine-tune their DRM calling order, and return all
 		 * is good.
 		 */
-		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
 		return 0;
 	}
 
@@ -1326,7 +1328,7 @@ Out_unroll:
 Out_check_own:
 	if (sched->own_submit_wq)
 		destroy_workqueue(sched->submit_wq);
-	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_sched_init);
--
cgit v1.2.3

From 44d2f310f008613c1dbe5e234c2cf2be90cbbfab Mon Sep 17 00:00:00 2001
From: Philipp Stanner
Date: Tue, 4 Mar 2025 15:13:47 +0100
Subject: drm/sched: drm_sched_job_cleanup(): correct false doc

drm_sched_job_cleanup()'s documentation claims that calling
drm_sched_job_arm() is a "point of no return", implying that afterwards
a job cannot be cancelled anymore.

This is not correct, as proven by the function's code itself, which
takes a previous call to drm_sched_job_arm() into account.

In truth, the decisive factors are whether fences have been shared
(e.g., with other processes) and whether the job has already been
submitted to an entity.

Correct the wrong docstring.

Reviewed-by: Tvrtko Ursulin
Signed-off-by: Philipp Stanner
Link: https://patchwork.freedesktop.org/patch/msgid/20250304141346.102683-2-phasta@kernel.org
---
 drivers/gpu/drm/scheduler/sched_main.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/scheduler/sched_main.c')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c0b9822d6274..bfea608a7106 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1015,11 +1015,13 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
  * Cleans up the resources allocated with drm_sched_job_init().
  *
  * Drivers should call this from their error unwind code if @job is aborted
- * before drm_sched_job_arm() is called.
+ * before it was submitted to an entity with drm_sched_entity_push_job().
  *
- * After that point of no return @job is committed to be executed by the
- * scheduler, and this function should be called from the
- * &drm_sched_backend_ops.free_job callback.
+ * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
+ * it is up to the driver to ensure that fences that were exposed to external
+ * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ *
+ * This function must also be called in &struct drm_sched_backend_ops.free_job
  */
 void drm_sched_job_cleanup(struct drm_sched_job *job)
 {
@@ -1030,7 +1032,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
 		/* drm_sched_job_arm() has been called */
 		dma_fence_put(&job->s_fence->finished);
 	} else {
-		/* aborted job before committing to run it */
+		/* aborted job before arming */
 		drm_sched_fence_free(job->s_fence);
 	}
 
--
cgit v1.2.3
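
A short illustration of the conversion pattern from the first patch above.
Because the kernel provides dev_WARN() but no dev_WARN_ON(), a
drm_WARN_ON()-style check either falls back to plain WARN_ON() (losing the
device prefix in the log) or is open-coded as a condition plus dev_WARN().
This is a minimal sketch, not part of the patch; foo_check_credits() and its
parameters are made up for illustration:

    #include <linux/device.h>
    #include <linux/bug.h>
    #include <linux/types.h>

    /* Hypothetical helper; mirrors the two substitutions used in the patch. */
    static void foo_check_credits(struct device *dev, u32 credits, u32 limit)
    {
    	/* Plain WARN_ON(): backtrace, but no device prefix in the log. */
    	WARN_ON(credits > limit);

    	/* Open-coded dev_WARN_ON() substitute, as in drm_sched_can_queue():
    	 * the check and the device-prefixed warning are written out by hand.
    	 */
    	if (credits > limit)
    		dev_WARN(dev, "credits %u exceed limit %u\n", credits, limit);
    }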
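
And an illustration of the lifecycle rule the second patch documents:
drm_sched_job_cleanup() is safe even after drm_sched_job_arm(), provided the
job was never pushed to an entity and its fences were not shared; once the job
runs, cleanup belongs in the free_job callback instead. This is a hedged
sketch of a driver's error-unwind path; foo_job, foo_prepare(), and the
submit flow are hypothetical, not taken from any real driver:

    #include <drm/gpu_scheduler.h>

    struct foo_job {
    	struct drm_sched_job base;
    	/* driver-private state would live here */
    };

    int foo_prepare(struct foo_job *job); /* hypothetical setup step */

    static int foo_submit(struct foo_job *job, struct drm_sched_entity *entity)
    {
    	int ret;

    	ret = drm_sched_job_init(&job->base, entity, 1, NULL);
    	if (ret)
    		return ret;

    	drm_sched_job_arm(&job->base);

    	ret = foo_prepare(job);
    	if (ret) {
    		/* Armed, but never pushed to an entity and fences never
    		 * shared externally, so cleanup here is allowed.
    		 */
    		drm_sched_job_cleanup(&job->base);
    		return ret;
    	}

    	drm_sched_entity_push_job(&job->base);
    	return 0;
    }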