Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
 drivers/gpu/drm/scheduler/sched_main.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 46119aacb809..1d4f1b822e7b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -965,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 	dma_resv_assert_held(resv);
 
 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
-		/* Make sure to grab an additional ref on the added fence */
-		dma_fence_get(fence);
-		ret = drm_sched_job_add_dependency(job, fence);
-		if (ret) {
-			dma_fence_put(fence);
+		/*
+		 * As drm_sched_job_add_dependency always consumes the fence
+		 * reference (even when it fails), and dma_resv_for_each_fence
+		 * is not obtaining one, we need to grab one before calling.
+		 */
+		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+		if (ret)
 			return ret;
-		}
 	}
 	return 0;
 }
@@ -1236,8 +1237,13 @@ static void drm_sched_run_job_work(struct work_struct *w)
 
 	/* Find entity with a ready job */
 	entity = drm_sched_select_entity(sched);
-	if (!entity)
-		return;	/* No more work */
+	if (!entity) {
+		/*
+		 * Either no more work to do, or the next ready job needs more
+		 * credits than the scheduler has currently available.
+		 */
+		return;
+	}
 
 	sched_job = drm_sched_entity_pop_job(entity);
 	if (!sched_job) {
@@ -1314,7 +1320,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
 	sched->name = args->name;
 	sched->timeout = args->timeout;
 	sched->hang_limit = args->hang_limit;
-	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq;
 	sched->score = args->score ? args->score : &sched->_score;
 	sched->dev = args->dev;
 
@@ -1419,7 +1425,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
-		list_for_each_entry(s_entity, &rq->entities, list)
+		list_for_each_entry(s_entity, &rq->entities, list) {
 			/*
 			 * Prevents reinsertion and marks job_queue as idle,
 			 * it will be removed from the rq in drm_sched_entity_fini()
@@ -1440,8 +1446,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 			 * For now, this remains a potential race in all
 			 * drivers that keep entities alive for longer than
 			 * the scheduler.
+			 *
+			 * The READ_ONCE() is there to make the lockless read
+			 * (warning about the lockless write below) slightly
+			 * less broken...
 			 */
+			if (!READ_ONCE(s_entity->stopped))
+				dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
 			s_entity->stopped = true;
+		}
 		spin_unlock(&rq->lock);
 		kfree(sched->sched_rq[i]);
 	}
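Notes on the first hunk: drm_sched_job_add_dependency() always consumes the fence reference passed to it, even on failure, while dma_resv_for_each_fence() only lends out borrowed pointers, so the caller must take a reference of its own before handing the fence off. The following is a minimal userspace sketch of that ownership rule; the fence struct and the fence_get()/fence_put()/add_dependency() helpers are hypothetical stand-ins for illustration, not the real DRM API.

#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
};

static struct fence *fence_get(struct fence *f)
{
	f->refcount++;
	return f;
}

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

/*
 * Like drm_sched_job_add_dependency(): consumes the reference it is
 * handed on both success and failure.
 */
static int add_dependency(struct fence *f, int fail)
{
	if (fail) {
		fence_put(f);
		return -1;
	}
	/* On success the dependency list would now own the reference. */
	return 0;
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->refcount = 1;	/* the reference held by the container */

	/* Borrowed pointer: take our own reference before handing it off. */
	if (add_dependency(fence_get(f), 1))
		printf("add failed, container ref intact (refcount=%d)\n",
		       f->refcount);

	fence_put(f);		/* drop the container's reference; frees it */
	return 0;
}

Folding the get into the call site, as the hunk does, also removes the error path that previously dropped the reference by hand: since the callee already puts the fence on failure, there is nothing left for the caller to clean up.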
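Notes on the drm_sched_fini() hunks: the new dev_warn() fires when an entity is still live while the scheduler is torn down, and the READ_ONCE() makes the lockless read of s_entity->stopped explicit, since (per the surrounding comment) the flag can also be written outside this lock. Below is a compact sketch of the idiom, with a simplified volatile-load definition standing in for the kernel's READ_ONCE() (the real macro lives in <asm-generic/rwonce.h> and is more involved); struct entity and teardown_check() are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel macro: force exactly one real load. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct entity {
	bool stopped;
};

static void teardown_check(struct entity *e, int idx)
{
	/*
	 * Lockless read: the volatile access forbids the compiler from
	 * caching, re-reading, or optimizing away the load of the flag.
	 */
	if (!READ_ONCE(e->stopped))
		fprintf(stderr, "entity %d still active at teardown!\n", idx);

	/* The store stays plain, just as in the hunk above. */
	e->stopped = true;
}

int main(void)
{
	struct entity entities[2] = { { .stopped = false }, { .stopped = true } };

	for (int i = 0; i < 2; i++)
		teardown_check(&entities[i], i);

	return 0;
}

Only the read side is annotated here, mirroring the hunk, which is why its comment calls the result merely "slightly less broken": the write to stopped remains a plain store that another context could still race against.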
