Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_entity.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 25
 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 8867b95ab089..5a4697f636f2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -285,9 +285,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
return 0;
sched = entity->rq->sched;
- /**
- * The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ /*
+ * The client will not queue more jobs during this fini - consume
+ * existing queued ones, or discard them on SIGKILL.
*/
if (current->flags & PF_EXITING) {
if (timeout)
@@ -300,7 +300,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
drm_sched_entity_is_idle(entity));
}
- /* For killed process disable any more IBs enqueue right now */
+ /* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
@@ -324,9 +324,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
/*
- * If consumption of existing IBs wasn't completed. Forcefully remove
- * them here. Also makes sure that the scheduler won't touch this entity
- * any more.
+	 * If consumption of existing jobs wasn't completed, forcefully remove
+	 * them. Also makes sure that the scheduler won't touch this entity any
+	 * more.
*/
drm_sched_entity_kill(entity);
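
For context on how the two functions above pair up on teardown: a driver is expected to first flush the entity, giving already queued jobs a bounded window to drain (or be discarded on SIGKILL, per the comment in drm_sched_entity_flush()), and only then finalize it. A minimal sketch of that sequence follows; struct my_ctx and my_ctx_destroy() are made-up names for illustration, not part of this patch or any driver:

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical per-context state; only the embedded entity matters here. */
struct my_ctx {
	struct drm_sched_entity entity;
};

static void my_ctx_destroy(struct my_ctx *ctx)
{
	/*
	 * Give queued jobs up to a second to be consumed; if the process
	 * was killed, drm_sched_entity_flush() discards them instead.
	 */
	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));

	/* Forcefully remove whatever is left and detach from the scheduler. */
	drm_sched_entity_fini(&ctx->entity);
}

drm_sched_entity_destroy() in the same file wraps exactly this flush-then-fini sequence for the common case.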
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
-static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
+ struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence *fence = entity->dependency;
@@ -421,6 +422,10 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
entity->dependency = fence;
}
+ if (trace_drm_sched_job_unschedulable_enabled() &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
drm_sched_entity_wakeup))
return true;
@@ -461,10 +466,8 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- if (drm_sched_entity_add_dependency_cb(entity)) {
- trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+ if (drm_sched_entity_add_dependency_cb(entity, sched_job))
return NULL;
- }
}
/* skip jobs from entity that marked guilty */
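
The trace_drm_sched_job_unschedulable_enabled() call added above follows the standard tracepoint idiom: every tracepoint definition also generates a trace_<name>_enabled() helper backed by a static key, so the caller can skip preparing arguments (here, the test_bit() on the dependency fence) whenever the event is not being recorded. A generic sketch of that pattern, with my_job_stalled standing in as a hypothetical tracepoint name rather than anything defined by this patch:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>
#include "my_trace.h"	/* hypothetical header defining the my_job_stalled tracepoint */

static void report_stall(struct drm_sched_job *job, struct dma_fence *dep)
{
	/*
	 * trace_my_job_stalled_enabled() compiles down to a static-key
	 * branch, so the fence check below only runs while tracing is on.
	 */
	if (trace_my_job_stalled_enabled() && !dma_fence_is_signaled(dep))
		trace_my_job_stalled(job, dep);
}

Passing sched_job into drm_sched_entity_add_dependency_cb() is what enables this in the scheduler itself: the helper now has both the job and the dependency in hand and can emit the event only when the dependency is still unsignaled.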