Diffstat (limited to 'drivers/gpu/drm/xe/xe_exec.c')
| -rw-r--r-- | drivers/gpu/drm/xe/xe_exec.c | 28 |
1 file changed, 12 insertions, 16 deletions
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index a8ab363a8046..4d81210e41f5 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -16,10 +16,12 @@
 #include "xe_exec_queue.h"
 #include "xe_hw_engine_group.h"
 #include "xe_macros.h"
+#include "xe_pm.h"
 #include "xe_ring_ops_types.h"
 #include "xe_sched_job.h"
 #include "xe_sync.h"
 #include "xe_svm.h"
+#include "xe_trace.h"
 #include "xe_vm.h"
 
 /**
@@ -32,7 +34,7 @@
  * - Binding at exec time
  * - Flow controlling the ring at exec time
  *
- * In XE we avoid all of this complication by not allowing a BO list to be
+ * In Xe we avoid all of this complication by not allowing a BO list to be
  * passed into an exec, using the dma-buf implicit sync uAPI, have binds as
  * separate operations, and using the DRM scheduler to flow control the ring.
  * Let's deep dive on each of these.
@@ -123,7 +125,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct xe_validation_ctx ctx;
 	struct xe_sched_job *job;
 	struct xe_vm *vm;
-	bool write_locked, skip_retry = false;
+	bool write_locked;
 	int err = 0;
 	struct xe_hw_engine_group *group;
 	enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -153,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto err_exec_queue;
 	}
 
+	if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
+		trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
+		err = -EAGAIN;
+		goto err_exec_queue;
+	}
+
 	if (args->num_syncs) {
 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
 		if (!syncs) {
@@ -248,7 +256,7 @@ retry:
 	 * on task freezing during suspend / hibernate, the call will
 	 * return -ERESTARTSYS and the IOCTL will be rerun.
 	 */
-	err = wait_for_completion_interruptible(&xe->pm_block);
+	err = xe_pm_block_on_suspend(xe);
 	if (err)
 		goto err_unlock_list;
 
@@ -266,12 +274,6 @@ retry:
 		goto err_exec;
 	}
 
-	if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
-		err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */
-		skip_retry = true;
-		goto err_exec;
-	}
-
 	if (xe_exec_queue_uses_pxp(q)) {
 		err = xe_vm_validate_protected(q->vm);
 		if (err)
@@ -300,10 +302,6 @@ retry:
 		goto err_put_job;
 
 	if (!xe_vm_in_lr_mode(vm)) {
-		err = xe_sched_job_last_fence_add_dep(job, vm);
-		if (err)
-			goto err_put_job;
-
 		err = xe_svm_notifier_lock_interruptible(vm);
 		if (err)
 			goto err_put_job;
@@ -328,8 +326,6 @@ retry:
 		xe_sched_job_init_user_fence(job, &syncs[i]);
 	}
 
-	if (xe_exec_queue_is_lr(q))
-		q->ring_ops->emit_job(job);
 	if (!xe_vm_in_lr_mode(vm))
 		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
 	xe_sched_job_push(job);
@@ -355,7 +351,7 @@ err_exec:
 	xe_validation_ctx_fini(&ctx);
 err_unlock_list:
 	up_read(&vm->lock);
-	if (err == -EAGAIN && !skip_retry)
+	if (err == -EAGAIN)
 		goto retry;
 err_hw_exec_mode:
 	if (mode == EXEC_MODE_DMA_FENCE)
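
The heart of this change is a simple back-pressure pattern: each exec queue keeps an atomic count of in-flight jobs, and the submission path bails out with -EAGAIN once a fixed cap is reached, leaving the caller to retry once earlier jobs retire. The sketch below is a minimal userspace analogue of that pattern using C11 atomics; the names (struct queue, MAX_JOBS, submit_job, job_done) are illustrative stand-ins, not the Xe driver's actual symbols.

/* Minimal userspace analogue of the per-queue job cap added above.
 * Hypothetical names; not the Xe driver's actual code. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_JOBS 4	/* stand-in for XE_MAX_JOB_COUNT_PER_EXEC_QUEUE */

struct queue {
	atomic_int job_cnt;	/* jobs currently in flight on this queue */
};

/* Reject new work with -EAGAIN once the cap is hit, mirroring the
 * check inserted near the top of xe_exec_ioctl(). */
static int submit_job(struct queue *q)
{
	if (atomic_load(&q->job_cnt) >= MAX_JOBS)
		return -EAGAIN;	/* caller is expected to retry later */

	atomic_fetch_add(&q->job_cnt, 1);
	return 0;
}

/* Called from the completion path once a job retires. */
static void job_done(struct queue *q)
{
	atomic_fetch_sub(&q->job_cnt, 1);
}

int main(void)
{
	struct queue q = { .job_cnt = 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("submit %d -> %d\n", i, submit_job(&q));

	job_done(&q);
	printf("after one completion -> %d\n", submit_job(&q));
	return 0;
}

Note that, judging from the unwind labels visible in the hunks, the new check jumps to err_exec_queue before the retry: loop is reachable, so its -EAGAIN is surfaced to userspace rather than retried in-kernel; this replaces the old long-running-queue ring-full check that used -EWOULDBLOCK with skip_retry for the same purpose.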
