Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_vma.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 127
1 file changed, 98 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..8316af1723c2 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 	mutex_lock(&vm->mmu_lock);
 
 	/*
-	 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+	 * NOTE: if not using pgtable preallocation, we cannot hold
 	 * a lock across map/unmap which is also used in the job_run()
 	 * path, as this can cause deadlock in job_run() vs shrinker/
 	 * reclaim.
-	 *
-	 * Revisit this if we can come up with a scheme to pre-alloc pages
-	 * for the pgtable in map/unmap ops.
 	 */
 	ret = vm_map_op(vm, &(struct msm_vm_map_op){
 		.iova = vma->va.addr,
@@ -399,7 +396,14 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
 	if (obj)
 		GEM_WARN_ON((range_end - range_start) > obj->size);
 
-	drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+	struct drm_gpuva_op_map op_map = {
+		.va.addr = range_start,
+		.va.range = range_end - range_start,
+		.gem.obj = obj,
+		.gem.offset = offset,
+	};
+
+	drm_gpuva_init_from_op(&vma->base, &op_map);
 	vma->mapped = false;
 
 	ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -454,6 +458,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
 struct op_arg {
 	unsigned flags;
 	struct msm_vm_bind_job *job;
+	const struct msm_vm_bind_op *op;
+	bool kept;
 };
 
 static void
@@ -475,14 +481,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
 }
 
 static int
-msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
 {
-	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+	struct op_arg *arg = _arg;
+	struct msm_vm_bind_job *job = arg->job;
 	struct drm_gem_object *obj = op->map.gem.obj;
 	struct drm_gpuva *vma;
 	struct sg_table *sgt;
 	unsigned prot;
 
+	if (arg->kept)
+		return 0;
+
 	vma = vma_from_op(arg, &op->map);
 	if (WARN_ON(IS_ERR(vma)))
 		return PTR_ERR(vma);
@@ -602,15 +612,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
 }
 
 static int
-msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
 {
-	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+	struct op_arg *arg = _arg;
+	struct msm_vm_bind_job *job = arg->job;
 	struct drm_gpuva *vma = op->unmap.va;
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
 
 	vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
 	       vma->va.addr, vma->va.range);
 
+	/*
+	 * Detect in-place remap. Turnip does this to change the vma flags,
+	 * in particular MSM_VMA_DUMP. In this case we want to avoid actually
+	 * touching the page tables, as that would require synchronization
+	 * against SUBMIT jobs running on the GPU.
+	 */
+	if (op->unmap.keep &&
+	    (arg->op->op == MSM_VM_BIND_OP_MAP) &&
+	    (vma->gem.obj == arg->op->obj) &&
+	    (vma->gem.offset == arg->op->obj_offset) &&
+	    (vma->va.addr == arg->op->iova) &&
+	    (vma->va.range == arg->op->range)) {
+		/* We are only expecting a single in-place unmap+map cb pair: */
+		WARN_ON(arg->kept);
+
+		/* Leave the existing VMA in place, but signal that to the map cb: */
+		arg->kept = true;
+
+		/* Only flags are changing, so update that in-place: */
+		unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
+		vma->flags = orig_flags | arg->flags;
+
+		return 0;
+	}
+
 	if (!msm_vma->mapped)
 		goto out_close;
 
@@ -994,6 +1030,7 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
 	struct drm_device *dev = job->vm->drm;
 	int ret = 0;
 	int cnt = 0;
+	int i = -1;
 
 	if (args->nr_ops == 1) {
 		/* Single op case, the op is inlined: */
@@ -1027,11 +1064,12 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
 
 	spin_lock(&file->table_lock);
 
-	for (unsigned i = 0; i < args->nr_ops; i++) {
+	for (i = 0; i < args->nr_ops; i++) {
+		struct msm_vm_bind_op *op = &job->ops[i];
 		struct drm_gem_object *obj;
 
-		if (!job->ops[i].handle) {
-			job->ops[i].obj = NULL;
+		if (!op->handle) {
+			op->obj = NULL;
 			continue;
 		}
 
@@ -1039,16 +1077,22 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
 		 * normally use drm_gem_object_lookup(), but for bulk lookup
 		 * all under single table_lock just hit object_idr directly:
 		 */
-		obj = idr_find(&file->object_idr, job->ops[i].handle);
+		obj = idr_find(&file->object_idr, op->handle);
 		if (!obj) {
-			ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+			ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
 			goto out_unlock;
 		}
 
 		drm_gem_object_get(obj);
 
-		job->ops[i].obj = obj;
+		op->obj = obj;
 		cnt++;
+
+		if ((op->range + op->obj_offset) > obj->size) {
+			ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
+				   op->range, op->obj_offset, obj->size);
+			goto out_unlock;
+		}
 	}
 
 	*nr_bos = cnt;
@@ -1056,6 +1100,17 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
 
 out_unlock:
 	spin_unlock(&file->table_lock);
+	if (ret) {
+		for (; i >= 0; i--) {
+			struct msm_vm_bind_op *op = &job->ops[i];
+
+			if (!op->obj)
+				continue;
+
+			drm_gem_object_put(op->obj);
+			op->obj = NULL;
+		}
+	}
 out:
 	return ret;
 }
@@ -1171,11 +1226,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
 					      op->obj_offset);
 			break;
 		case MSM_VM_BIND_OP_MAP:
-		case MSM_VM_BIND_OP_MAP_NULL:
-			ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
-							 op->iova, op->range,
-							 op->obj, op->obj_offset);
+		case MSM_VM_BIND_OP_MAP_NULL: {
+			struct drm_gpuvm_map_req map_req = {
+				.map.va.addr = op->iova,
+				.map.va.range = op->range,
+				.map.gem.obj = op->obj,
+				.map.gem.offset = op->obj_offset,
+			};
+
+			ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
 			break;
+		}
 		default:
 			/*
 			 * lookup_op() should have already thrown an error for
@@ -1271,6 +1332,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
 		const struct msm_vm_bind_op *op = &job->ops[i];
 		struct op_arg arg = {
 			.job = job,
+			.op = op,
 		};
 
 		switch (op->op) {
@@ -1282,10 +1344,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
 			if (op->flags & MSM_VM_BIND_OP_DUMP)
 				arg.flags |= MSM_VMA_DUMP;
 			fallthrough;
-		case MSM_VM_BIND_OP_MAP_NULL:
-			ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
-					       op->range, op->obj, op->obj_offset);
+		case MSM_VM_BIND_OP_MAP_NULL: {
+			struct drm_gpuvm_map_req map_req = {
+				.map.va.addr = op->iova,
+				.map.va.range = op->range,
+				.map.gem.obj = op->obj,
+				.map.gem.offset = op->obj_offset,
+			};
+
+			ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
 			break;
+		}
 		default:
 			/*
 			 * lookup_op() should have already thrown an error for
@@ -1460,12 +1529,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
 
 	if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
 		sync_file = sync_file_create(job->fence);
-		if (!sync_file) {
+		if (!sync_file)
 			ret = -ENOMEM;
-		} else {
-			fd_install(out_fence_fd, sync_file->file);
-			args->fence_fd = out_fence_fd;
-		}
 	}
 
 	if (ret)
@@ -1494,10 +1559,14 @@ out:
 out_unlock:
 	mutex_unlock(&queue->lock);
 out_post_unlock:
-	if (ret && (out_fence_fd >= 0)) {
-		put_unused_fd(out_fence_fd);
+	if (ret) {
+		if (out_fence_fd >= 0)
+			put_unused_fd(out_fence_fd);
 		if (sync_file)
 			fput(sync_file->file);
+	} else if (sync_file) {
+		fd_install(out_fence_fd, sync_file->file);
+		args->fence_fd = out_fence_fd;
 	}
 
 	if (!IS_ERR_OR_NULL(job)) {
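
A note on the in-place remap fast path added in msm_gem_vm_sm_step_unmap(): the
condition reads as a single predicate, "this unmap callback keeps the VMA, and the
MAP op currently being applied describes exactly the mapping we already have", in
which case only the VMA flags can differ. A minimal restatement using only field
names visible in the patch; the helper itself is hypothetical and not part of the
patch:

/*
 * Hypothetical helper, not part of the patch: restates the in-place
 * remap check from msm_gem_vm_sm_step_unmap() above.  True when the
 * kept unmap plus the pending MAP op describe the existing mapping,
 * so the page tables need not be touched.
 */
static bool
op_is_inplace_remap(const struct drm_gpuva_op_unmap *unmap,
		    const struct msm_vm_bind_op *op)
{
	const struct drm_gpuva *vma = unmap->va;

	return unmap->keep &&
	       (op->op == MSM_VM_BIND_OP_MAP) &&
	       (vma->gem.obj == op->obj) &&
	       (vma->gem.offset == op->obj_offset) &&
	       (vma->va.addr == op->iova) &&
	       (vma->va.range == op->range);
}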
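
Both converted call sites (vm_bind_job_lock_objects() and vm_bind_job_prepare())
build an identical struct drm_gpuvm_map_req from the same msm_vm_bind_op fields,
so the translation could plausibly live in one place. A sketch of such a factoring,
not part of the patch:

/*
 * Hypothetical refactor, not part of the patch: one helper for the
 * msm_vm_bind_op -> drm_gpuvm_map_req translation duplicated at both
 * call sites above.
 */
static struct drm_gpuvm_map_req
op_to_map_req(const struct msm_vm_bind_op *op)
{
	return (struct drm_gpuvm_map_req){
		.map.va.addr = op->iova,
		.map.va.range = op->range,
		.map.gem.obj = op->obj,
		.map.gem.offset = op->obj_offset,
	};
}

With that, the MAP/MAP_NULL cases would reduce to a single
"map_req = op_to_map_req(op);" before the drm_gpuvm_sm_map*() call.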
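
The sync_file changes at the end converge on the usual "reserve early, install
late" fd pattern: fd_install() publishes the fd to userspace and cannot be rolled
back, so it must come after the last possible failure point, which is why the
patch moves it out of the FENCE_FD_OUT block and into the success leg of
out_post_unlock. A condensed, self-contained sketch of the pattern with the core
kernel APIs involved; the function name is hypothetical and error labels are
simplified relative to the driver:

#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/sync_file.h>

/*
 * Illustrative only, not from the patch: reserve the fd first, create
 * everything that can fail, and only then make the fd visible.
 */
static int install_fence_fd(struct dma_fence *fence, u32 *fence_fd)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not yet visible */
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);		/* safe: fd was never installed */
		return -ENOMEM;
	}

	/* No failure paths past this point: publish the fd. */
	fd_install(fd, sync_file->file);
	*fence_fd = fd;
	return 0;
}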