Diffstat (limited to 'drivers/gpu/drm/drm_gpuvm.c')
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c  191
1 file changed, 191 insertions, 0 deletions
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index af63f4d00315..8a06d296561d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -26,6 +26,7 @@
*/
#include <drm/drm_gpuvm.h>
+#include <drm/drm_print.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
@@ -877,6 +878,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
}
/**
+ * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
+ * @vm_bo: the &drm_gpuvm_bo
+ *
+ * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
+ * immediately removed from the evict and extobj lists. Therefore, anyone
+ * iterating these lists should skip entries that are being destroyed.
+ *
+ * Checking the refcount without incrementing it is safe as long as the lock
+ * protecting the evict/extobj list is held for as long as the vm_bo is in
+ * use: even if the refcount drops to zero in the meantime, freeing the vm_bo
+ * requires taking that list's lock first.
+ *
+ * Zombie entries can be observed on the evict and extobj lists regardless of
+ * whether DRM_GPUVM_RESV_PROTECTED is used, but they linger longer when the
+ * resv lock protects the lists: the resv lock can't be taken during
+ * run_job() in immediate mode, so the entries stay on the lists until
+ * drm_gpuvm_bo_deferred_cleanup() is called.
+ */
+static bool
+drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
+{
+ return !kref_read(&vm_bo->kref);
+}
+
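/*
 * Illustrative sketch, not part of this patch: the pattern described in the
 * comment above, as it would look inside drm_gpuvm.c for the case where the
 * evict list is protected by its spinlock rather than the resv lock. Holding
 * gpuvm->evict.lock keeps a zombie from being freed while it is inspected,
 * because removal from the list precedes the free and needs that same lock.
 * my_walk_evict_list() is a hypothetical helper.
 */
static void my_walk_evict_list(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_bo *vm_bo;

	spin_lock(&gpuvm->evict.lock);
	list_for_each_entry(vm_bo, &gpuvm->evict.list, list.entry.evict) {
		if (drm_gpuvm_bo_is_zombie(vm_bo))
			continue;	/* scheduled for deferred cleanup, skip */

		/* ... safe to look at vm_bo here ... */
	}
	spin_unlock(&gpuvm->evict.lock);
}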
+/**
* drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
* @__vm_bo: the &drm_gpuvm_bo
* @__list_name: the name of the list to insert into
@@ -1081,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
INIT_LIST_HEAD(&gpuvm->evict.list);
spin_lock_init(&gpuvm->evict.lock);
+ init_llist_head(&gpuvm->bo_defer);
+
kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
@@ -1122,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
"Extobj list should be empty.\n");
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
"Evict list should be empty.\n");
+ drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
+ "VM BO cleanup list should be empty.\n");
drm_gem_object_put(gpuvm->r_obj);
}
@@ -1217,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
@@ -1460,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
list.entry.evict) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = ops->vm_bo_validate(vm_bo, exec);
if (ret)
break;
@@ -1560,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+ init_llist_node(&vm_bo->list.entry.bo_defer);
return vm_bo;
}
@@ -1621,6 +1658,126 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+/*
+ * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
+ * deferred cleanup
+ *
+ * If deferred cleanup is used, then this must be called right after the vm_bo
+ * refcount drops to zero. Must be called with GEM mutex held. After releasing
+ * the GEM mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
+ */
+static void
+drm_gpuvm_bo_into_zombie(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, true);
+ drm_gpuvm_bo_list_del(vm_bo, evict, true);
+ }
+
+ list_del(&vm_bo->list.entry.gem);
+}
+
+/*
+ * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
+ * bo_defer list
+ *
+ * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
+ *
+ * The GEM must stay alive for as long as we hold its mutex, but the instant
+ * the vm_bo is added to bo_defer, another thread may call
+ * drm_gpuvm_bo_deferred_cleanup() and drop the GEM reference, freeing the GEM
+ * together with the mutex embedded in it. Therefore, to avoid freeing a mutex
+ * we are still holding, the GEM mutex must be released *before* calling this
+ * function.
+ */
+static void
+drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
+{
+ llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
+}
+
+static void
+drm_gpuvm_bo_defer_free(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_into_zombie(kref);
+ mutex_unlock(&vm_bo->obj->gpuva.lock);
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+}
+
+/**
+ * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
+ * deferred cleanup
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * This might take and release the GEM's GPUVA lock. You should call
+ * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
+ *
+ * Returns: true if vm_bo is being destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
+{
+ if (!vm_bo)
+ return false;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ return !!kref_put_mutex(&vm_bo->kref,
+ drm_gpuvm_bo_defer_free,
+ &vm_bo->obj->gpuva.lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
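/*
 * Illustrative sketch, not part of this patch: a driver in immediate mode
 * dropping a vm_bo reference from its run_job() path, where the dma_resv lock
 * must not be taken. my_drop_vm_bo_in_run_job() is a hypothetical helper.
 */
static void my_drop_vm_bo_in_run_job(struct drm_gpuvm_bo *vm_bo)
{
	/*
	 * Takes the GEM's gpuva mutex but not the resv lock; the vm_bo is
	 * only detached and queued on the VM's bo_defer list here.
	 */
	drm_gpuvm_bo_put_deferred(vm_bo);

	/*
	 * The actual teardown happens later, once the driver calls
	 * drm_gpuvm_bo_deferred_cleanup() from a context where taking
	 * locks is allowed.
	 */
}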
+
+/**
+ * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred cleanup list
+ * @gpuvm: the VM to clean up
+ *
+ * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list. This may
+ * take and release the GPUVM's dma_resv lock.
+ */
+void
+drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gem_object *obj;
+ struct llist_node *bo_defer;
+
+ bo_defer = llist_del_all(&gpuvm->bo_defer);
+ if (!bo_defer)
+ return;
+
+ if (drm_gpuvm_resv_protected(gpuvm)) {
+ dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
+ llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, false);
+ drm_gpuvm_bo_list_del(vm_bo, evict, false);
+ }
+ dma_resv_unlock(drm_gpuvm_resv(gpuvm));
+ }
+
+ while (bo_defer) {
+ vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
+ bo_defer = bo_defer->next;
+ obj = vm_bo->obj;
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
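/*
 * Illustrative sketch, not part of this patch: completing the deferred
 * cleanup from a driver workqueue, i.e. a context where sleeping and taking
 * the dma_resv lock are allowed. struct my_vm and my_vm_cleanup_work() are
 * hypothetical (assumes <linux/workqueue.h>).
 */
struct my_vm {
	struct drm_gpuvm gpuvm;
	struct work_struct cleanup_work;
};

static void my_vm_cleanup_work(struct work_struct *work)
{
	struct my_vm *vm = container_of(work, struct my_vm, cleanup_work);

	/* Unlinks the queued zombies from the extobj/evict lists and frees them. */
	drm_gpuvm_bo_deferred_cleanup(&vm->gpuvm);
}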
+
static struct drm_gpuvm_bo *
__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj)
@@ -1949,6 +2106,40 @@ drm_gpuva_unlink(struct drm_gpuva *va)
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
/**
+ * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
+ * @va: the &drm_gpuva to unlink
+ *
+ * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and
+ * takes the GEM's gpuva lock on behalf of the caller.
+ */
+void
+drm_gpuva_unlink_defer(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
+ bool should_defer_bo;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ mutex_lock(&obj->gpuva.lock);
+ list_del_init(&va->gem.entry);
+
+ /*
+ * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
+ */
+ should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
+ mutex_unlock(&obj->gpuva.lock);
+ if (should_defer_bo)
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+
+ va->vm_bo = NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
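/*
 * Illustrative sketch, not part of this patch: tearing down a mapping with
 * deferred vm_bo cleanup, e.g. from a driver's &drm_gpuvm_ops.sm_step_unmap
 * callback in immediate mode. my_unmap_va() is a hypothetical helper.
 */
static void my_unmap_va(struct drm_gpuva *va)
{
	drm_gpuva_remove(va);		/* drop the VA from the VM's tree */
	drm_gpuva_unlink_defer(va);	/* takes the GEM's gpuva lock itself */

	/*
	 * If this dropped the last vm_bo reference, the vm_bo now sits on the
	 * bo_defer list until drm_gpuvm_bo_deferred_cleanup() runs from a
	 * context where taking locks is allowed.
	 */
}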
+
+/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
* @gpuvm: the &drm_gpuvm to search in
* @addr: the &drm_gpuvas address