From 5f72db59160c697d1dec3f6074dba72bd834c695 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Thu, 2 Nov 2017 22:03:34 +0200
Subject: dma-buf/fence: Sparse wants __rcu on the object itself
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In order to silence sparse in dma_fence_get_rcu_safe(), we need to mark
the incoming fence object as being RCU protected and not the pointer to
the object.

Cc: Dave Airlie
Cc: Jason Ekstrand
Cc: linaro-mm-sig@lists.linaro.org
Cc: linux-media@vger.kernel.org
Cc: Alex Deucher
Cc: Christian König
Cc: Sumit Semwal
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20171102200336.23347-3-ville.syrjala@linux.intel.com
Reviewed-by: Daniel Vetter
Acked-by: Christian König
Acked-by: Sumit Semwal
[vsyrjala: s/silent/silence/ in commit message]
Signed-off-by: Ville Syrjälä
---
 include/linux/dma-fence.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index efdabbb64e3c..4c008170fe65 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
  * The caller is required to hold the RCU read lock.
  */
 static inline struct dma_fence *
-dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
 {
 	do {
 		struct dma_fence *fence;
--
cgit v1.2.3
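
With the __rcu annotation on the fence object rather than on the pointer to it,
a caller can pass the address of an __rcu-annotated fence pointer straight to
dma_fence_get_rcu_safe() without a sparse warning. A minimal, hypothetical
caller sketch follows; struct hypothetical_obj and hypothetical_obj_get_fence()
are illustration only and not part of the patch.

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

struct hypothetical_obj {
	/* Updated elsewhere via rcu_assign_pointer(obj->fence, ...). */
	struct dma_fence __rcu *fence;
};

static struct dma_fence *hypothetical_obj_get_fence(struct hypothetical_obj *obj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	/* &obj->fence has type struct dma_fence __rcu **, matching the fixed prototype. */
	fence = dma_fence_get_rcu_safe(&obj->fence);
	rcu_read_unlock();

	return fence;	/* reference held by the caller, or NULL */
}
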
From 5d276a1acaa80f4b7a76577510a2b1835ce01f4f Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 9 Nov 2017 09:59:05 +0100
Subject: dma-buf: add reservation_object_lock_interruptible()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

That's the only wrapper function missing and necessary to clean up TTM.

Reviewed-and-Tested-by: Michel Dänzer
Signed-off-by: Christian König
Signed-off-by: Alex Deucher
Link: https://patchwork.freedesktop.org/patch/msgid/20171109085909.1653-3-christian.koenig@amd.com
---
 include/linux/reservation.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 21fc84d82d41..02166e815afb 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -166,6 +166,29 @@ reservation_object_lock(struct reservation_object *obj,
 	return ww_mutex_lock(&obj->lock, ctx);
 }
 
+/**
+ * reservation_object_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptible for exclusive access and
+ * modification. Note, that the lock is only against other writers, readers
+ * will run concurrently with a writer under RCU. The seqlock is used to
+ * notify readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int
+reservation_object_lock_interruptible(struct reservation_object *obj,
+				      struct ww_acquire_ctx *ctx)
+{
+	return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+
 /**
  * reservation_object_trylock - trylock the reservation object
  * @obj: the reservation object
--
cgit v1.2.3
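
As the kerneldoc above notes, a single reservation object may be locked with a
NULL ww_acquire_ctx. Below is a hypothetical usage sketch, not taken from the
patch; hypothetical_access_buffer() is invented for illustration, and the error
code shown follows the usual ww_mutex_lock_interruptible() behaviour.

#include <linux/reservation.h>

static int hypothetical_access_buffer(struct reservation_object *resv)
{
	int ret;

	/* NULL ctx: only this one object is locked, so no deadlock unwinding is needed. */
	ret = reservation_object_lock_interruptible(resv, NULL);
	if (ret)
		return ret;	/* e.g. -EINTR if a signal arrived while waiting */

	/* ... inspect or replace fences while holding the lock ... */

	reservation_object_unlock(resv);
	return 0;
}
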
From 03e4e0a9e02cf703da331ff6cfd57d0be9bf5692 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 14 Nov 2017 16:27:19 +0000
Subject: dma-buf/fence: Fix lock inversion within dma-fence-array
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Ages ago Rob Clark noted, "Currently with fence-array, we have a
potential deadlock situation. If we fence_add_callback() on an
array-fence, the array-fence's lock is acquired first, and in its
->enable_signaling() callback, it will install cbs on its array-member
fences, so the array-member's lock is acquired second. But in the
signal path, the array-member's lock is acquired first, and the
array-fence's lock acquired second."

Rob proposed either extensive changes to dma-fence to unnest the
fence-array signaling, or to defer the signaling onto a workqueue. This
is a more refined version of the latter, which should keep the latency
of the fence signaling to a minimum by using an irq_work, which is
executed as soon as possible.

Reported-by: Rob Clark
Suggested-by: Rob Clark
References: 1476635975-21981-1-git-send-email-robdclark@gmail.com
Signed-off-by: Chris Wilson
Cc: Rob Clark
Cc: Gustavo Padovan
Cc: Sumit Semwal
Cc: Christian König
Reviewed-by: Christian König
Signed-off-by: Sumit Semwal
Link: https://patchwork.freedesktop.org/patch/msgid/20171114162719.30958-1-chris@chris-wilson.co.uk
---
 drivers/base/Kconfig              |  1 +
 drivers/dma-buf/dma-fence-array.c | 14 ++++++++++++--
 include/linux/dma-fence-array.h   |  3 +++
 3 files changed, 16 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 1a5f6a157a57..62b0de06836e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -244,6 +244,7 @@ config DMA_SHARED_BUFFER
 	bool
 	default n
 	select ANON_INODES
+	select IRQ_WORK
 	help
 	  This option enables the framework for buffer-sharing between
 	  multiple drivers. A buffer is associated with a file using driver
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 0350829ba62e..dd1edfb27b61 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 	return "unbound";
 }
 
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+	dma_fence_signal(&array->base);
+	dma_fence_put(&array->base);
+}
+
 static void dma_fence_array_cb_func(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
 {
@@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
 	struct dma_fence_array *array = array_cb->array;
 
 	if (atomic_dec_and_test(&array->num_pending))
-		dma_fence_signal(&array->base);
-	dma_fence_put(&array->base);
+		irq_work_queue(&array->work);
+	else
+		dma_fence_put(&array->base);
 }
 
 static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 	spin_lock_init(&array->lock);
 	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
 		       context, seqno);
+	init_irq_work(&array->work, irq_dma_fence_array_work);
 
 	array->num_fences = num_fences;
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 332a5420243c..bc8940ca280d 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -21,6 +21,7 @@
 #define __LINUX_DMA_FENCE_ARRAY_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 
 /**
  * struct dma_fence_array_cb - callback helper for fence array
@@ -47,6 +48,8 @@ struct dma_fence_array {
 	unsigned num_fences;
 	atomic_t num_pending;
 	struct dma_fence **fences;
+
+	struct irq_work work;
 };
 
 extern const struct dma_fence_ops dma_fence_array_ops;
--
cgit v1.2.3
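
To make the deferral pattern of the patch above explicit, here is a condensed,
self-contained sketch; the hypothetical_* names are illustration only and not
kernel code. The last member-fence callback runs with that member's lock held,
so rather than signalling the aggregate fence there (which would take the
aggregate's lock and recreate the inversion), the signalling is pushed to an
irq_work that runs as soon as possible with no fence lock held.

#include <linux/atomic.h>
#include <linux/irq_work.h>

struct hypothetical_aggregate {
	struct irq_work work;
	atomic_t num_pending;
};

/* Runs in irq_work context: no member-fence lock is held here, so taking
 * the aggregate's own lock (as dma_fence_signal() would) cannot invert. */
static void hypothetical_complete(struct irq_work *wrk)
{
	struct hypothetical_aggregate *agg =
		container_of(wrk, typeof(*agg), work);

	/* e.g. dma_fence_signal(&agg->base); dma_fence_put(&agg->base); */
	(void)agg;
}

/* Called from each member fence's callback, i.e. under the member's lock. */
static void hypothetical_member_signaled(struct hypothetical_aggregate *agg)
{
	if (atomic_dec_and_test(&agg->num_pending))
		irq_work_queue(&agg->work);	/* defer instead of nesting locks */
}

/* At init time: init_irq_work(&agg->work, hypothetical_complete); */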