From 3e31f94752e454bdd0ca4a1d046ee21f80c166c5 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 26 Feb 2021 17:06:58 -0700 Subject: lockdep: Add lockdep_assert_not_held() Some kernel functions must be called without holding a specific lock. Add lockdep_assert_not_held() to be used in these functions to detect incorrect calls while holding a lock. lockdep_assert_not_held() provides the opposite functionality of lockdep_assert_held(), which is used to assert calls that require holding a specific lock. Incorporates suggestions from Peter Zijlstra to avoid misfires when lockdep_off() is employed. The need for lockdep_assert_not_held() came up in a discussion on an ath10k patch. ath10k_drain_tx() and i915_vma_pin_ww() are examples of functions that can use lockdep_assert_not_held(). Signed-off-by: Shuah Khan Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/linux-wireless/871rdmu9z9.fsf@codeaurora.org/ --- include/linux/lockdep.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 7b7ebf2e28ec..dbd9ea846b36 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -301,8 +301,12 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) -#define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held(l)); \ +#define lockdep_assert_held(l) do { \ + WARN_ON(debug_locks && lockdep_is_held(l) == 0); \ + } while (0) + +#define lockdep_assert_not_held(l) do { \ + WARN_ON(debug_locks && lockdep_is_held(l) == 1); \ } while (0) #define lockdep_assert_held_write(l) do { \ @@ -393,7 +397,8 @@ extern int lockdep_is_held(const void *); #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) -#define lockdep_assert_held_write(l) do { (void)(l); } while (0) +#define lockdep_assert_not_held(l) do { (void)(l); } while (0) +#define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) -- cgit v1.2.3 From f8cfa46608f8aa5ca5421ce281ab314129c15411 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Fri, 26 Feb 2021 17:06:59 -0700 Subject: lockdep: Add lockdep lock state defines Adds defines for the lock state returns from lock_is_held_type(), based on Johannes Berg's suggestions, as they make the lock states easier to read and maintain. These are defines rather than an enum, to avoid changing the lock_is_held_type() and lockdep_is_held() return types. lock_is_held_type() and __lock_is_held() are updated to use the new defines.
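For illustration, a minimal sketch of the intended usage (not part of either patch; the ath10k function name comes from the linked discussion, and the conf_mutex field is an assumed example lock):

	/* Must run without conf_mutex; WARNs if the caller holds it. */
	static void ath10k_drain_tx(struct ath10k *ar)
	{
		lockdep_assert_not_held(&ar->conf_mutex);
		/* ... drain pending tx, possibly taking conf_mutex itself ... */
	}

With lockdep disabled, e.g. under lockdep_off(), lock_is_held_type() returns LOCK_STATE_UNKNOWN (-1), which matches neither LOCK_STATE_NOT_HELD nor LOCK_STATE_HELD, so neither lockdep_assert_held() nor lockdep_assert_not_held() can misfire; comparing against the exact held/not-held value is what implements Peter Zijlstra's suggestion above.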
Signed-off-by: Shuah Khan Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/linux-wireless/871rdmu9z9.fsf@codeaurora.org/ --- include/linux/lockdep.h | 11 +++++++++-- kernel/locking/lockdep.c | 11 ++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index dbd9ea846b36..17805aac0e85 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, unsigned long ip); +/* lock_is_held_type() returns */ +#define LOCK_STATE_UNKNOWN -1 +#define LOCK_STATE_NOT_HELD 0 +#define LOCK_STATE_HELD 1 + /* * Same "read" as for lock_acquire(), except -1 means any. */ @@ -302,11 +307,13 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) #define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && lockdep_is_held(l) == 0); \ + WARN_ON(debug_locks && \ + lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \ } while (0) #define lockdep_assert_not_held(l) do { \ - WARN_ON(debug_locks && lockdep_is_held(l) == 1); \ + WARN_ON(debug_locks && \ + lockdep_is_held(l) == LOCK_STATE_HELD); \ } while (0) #define lockdep_assert_held_write(l) do { \ diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 969736b33185..c0b8926a67f0 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -54,6 +54,7 @@ #include #include #include +#include #include @@ -5252,13 +5253,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read) if (match_held_lock(hlock, lock)) { if (read == -1 || hlock->read == read) - return 1; + return LOCK_STATE_HELD; - return 0; + return LOCK_STATE_NOT_HELD; } } - return 0; + return LOCK_STATE_NOT_HELD; } static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) @@ -5537,14 +5538,14 @@ EXPORT_SYMBOL_GPL(lock_release); noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) { unsigned long flags; - int ret = 0; + int ret = LOCK_STATE_NOT_HELD; /* * Avoid false negative lockdep_assert_held() and * lockdep_assert_not_held(). */ if (unlikely(!lockdep_enabled())) - return -1; + return LOCK_STATE_UNKNOWN; raw_local_irq_save(flags); check_flags(flags); -- cgit v1.2.3 From bd0ccc4afca2d6ae0029cae35c4f1d2e2ade7579 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Fri, 15 Jan 2021 18:09:53 +0100 Subject: kcsan: Add missing license and copyright headers Adds missing license and/or copyright headers for KCSAN source files. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- Documentation/dev-tools/kcsan.rst | 3 +++ include/linux/kcsan-checks.h | 6 ++++++ include/linux/kcsan.h | 7 +++++++ kernel/kcsan/atomic.h | 5 +++++ kernel/kcsan/core.c | 5 +++++ kernel/kcsan/debugfs.c | 5 +++++ kernel/kcsan/encoding.h | 5 +++++ kernel/kcsan/kcsan.h | 3 ++- kernel/kcsan/report.c | 5 +++++ kernel/kcsan/selftest.c | 5 +++++ 10 files changed, 48 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index be7a0b0e1f28..d85ce238ace7 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -1,3 +1,6 @@ +.. SPDX-License-Identifier: GPL-2.0 +.. Copyright (C) 2019, Google LLC. 
+ The Kernel Concurrency Sanitizer (KCSAN) ======================================== diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index cf14840609ce..9fd0ad80fef6 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -1,4 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * KCSAN access checks and modifiers. These can be used to explicitly check + * uninstrumented accesses, or change KCSAN checking behaviour of accesses. + * + * Copyright (C) 2019, Google LLC. + */ #ifndef _LINUX_KCSAN_CHECKS_H #define _LINUX_KCSAN_CHECKS_H diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 53340d8789f9..fc266ecb2a4d 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -1,4 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and + * data structures to set up runtime. See kcsan-checks.h for explicit checks and + * modifiers. For more info please see Documentation/dev-tools/kcsan.rst. + * + * Copyright (C) 2019, Google LLC. + */ #ifndef _LINUX_KCSAN_H #define _LINUX_KCSAN_H diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h index 75fe701f4127..530ae1bda8e7 100644 --- a/kernel/kcsan/atomic.h +++ b/kernel/kcsan/atomic.h @@ -1,4 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Rules for implicitly atomic memory accesses. + * + * Copyright (C) 2019, Google LLC. + */ #ifndef _KERNEL_KCSAN_ATOMIC_H #define _KERNEL_KCSAN_ATOMIC_H diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 23e7acb5c667..45c821d4e8bd 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +/* + * KCSAN core runtime. + * + * Copyright (C) 2019, Google LLC. + */ #define pr_fmt(fmt) "kcsan: " fmt diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 209ad8dcfcec..c1dd02f3be8b 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +/* + * KCSAN debugfs interface. + * + * Copyright (C) 2019, Google LLC. + */ #define pr_fmt(fmt) "kcsan: " fmt diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h index 7ee405524904..170a2bb22f53 100644 --- a/kernel/kcsan/encoding.h +++ b/kernel/kcsan/encoding.h @@ -1,4 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * KCSAN watchpoint encoding. + * + * Copyright (C) 2019, Google LLC. + */ #ifndef _KERNEL_KCSAN_ENCODING_H #define _KERNEL_KCSAN_ENCODING_H diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index 87ccdb3b051f..9881099d4179 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -1,8 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ - /* * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please * see Documentation/dev-tools/kcsan.rst. + * + * Copyright (C) 2019, Google LLC. */ #ifndef _KERNEL_KCSAN_KCSAN_H diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index d3bf87e6007c..13dce3c664d6 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +/* + * KCSAN reporting. + * + * Copyright (C) 2019, Google LLC. + */ #include #include diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 9014a3a82cf9..7f29cb0f5e63 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -1,4 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +/* + * KCSAN short boot-time selftests. + * + * Copyright (C) 2019, Google LLC. 
+ */ #define pr_fmt(fmt) "kcsan: " fmt -- cgit v1.2.3 From 5261ced47f8e89173c3b015f6152a05f11a418c3 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 18 Mar 2021 13:28:12 -0400 Subject: locking/ww_mutex: Remove DEFINE_WW_MUTEX() macro The current DEFINE_WW_MUTEX() macro fails to properly set up the lockdep key of the ww_mutexes, causing a potential circular locking dependency splat. Though it is possible to add more macro magic to make it work, the result is rather ugly. Since locktorture was the only user of DEFINE_WW_MUTEX() and the previous commit has just removed that use, it is easier to just remove the macro and force future users of ww_mutexes to use ww_mutex_init() for initialization. Suggested-by: Peter Zijlstra Signed-off-by: Waiman Long Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20210318172814.4400-4-longman@redhat.com --- include/linux/ww_mutex.h | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 6ecf2a0220db..b77f39f319ad 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,39 +48,26 @@ struct ww_acquire_ctx { #endif }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ - , .ww_class = class -#else -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) -#endif #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \ { .stamp = ATOMIC_LONG_INIT(0) \ , .acquire_name = #ww_class "_acquire" \ , .mutex_name = #ww_class "_mutex" \ , .is_wait_die = _is_wait_die } -#define __WW_MUTEX_INITIALIZER(lockname, class) \ - { .base = __MUTEX_INITIALIZER(lockname.base) \ - __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } - #define DEFINE_WD_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1) #define DEFINE_WW_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0) -#define DEFINE_WW_MUTEX(mutexname, ww_class) \ - struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) - /** * ww_mutex_init - initialize the w/w mutex * @lock: the mutex to be initialized * @ww_class: the w/w class the mutex should belong to * * Initialize the w/w mutex to unlocked state and associate it with the given - * class. + * class. Static define macro for w/w mutex is not provided and this function + * is the only way to properly initialize the w/w mutex. * * It is not allowed to initialize an already locked mutex. */ -- cgit v1.2.3 From e2db7592be8e83df47519116621411e1056b21c7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 22 Mar 2021 02:35:05 +0100 Subject: locking: Fix typos in comments Fix ~16 single-word typos in locking code comments. Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Paul E.
McKenney Cc: Will Deacon Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/arm/include/asm/spinlock.h | 2 +- include/linux/lockdep.h | 2 +- include/linux/rwsem.h | 2 +- kernel/locking/lockdep.c | 4 ++-- kernel/locking/lockdep_proc.c | 2 +- kernel/locking/mcs_spinlock.h | 2 +- kernel/locking/mutex.c | 4 ++-- kernel/locking/osq_lock.c | 4 ++-- kernel/locking/rtmutex.c | 4 ++-- kernel/locking/rwsem.c | 2 +- kernel/locking/spinlock.c | 4 ++-- 11 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 8f009e788ad4..f610a773f2be 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -22,7 +22,7 @@ * assembler to insert a extra (16-bit) IT instruction, depending on the * presence or absence of neighbouring conditional instructions. * - * To avoid this unpredictableness, an approprite IT is inserted explicitly: + * To avoid this unpredictability, an appropriate IT is inserted explicitly: * the assembler won't change IT instructions which are explicitly present * in the input. */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 17805aac0e85..09ac2e8348d2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task); /* - * Split the recrursion counter in two to readily detect 'off' vs recursion. + * Split the recursion counter in two to readily detect 'off' vs recursion. */ #define LOCKDEP_RECURSION_BITS 16 #define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 4c715be48717..a66038d88878 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -110,7 +110,7 @@ do { \ /* * This is the same regardless of which rwsem implementation that is being used. - * It is just a heuristic meant to be called by somebody alreadying holding the + * It is just a heuristic meant to be called by somebody already holding the * rwsem to see if somebody from an incompatible type is wanting access to the * lock. */ diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index c0b8926a67f0..0e97287891db 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -1747,7 +1747,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry, /* * Step 4: if not match, expand the path by adding the - * forward or backwards dependencis in the search + * forward or backwards dependencies in the search * */ first = true; @@ -1916,7 +1916,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the * dependency graph, as any strong path ..-> A -> B ->.. we can get with * having dependency A -> B, we could already get a equivalent path ..-> A -> - * .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant. + * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. * * We need to make sure both the start and the end of A -> .. -> B is not * weaker than A -> B. 
For the start part, please see the comment in diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 02ef87f50df2..806978314496 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) debug_locks); /* - * Zappped classes and lockdep data buffers reuse statistics. + * Zapped classes and lockdep data buffers reuse statistics. */ seq_puts(m, "\n"); seq_printf(m, " zapped classes: %11lu\n", diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 5e10153b4d3c..85251d8771d9 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -7,7 +7,7 @@ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock * with the desirable properties of being fair, and with each cpu trying * to acquire the lock spinning on a local variable. - * It avoids expensive cache bouncings that common test-and-set spin-lock + * It avoids expensive cache bounces that common test-and-set spin-lock * implementations incur. */ #ifndef __LINUX_MCS_SPINLOCK_H diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 622ebdfcd083..cb6b112ce155 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner) } /* - * Trylock variant that retuns the owning task on failure. + * Trylock variant that returns the owning task on failure. */ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) { @@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, /* * Give up ownership to a specific task, when @task = NULL, this is equivalent - * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves + * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves * WAITERS. Provides RELEASE semantics like a regular unlock, the * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. */ diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index 1de006ed3aa8..d5610ad52b92 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c @@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) */ /* - * Wait to acquire the lock or cancelation. Note that need_resched() + * Wait to acquire the lock or cancellation. Note that need_resched() * will come with an IPI, which will wake smp_cond_load_relaxed() if it * is implemented with a monitor-wait. vcpu_is_preempted() relies on * polling, be careful. @@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) /* * We can only fail the cmpxchg() racing against an unlock(), - * in which case we should observe @node->locked becomming + * in which case we should observe @node->locked becoming * true. */ if (smp_load_acquire(&node->locked)) diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 29f09d0b8224..db31bce114f8 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -706,7 +706,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, } else if (prerequeue_top_waiter == waiter) { /* * The waiter was the top waiter on the lock, but is - * no longer the top prority waiter. Replace waiter in + * no longer the top priority waiter. Replace waiter in * the owner tasks pi waiters tree with the new top * (highest priority) waiter and adjust the priority * of the owner. 
@@ -1194,7 +1194,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, return; /* - * Yell lowdly and stop the task right here. + * Yell loudly and stop the task right here. */ rt_mutex_print_deadlock(w); while (1) { diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index fe9cc65cd522..809b0016d344 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -819,7 +819,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) * we try to get it. The new owner may be a spinnable * writer. * - * To take advantage of two scenarios listed agove, the RT + * To take advantage of two scenarios listed above, the RT * task is made to retry one more time to see if it can * acquire the lock or continue spinning on the new owning * writer. Of course, if the time lag is long enough or the diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 0ff08380f531..c8d7ad9fb9b2 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -58,10 +58,10 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state); /* * We build the __lock_function inlines here. They are too large for * inlining all over the place, but here is only one user per function - * which embedds them into the calling _lock_function below. + * which embeds them into the calling _lock_function below. * * This could be a long-held lock. We both prepare to spin for a long - * time (making _this_ CPU preemptable if possible), and we also signal + * time (making _this_ CPU preemptible if possible), and we also signal * towards that other CPU that it should break the lock ASAP. */ #define BUILD_LOCK_OPS(op, locktype) \ -- cgit v1.2.3 From 8af856d18bfbe89676ade38caa2a5d06f75f211d Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Wed, 24 Mar 2021 13:40:40 +0800 Subject: locking/mutex: Remove repeated declaration Commit 0cd39f4600ed ("locking/seqlock, headers: Untangle the spaghetti monster") introduces 'struct ww_acquire_ctx' again, remove the repeated declaration and move the pre-declarations to the top. Signed-off-by: Shaokun Zhang Signed-off-by: Ingo Molnar Acked-by: Waiman Long Link: https://lore.kernel.org/r/1616564440-61318-1-git-send-email-zhangshaokun@hisilicon.com --- include/linux/mutex.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 0cd631a19727..e7a126796937 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,6 +20,7 @@ #include #include +struct ww_class; struct ww_acquire_ctx; /* @@ -65,9 +66,6 @@ struct mutex { #endif }; -struct ww_class; -struct ww_acquire_ctx; - struct ww_mutex { struct mutex base; struct ww_acquire_ctx *ctx; -- cgit v1.2.3 From c15380b72d7ae821ee090ba5a56fc6310828dbda Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:30 +0100 Subject: locking/rtmutex: Remove rt_mutex_timed_lock() rt_mutex_timed_lock() has no callers since: c051b21f71d1f ("rtmutex: Confine deadlock logic to futex") Remove it. 
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.061103415@linutronix.de --- include/linux/rtmutex.h | 3 --- kernel/locking/rtmutex.c | 46 ---------------------------------------------- 2 files changed, 49 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 6fd615a0eea9..32f4a3538c3c 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -115,9 +115,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock); #endif extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); -extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout); - extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index db31bce114f8..ca93e5d7b026 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1394,21 +1394,6 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); } -static inline int -rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) -{ - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - - return slowfn(lock, state, timeout, chwalk); -} - static inline int rt_mutex_fasttrylock(struct rt_mutex *lock, int (*slowfn)(struct rt_mutex *lock)) @@ -1516,37 +1501,6 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) return __rt_mutex_slowtrylock(lock); } -/** - * rt_mutex_timed_lock - lock a rt_mutex interruptible - * the timeout structure is provided - * by the caller - * - * @lock: the rt_mutex to be locked - * @timeout: timeout structure or NULL (no timeout) - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - * -ETIMEDOUT when the timeout expired - */ -int -rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) -{ - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_MIN_CHAINWALK, - rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - - return ret; -} -EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); - /** * rt_mutex_trylock - try to lock a rt_mutex * -- cgit v1.2.3 From 2d445c3e4a8216cfa9703998124c13250cc13e5e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:31 +0100 Subject: locking/rtmutex: Remove rtmutex deadlock tester leftovers The following debug members of 'struct rtmutex' are unused: - save_state: No users - file,line: Printed if ::name is NULL. This is only used for non-futex locks so ::name is never NULL - magic: Assigned to NULL by rt_mutex_destroy(), no further usage Remove them along with unused inline and macro leftovers related to the long gone deadlock tester. 
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.195064296@linutronix.de --- include/linux/rtmutex.h | 7 ++----- kernel/locking/rtmutex-debug.c | 7 +------ kernel/locking/rtmutex-debug.h | 2 -- kernel/locking/rtmutex.c | 3 --- kernel/locking/rtmutex.h | 2 -- kernel/locking/rtmutex_common.h | 1 - 6 files changed, 3 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 32f4a3538c3c..48b334b9eb87 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -32,10 +32,7 @@ struct rt_mutex { struct rb_root_cached waiters; struct task_struct *owner; #ifdef CONFIG_DEBUG_RT_MUTEXES - int save_state; - const char *name, *file; - int line; - void *magic; + const char *name; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -60,7 +57,7 @@ struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname, .file = __FILE__, .line = __LINE__ + , .name = #mutexname # define rt_mutex_init(mutex) \ do { \ diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 36e69100e8e0..7e411b946d4c 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -42,12 +42,7 @@ static void printk_task(struct task_struct *p) static void printk_lock(struct rt_mutex *lock, int print_owner) { - if (lock->name) - printk(" [%p] {%s}\n", - lock, lock->name); - else - printk(" [%p] {%s:%d}\n", - lock, lock->file, lock->line); + printk(" [%p] {%s}\n", lock, lock->name); if (print_owner && rt_mutex_owner(lock)) { printk(".. ->owner: %p\n", lock->owner); diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h index fc549713bba3..772c9b012b62 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -22,8 +22,6 @@ extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, struct rt_mutex_waiter *waiter, struct rt_mutex *lock); extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); -# define debug_rt_mutex_reset_waiter(w) \ - do { (w)->deadlock_lock = NULL; } while (0) static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk walk) diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index ca93e5d7b026..11abc60d1478 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1594,9 +1594,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) void rt_mutex_destroy(struct rt_mutex *lock) { WARN_ON(rt_mutex_is_locked(lock)); -#ifdef CONFIG_DEBUG_RT_MUTEXES - lock->magic = NULL; -#endif } EXPORT_SYMBOL_GPL(rt_mutex_destroy); diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index 732f96abf462..4dbdec15f1a0 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -11,7 +11,6 @@ * Non-debug version. 
*/ -#define rt_mutex_deadlock_check(l) (0) #define debug_rt_mutex_init_waiter(w) do { } while (0) #define debug_rt_mutex_free_waiter(w) do { } while (0) #define debug_rt_mutex_lock(l) do { } while (0) @@ -21,7 +20,6 @@ #define debug_rt_mutex_init(m, n, k) do { } while (0) #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) #define debug_rt_mutex_print_deadlock(w) do { } while (0) -#define debug_rt_mutex_reset_waiter(w) do { } while (0) static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) { diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index a5007f00c1b7..aa047436dadf 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -30,7 +30,6 @@ struct rt_mutex_waiter { struct task_struct *task; struct rt_mutex *lock; #ifdef CONFIG_DEBUG_RT_MUTEXES - unsigned long ip; struct pid *deadlock_task_pid; struct rt_mutex *deadlock_lock; #endif -- cgit v1.2.3 From 6d41c675a5394057f6fb1dc97cc0a0e360f2c2f8 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:32 +0100 Subject: locking/rtmutex: Remove output from deadlock detector The rtmutex-specific deadlock detector predates lockdep coverage of rtmutex, and since commit f5694788ad8da ("rt_mutex: Add lockdep annotations") it contains a lot of redundant functionality: - lockdep will detect a potential deadlock before rtmutex-debug has a chance to do so - the deadlock debugging is restricted to rtmutexes which are not associated with futexes and have an active waiter, which is covered by lockdep already Remove the redundant functionality and move the actual deadlock WARN() into the deadlock code path. The latter needs a separate cleanup. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.320398604@linutronix.de --- include/linux/rtmutex.h | 7 --- kernel/locking/rtmutex-debug.c | 97 ----------------------------------------- kernel/locking/rtmutex-debug.h | 9 ---- kernel/locking/rtmutex.c | 7 +-- kernel/locking/rtmutex.h | 7 --- kernel/locking/rtmutex_common.h | 4 -- 6 files changed, 1 insertion(+), 130 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 48b334b9eb87..0725c4b45749 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -31,9 +31,6 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct rb_root_cached waiters; struct task_struct *owner; -#ifdef CONFIG_DEBUG_RT_MUTEXES - const char *name; -#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -56,8 +53,6 @@ struct hrtimer_sleeper; #endif #ifdef CONFIG_DEBUG_RT_MUTEXES -# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname # define rt_mutex_init(mutex) \ do { \ @@ -67,7 +62,6 @@ do { \ extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else -# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) # define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL) # define rt_mutex_debug_task_free(t) do { } while (0) #endif @@ -83,7 +77,6 @@ do { \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .waiters = RB_ROOT_CACHED \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} #define DEFINE_RT_MUTEX(mutexname) \ diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 7e411b946d4c..fb150100335f 100644 --- a/kernel/locking/rtmutex-debug.c +++
b/kernel/locking/rtmutex-debug.c @@ -32,105 +32,12 @@ #include "rtmutex_common.h" -static void printk_task(struct task_struct *p) -{ - if (p) - printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); - else - printk(""); -} - -static void printk_lock(struct rt_mutex *lock, int print_owner) -{ - printk(" [%p] {%s}\n", lock, lock->name); - - if (print_owner && rt_mutex_owner(lock)) { - printk(".. ->owner: %p\n", lock->owner); - printk(".. held by: "); - printk_task(rt_mutex_owner(lock)); - printk("\n"); - } -} - void rt_mutex_debug_task_free(struct task_struct *task) { DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); } -/* - * We fill out the fields in the waiter to store the information about - * the deadlock. We print when we return. act_waiter can be NULL in - * case of a remove waiter operation. - */ -void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *act_waiter, - struct rt_mutex *lock) -{ - struct task_struct *task; - - if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter) - return; - - task = rt_mutex_owner(act_waiter->lock); - if (task && task != current) { - act_waiter->deadlock_task_pid = get_pid(task_pid(task)); - act_waiter->deadlock_lock = lock; - } -} - -void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) -{ - struct task_struct *task; - - if (!waiter->deadlock_lock || !debug_locks) - return; - - rcu_read_lock(); - task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); - if (!task) { - rcu_read_unlock(); - return; - } - - if (!debug_locks_off()) { - rcu_read_unlock(); - return; - } - - pr_warn("\n"); - pr_warn("============================================\n"); - pr_warn("WARNING: circular locking deadlock detected!\n"); - pr_warn("%s\n", print_tainted()); - pr_warn("--------------------------------------------\n"); - printk("%s/%d is deadlocking current task %s/%d\n\n", - task->comm, task_pid_nr(task), - current->comm, task_pid_nr(current)); - - printk("\n1) %s/%d is trying to acquire this lock:\n", - current->comm, task_pid_nr(current)); - printk_lock(waiter->lock, 1); - - printk("\n2) %s/%d is blocked on this lock:\n", - task->comm, task_pid_nr(task)); - printk_lock(waiter->deadlock_lock, 1); - - debug_show_held_locks(current); - debug_show_held_locks(task); - - printk("\n%s/%d's [blocked] stackdump:\n\n", - task->comm, task_pid_nr(task)); - show_stack(task, NULL, KERN_DEFAULT); - printk("\n%s/%d's [current] stackdump:\n\n", - current->comm, task_pid_nr(current)); - dump_stack(); - debug_show_all_locks(); - rcu_read_unlock(); - - printk("[ turning off deadlock detection." - "Please report this trace. 
]\n\n"); -} - void debug_rt_mutex_lock(struct rt_mutex *lock) { } @@ -153,12 +60,10 @@ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) { memset(waiter, 0x11, sizeof(*waiter)); - waiter->deadlock_task_pid = NULL; } void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) { - put_pid(waiter->deadlock_task_pid); memset(waiter, 0x22, sizeof(*waiter)); } @@ -168,10 +73,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_cl * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lock->name = name; #ifdef CONFIG_DEBUG_LOCK_ALLOC lockdep_init_map(&lock->dep_map, name, key, 0); #endif } - diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h index 772c9b012b62..659e93e256c6 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -18,18 +18,9 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner); extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *waiter, - struct rt_mutex *lock); -extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk walk) { return (waiter != NULL); } - -static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) -{ - debug_rt_mutex_print_deadlock(w); -} diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 11abc60d1478..4beca549aeeb 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -579,7 +579,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { - debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; goto out_unlock_pi; @@ -1171,8 +1170,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, raw_spin_unlock_irq(&lock->wait_lock); - debug_rt_mutex_print_deadlock(waiter); - schedule(); raw_spin_lock_irq(&lock->wait_lock); @@ -1196,7 +1193,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Yell loudly and stop the task right here. 
*/ - rt_mutex_print_deadlock(w); + WARN(1, "rtmutex deadlock detected\n"); while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); @@ -1704,8 +1701,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } - debug_rt_mutex_print_deadlock(waiter); - return ret; } diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index 4dbdec15f1a0..d77cb8280aa6 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -18,13 +18,6 @@ #define debug_rt_mutex_proxy_unlock(l) do { } while (0) #define debug_rt_mutex_unlock(l) do { } while (0) #define debug_rt_mutex_init(m, n, k) do { } while (0) -#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) -#define debug_rt_mutex_print_deadlock(w) do { } while (0) - -static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) -{ - WARN(1, "rtmutex deadlock detected\n"); -} static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w, enum rtmutex_chainwalk walk) diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index aa047436dadf..badb2a2803aa 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -29,10 +29,6 @@ struct rt_mutex_waiter { struct rb_node pi_tree_entry; struct task_struct *task; struct rt_mutex *lock; -#ifdef CONFIG_DEBUG_RT_MUTEXES - struct pid *deadlock_task_pid; - struct rt_mutex *deadlock_lock; -#endif int prio; u64 deadline; }; -- cgit v1.2.3 From 199cacd1a625cfc499d624b98b10dc763062f7dd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 26 Mar 2021 16:29:33 +0100 Subject: locking/rtmutex: Consolidate rt_mutex_init() rt_mutex_init() only initializes lockdep if CONFIG_DEBUG_RT_MUTEXES is enabled, which is fine because all lockdep variants select it, but there is no reason to do so. Move the function outside of the CONFIG_DEBUG_RT_MUTEXES block which removes #ifdeffery. 
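For reference, a minimal usage sketch (hypothetical caller, not part of the patch), showing that every configuration now funnels through __rt_mutex_init() with a static lock class key:

	static struct rt_mutex my_lock;		/* hypothetical lock */

	static int __init my_module_init(void)
	{
		/* Expands to __rt_mutex_init(&my_lock, __func__, &__key) in all configs. */
		rt_mutex_init(&my_lock);
		return 0;
	}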
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.437405350@linutronix.de --- include/linux/rtmutex.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 0725c4b45749..243fabc2c85f 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -43,6 +43,7 @@ struct hrtimer_sleeper; extern int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len); extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); + extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else static inline int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len) @@ -50,22 +51,15 @@ struct hrtimer_sleeper; return 0; } # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +# define rt_mutex_debug_task_free(t) do { } while (0) #endif -#ifdef CONFIG_DEBUG_RT_MUTEXES - -# define rt_mutex_init(mutex) \ +#define rt_mutex_init(mutex) \ do { \ static struct lock_class_key __key; \ __rt_mutex_init(mutex, __func__, &__key); \ } while (0) - extern void rt_mutex_debug_task_free(struct task_struct *tsk); -#else -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL) -# define rt_mutex_debug_task_free(t) do { } while (0) -#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ , .dep_map = { .name = #mutexname } -- cgit v1.2.3 From 8188d74e68174b11ff7c4a635ffc8fd31eacc6b9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Mar 2021 16:29:34 +0100 Subject: locking/rtmutex: Remove empty and unused debug stubs They either have no users or are useless, and are therefore just ballast.
Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20210326153943.549192485@linutronix.de --- include/linux/rtmutex.h | 14 ++------------ kernel/locking/rtmutex-debug.c | 9 --------- kernel/locking/rtmutex-debug.h | 3 --- kernel/locking/rtmutex.c | 18 ------------------ kernel/locking/rtmutex.h | 2 -- 5 files changed, 2 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 243fabc2c85f..d1672de9ca89 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -40,18 +40,9 @@ struct rt_mutex_waiter; struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES - extern int rt_mutex_debug_check_no_locks_freed(const void *from, - unsigned long len); - extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); - extern void rt_mutex_debug_task_free(struct task_struct *tsk); +extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else - static inline int rt_mutex_debug_check_no_locks_freed(const void *from, - unsigned long len) - { - return 0; - } -# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) -# define rt_mutex_debug_task_free(t) do { } while (0) +static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { } #endif #define rt_mutex_init(mutex) \ @@ -88,7 +79,6 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) } extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); -extern void rt_mutex_destroy(struct rt_mutex *lock); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index fb150100335f..df584c91710b 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -38,20 +38,11 @@ void rt_mutex_debug_task_free(struct task_struct *task) DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); } -void debug_rt_mutex_lock(struct rt_mutex *lock) -{ -} - void debug_rt_mutex_unlock(struct rt_mutex *lock) { DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); } -void -debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) -{ -} - void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) { DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h index 659e93e256c6..392468d7253f 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -13,10 +13,7 @@ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); -extern void debug_rt_mutex_lock(struct rt_mutex *lock); extern void debug_rt_mutex_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner); extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 4beca549aeeb..96c7c537eab4 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -885,9 +885,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock(&task->pi_lock); takeit: - /* We got the lock. 
*/ - debug_rt_mutex_lock(lock); - /* * This either preserves the RT_MUTEX_HAS_WAITERS bit if there * are still waiters or clears it. @@ -1580,20 +1577,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) rt_mutex_postunlock(&wake_q); } -/** - * rt_mutex_destroy - mark a mutex unusable - * @lock: the mutex to be destroyed - * - * This function marks the mutex uninitialized, and any subsequent - * use of the mutex is forbidden. The mutex must not be locked when - * this function is called. - */ -void rt_mutex_destroy(struct rt_mutex *lock) -{ - WARN_ON(rt_mutex_is_locked(lock)); -} -EXPORT_SYMBOL_GPL(rt_mutex_destroy); - /** * __rt_mutex_init - initialize the rt_mutex * @@ -1635,7 +1618,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); - debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index d77cb8280aa6..1e484abc94ae 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -13,8 +13,6 @@ #define debug_rt_mutex_init_waiter(w) do { } while (0) #define debug_rt_mutex_free_waiter(w) do { } while (0) -#define debug_rt_mutex_lock(l) do { } while (0) -#define debug_rt_mutex_proxy_lock(l,p) do { } while (0) #define debug_rt_mutex_proxy_unlock(l) do { } while (0) #define debug_rt_mutex_unlock(l) do { } while (0) #define debug_rt_mutex_init(m, n, k) do { } while (0) -- cgit v1.2.3 From 9432bbd969c667fc9c4b1c140c5a745ff2a7b540 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 23 Mar 2021 16:49:03 +0100 Subject: static_call: Relax static_call_update() function argument type static_call_update() had stronger type requirements than regular C, relax them to match. Instead of requiring the @func argument has the exact matching type, allow any type which C is willing to promote to the right (function) pointer type. Specifically this allows (void *) arguments. This cleans up a bunch of static_call_update() callers for PREEMPT_DYNAMIC and should get around silly GCC11 warnings for free. 
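A before/after sketch of a call site (hypothetical names, mirroring the sched/core.c hunk below):

	static int my_default_func(int x) { return x; }
	static int my_other_func(int x) { return x + 1; }

	DEFINE_STATIC_CALL(my_hook, my_default_func);

	static void my_reconfigure(void)
	{
		/* Before the change this needed (typeof(&my_default_func))NULL: */
		static_call_update(my_hook, NULL);
		/* And this needed a cast to the exact function-pointer type: */
		static_call_update(my_hook, (void *)&my_other_func);
	}

Internally this works because the macro now assigns the argument to a local of the trampoline's type, typeof(&STATIC_CALL_TRAMP(name)) __F = (func);, so the usual C pointer conversions apply instead of the strict __same_type() check.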
Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/YFoN7nCl8OfGtpeh@hirez.programming.kicks-ass.net --- include/linux/static_call.h | 4 ++-- kernel/sched/core.c | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 85ecc789f4ff..8d50f62420ca 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -113,9 +113,9 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #define static_call_update(name, func) \ ({ \ - BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \ + typeof(&STATIC_CALL_TRAMP(name)) __F = (func); \ __static_call_update(&STATIC_CALL_KEY(name), \ - STATIC_CALL_TRAMP_ADDR(name), func); \ + STATIC_CALL_TRAMP_ADDR(name), __F); \ }) #ifdef CONFIG_HAVE_STATIC_CALL_INLINE diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 98191218d891..67f989001894 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5396,25 +5396,25 @@ static void sched_dynamic_update(int mode) switch (mode) { case preempt_dynamic_none: static_call_update(cond_resched, __cond_resched); - static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); - static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); - static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); - static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); + static_call_update(might_resched, (void *)&__static_call_return0); + static_call_update(preempt_schedule, NULL); + static_call_update(preempt_schedule_notrace, NULL); + static_call_update(irqentry_exit_cond_resched, NULL); pr_info("Dynamic Preempt: none\n"); break; case preempt_dynamic_voluntary: static_call_update(cond_resched, __cond_resched); static_call_update(might_resched, __cond_resched); - static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); - static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); - static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); + static_call_update(preempt_schedule, NULL); + static_call_update(preempt_schedule_notrace, NULL); + static_call_update(irqentry_exit_cond_resched, NULL); pr_info("Dynamic Preempt: voluntary\n"); break; case preempt_dynamic_full: - static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0); - static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); + static_call_update(cond_resched, (void *)&__static_call_return0); + static_call_update(might_resched, (void *)&__static_call_return0); static_call_update(preempt_schedule, __preempt_schedule_func); static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); -- cgit v1.2.3