From d3febf16dee28a74b01ba43195ee4965edb6208f Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Fri, 19 Dec 2025 16:40:08 +0100
Subject: locking/local_lock: Support Clang's context analysis

Add support for Clang's context analysis for local_lock_t and
local_trylock_t.
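
For example, with the analysis enabled, accesses to guarded per-CPU data
are checked against the lock at compile time. An illustrative sketch
(the struct and function below are made up for this message, mirroring
the pattern used in lib/test_context-analysis.c):

	struct counters {
		local_lock_t lock;
		/* Analysis warns if accessed without holding @lock. */
		int count __guarded_by(&lock);
	};
	static DEFINE_PER_CPU(struct counters, counters) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void inc_counters(void)
	{
		local_lock(&counters.lock);
		this_cpu_add(counters.count, 1);	/* ok: lock held */
		local_unlock(&counters.lock);
		/* Accessing counters.count here would now warn. */
	}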

Signed-off-by: Marco Elver
Signed-off-by: Peter Zijlstra (Intel)
Link: https://patch.msgid.link/20251219154418.3592607-20-elver@google.com
---
 include/linux/local_lock.h | 51 +++++++++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 21 deletions(-)

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index b0e6ab329b00..99c06e499375 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -14,13 +14,13 @@
  * local_lock - Acquire a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_lock(lock)	__local_lock(this_cpu_ptr(lock))
+#define local_lock(lock)	__local_lock(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irq - Acquire a per CPU local lock and disable interrupts
  * @lock:	The lock variable
  */
-#define local_lock_irq(lock)	__local_lock_irq(this_cpu_ptr(lock))
+#define local_lock_irq(lock)	__local_lock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -29,19 +29,19 @@
  * @flags:	Storage for interrupt flags
  */
 #define local_lock_irqsave(lock, flags) \
-	__local_lock_irqsave(this_cpu_ptr(lock), flags)
+	__local_lock_irqsave(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
  */
-#define local_unlock(lock)	__local_unlock(this_cpu_ptr(lock))
+#define local_unlock(lock)	__local_unlock(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irq - Release a per CPU local lock and enable interrupts
  * @lock:	The lock variable
  */
-#define local_unlock_irq(lock)	__local_unlock_irq(this_cpu_ptr(lock))
+#define local_unlock_irq(lock)	__local_unlock_irq(__this_cpu_local_lock(lock))
 
 /**
  * local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -50,7 +50,7 @@
  * @flags:	Interrupt flags to restore
  */
 #define local_unlock_irqrestore(lock, flags) \
-	__local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+	__local_unlock_irqrestore(__this_cpu_local_lock(lock), flags)
 
 /**
  * local_trylock_init - Runtime initialize a lock instance
@@ -66,7 +66,7 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define local_trylock(lock)	__local_trylock(this_cpu_ptr(lock))
+#define local_trylock(lock)	__local_trylock(__this_cpu_local_lock(lock))
 
 #define local_lock_is_locked(lock) __local_lock_is_locked(lock)
 
@@ -81,27 +81,36 @@
  * HARDIRQ context on PREEMPT_RT.
  */
 #define local_trylock_irqsave(lock, flags) \
-	__local_trylock_irqsave(this_cpu_ptr(lock), flags)
-
-DEFINE_GUARD(local_lock, local_lock_t __percpu*,
-	     local_lock(_T),
-	     local_unlock(_T))
-DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
-	     local_lock_irq(_T),
-	     local_unlock_irq(_T))
+	__local_trylock_irqsave(__this_cpu_local_lock(lock), flags)
+
+DEFINE_LOCK_GUARD_1(local_lock, local_lock_t __percpu,
+		    local_lock(_T->lock),
+		    local_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1(local_lock_irq, local_lock_t __percpu,
+		    local_lock_irq(_T->lock),
+		    local_unlock_irq(_T->lock))
 DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
 		    local_lock_irqsave(_T->lock, _T->flags),
 		    local_unlock_irqrestore(_T->lock, _T->flags),
 		    unsigned long flags)
 
 #define local_lock_nested_bh(_lock) \
-	__local_lock_nested_bh(this_cpu_ptr(_lock))
+	__local_lock_nested_bh(__this_cpu_local_lock(_lock))
 
 #define local_unlock_nested_bh(_lock) \
-	__local_unlock_nested_bh(this_cpu_ptr(_lock))
-
-DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
-	     local_lock_nested_bh(_T),
-	     local_unlock_nested_bh(_T))
+	__local_unlock_nested_bh(__this_cpu_local_lock(_lock))
+
+DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
+		    local_lock_nested_bh(_T->lock),
+		    local_unlock_nested_bh(_T->lock))
+
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irq, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
+#define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
 
 #endif
--
cgit v1.2.3

From d084a73714f818ce509022e1aa9483cabf797c16 Mon Sep 17 00:00:00 2001
From: Marco Elver
Date: Mon, 19 Jan 2026 10:05:52 +0100
Subject: compiler-context-analysis: Introduce scoped init guards

Add scoped init guard definitions for common synchronization primitives
supported by context analysis.

The scoped init guards treat the context as active within the
initialization scope of the underlying context lock, given that
initialization implies exclusive access to the underlying object. This
allows initialization of guarded members without disabling context
analysis, while clearly separating initialization from subsequent usage.
The documentation is updated with the new recommendation.

Where scoped init guards are not provided or cannot be implemented
(ww_mutex is omitted for lack of multi-arg guard initializers), the
alternative is to simply disable context analysis where guarded members
are initialized.
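
For example (a sketch mirroring the documentation example added by this
patch), an init function can now initialize guarded members without
resorting to context_unsafe():

	struct my_data {
		spinlock_t lock;
		int counter __guarded_by(&lock);
	};

	void init_my_data(struct my_data *d)
	{
		guard(spinlock_init)(&d->lock);
		d->counter = 0;		/* context active: no warning */
	}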

Suggested-by: Peter Zijlstra
Signed-off-by: Marco Elver
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/all/20251212095943.GM3911114@noisy.programming.kicks-ass.net/
Link: https://patch.msgid.link/20260119094029.1344361-3-elver@google.com
---
 Documentation/dev-tools/context-analysis.rst | 30 +++++++++++++++++++++++++---
 include/linux/compiler-context-analysis.h    |  9 ++-------
 include/linux/local_lock.h                   |  8 ++++++++
 include/linux/local_lock_internal.h          |  1 +
 include/linux/mutex.h                        |  3 +++
 include/linux/rwsem.h                        |  4 ++++
 include/linux/seqlock.h                      |  5 +++++
 include/linux/spinlock.h                     | 12 +++++++++++
 lib/test_context-analysis.c                  | 16 +++++++--------
 9 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
index e69896e597b6..54d9ee28de98 100644
--- a/Documentation/dev-tools/context-analysis.rst
+++ b/Documentation/dev-tools/context-analysis.rst
@@ -83,9 +83,33 @@ Currently the following synchronization primitives are supported:
 `bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`,
 `ww_mutex`.
 
-For context locks with an initialization function (e.g., `spin_lock_init()`),
-calling this function before initializing any guarded members or globals
-prevents the compiler from issuing warnings about unguarded initialization.
+To initialize variables guarded by a context lock with an initialization
+function (``type_init(&lock)``), prefer using ``guard(type_init)(&lock)`` or
+``scoped_guard(type_init, &lock) { ... }`` to initialize such guarded members
+or globals in the enclosing scope. This initializes the context lock and treats
+the context as active within the initialization scope (initialization implies
+exclusive access to the underlying object).
+
+For example::
+
+	struct my_data {
+		spinlock_t lock;
+		int counter __guarded_by(&lock);
+	};
+
+	void init_my_data(struct my_data *d)
+	{
+		...
+		guard(spinlock_init)(&d->lock);
+		d->counter = 0;
+		...
+	}
+
+Alternatively, initializing guarded variables can be done with context analysis
+disabled, preferably in the smallest possible scope (due to lack of any other
+checking): either with a ``context_unsafe(var = init)`` expression, or by
+marking small initialization functions with the ``__context_unsafe(init)``
+attribute.
 
 Lockdep assertions, such as `lockdep_assert_held()`, inform the compiler's
 context analysis that the associated synchronization primitive is held after
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index e86b8a3c2f89..00c074a2ccb0 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -32,13 +32,8 @@
 /*
  * The "assert_capability" attribute is a bit confusingly named. It does not
  * generate a check. Instead, it tells the analysis to *assume* the capability
- * is held. This is used for:
- *
- * 1. Augmenting runtime assertions, that can then help with patterns beyond the
- *    compiler's static reasoning abilities.
- *
- * 2. Initialization of context locks, so we can access guarded variables right
- *    after initialization (nothing else should access the same object yet).
+ * is held. This is used for augmenting runtime assertions, that can then help
+ * with patterns beyond the compiler's static reasoning abilities.
  */
 # define __assumes_ctx_lock(...)	__attribute__((assert_capability(__VA_ARGS__)))
 # define __assumes_shared_ctx_lock(...)	__attribute__((assert_shared_capability(__VA_ARGS__)))
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 99c06e499375..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -104,6 +104,8 @@ DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
 		    local_lock_nested_bh(_T->lock),
 		    local_unlock_nested_bh(_T->lock))
 
+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
 #define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
@@ -112,5 +114,11 @@ DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(loca
 #define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
 #define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)
 
 #endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 7843ab9059c2..ed2f3fb4c360 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #ifndef CONFIG_PREEMPT_RT
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 89977c215cbd..6b12009351d2 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -254,6 +254,7 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_a
 DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
 DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
 DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)
 
 DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
 #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
@@ -261,6 +262,8 @@ DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex
 #define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
 #define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)
 
 extern unsigned long mutex_get_owner(struct mutex *lock);
 
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da14a08a4e1..ea1bbdb57a47 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -280,6 +280,10 @@ DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct
 DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
 #define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
 
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
+
 /*
  * downgrade write lock to read lock
  */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c00063dffba3..077c8d5b2afd 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
  */
 
 #include
+#include
 #include
 #include
 #include
@@ -1358,4 +1359,8 @@ static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
 #define scoped_seqlock_read(_seqlock, _target) \
 	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
 
+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
 #endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 396b8c5d6c1b..7b11991c742a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -582,6 +582,10 @@ DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
 DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
 #define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)
 
+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
+
 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 		    spin_lock(_T->lock),
 		    spin_unlock(_T->lock))
@@ -626,6 +630,10 @@ DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
 DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
 #define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)
 
+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
+
 DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
 		    read_lock(_T->lock),
 		    read_unlock(_T->lock))
@@ -664,5 +672,9 @@ DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
 DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
 #define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)
 
+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 1c5a381461fc..0f05943d957f 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -35,7 +35,7 @@ static void __used test_common_helpers(void)
 	};								\
 	static void __used test_##class##_init(struct test_##class##_data *d)	\
 	{								\
-		type_init(&d->lock);					\
+		guard(type_init)(&d->lock);				\
 		d->counter = 0;						\
 	}								\
 	static void __used test_##class(struct test_##class##_data *d)	\
@@ -83,7 +83,7 @@
 
 TEST_SPINLOCK_COMMON(raw_spinlock,
 		     raw_spinlock_t,
-		     raw_spin_lock_init,
+		     raw_spinlock_init,
 		     raw_spin_lock,
 		     raw_spin_unlock,
 		     raw_spin_trylock,
@@ -109,7 +109,7 @@ static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data
 
 TEST_SPINLOCK_COMMON(spinlock,
 		     spinlock_t,
-		     spin_lock_init,
+		     spinlock_init,
 		     spin_lock,
 		     spin_unlock,
 		     spin_trylock,
@@ -163,7 +163,7 @@ struct test_mutex_data {
 
 static void __used test_mutex_init(struct test_mutex_data *d)
 {
-	mutex_init(&d->mtx);
+	guard(mutex_init)(&d->mtx);
 	d->counter = 0;
 }
 
@@ -226,7 +226,7 @@ struct test_seqlock_data {
 
 static void __used test_seqlock_init(struct test_seqlock_data *d)
 {
-	seqlock_init(&d->sl);
+	guard(seqlock_init)(&d->sl);
 	d->counter = 0;
 }
 
@@ -275,7 +275,7 @@ struct test_rwsem_data {
 
 static void __used test_rwsem_init(struct test_rwsem_data *d)
 {
-	init_rwsem(&d->sem);
+	guard(rwsem_init)(&d->sem);
 	d->counter = 0;
 }
 
@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {
 
 static void __used test_local_lock_init(struct test_local_lock_data *d)
 {
-	local_lock_init(&d->lock);
+	guard(local_lock_init)(&d->lock);
 	d->counter = 0;
 }
 
@@ -519,7 +519,7 @@ static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) =
 
 static void __used test_local_trylock_init(struct test_local_trylock_data *d)
 {
-	local_trylock_init(&d->lock);
+	guard(local_trylock_init)(&d->lock);
 	d->counter = 0;
 }
 
--
cgit v1.2.3