From 0cce06ba859a515bd06224085d3addb870608b6d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Apr 2023 17:03:13 +0200 Subject: debugobjects,locking: Annotate debug_object_fill_pool() wait type violation There is an explicit wait-type violation in debug_object_fill_pool() for PREEMPT_RT=n kernels which allows them to more easily fill the object pool and reduce the chance of allocation failures. Lockdep's wait-type checks are designed to check the PREEMPT_RT locking rules even for PREEMPT_RT=n kernels and object to this, so create a lockdep annotation to allow this to stand. Specifically, create a 'lock' type that overrides the inner wait-type while it is held -- allowing one to temporarily raise it, such that the violation is hidden. Reported-by: Vlastimil Babka Reported-by: Qi Zheng Signed-off-by: Peter Zijlstra (Intel) Tested-by: Qi Zheng Link: https://lkml.kernel.org/r/20230429100614.GA1489784@hirez.programming.kicks-ass.net --- kernel/locking/lockdep.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 50d4863974e7..62ef295e07e6 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2253,6 +2253,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask) static inline bool usage_skip(struct lock_list *entry, void *mask) { + if (entry->class->lock_type == LD_LOCK_NORMAL) + return false; + /* * Skip local_lock() for irq inversion detection. * @@ -2279,14 +2282,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask) * As a result, we will skip local_lock(), when we search for irq * inversion bugs. */ - if (entry->class->lock_type == LD_LOCK_PERCPU) { - if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) - return false; + if (entry->class->lock_type == LD_LOCK_PERCPU && + DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) + return false; - return true; - } + /* + * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually + * a lock and only used to override the wait_type. + */ - return false; + return true; } /* @@ -4752,7 +4757,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next) for (; depth < curr->lockdep_depth; depth++) { struct held_lock *prev = curr->held_locks + depth; - u8 prev_inner = hlock_class(prev)->wait_type_inner; + struct lock_class *class = hlock_class(prev); + u8 prev_inner = class->wait_type_inner; if (prev_inner) { /* @@ -4762,6 +4768,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next) * Also due to trylocks. */ curr_inner = min(curr_inner, prev_inner); + + /* + * Allow override for annotations -- this is typically + * only valid/needed for code that only exists when + * CONFIG_PREEMPT_RT=n. + */ + if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE)) + curr_inner = prev_inner; } } -- cgit v1.2.3 From 92cc5d00a431e96e5a49c0b97e5ad4fa7536bd4b Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 3 May 2023 02:33:51 +0000 Subject: locking/rwsem: Add __always_inline annotation to __down_read_common() and inlined callers Apparently despite it being marked inline, the compiler may not inline __down_read_common() which makes it difficult to identify the cause of lock contention, as the blocked function in traceevents will always be listed as __down_read_common(). 
So this patch adds __always_inline annotation to the common function (as well as the inlined helper callers) to force it to be inlined so the blocking function will be listed (via Wchan) in traceevents. Fixes: c995e638ccbb ("locking/rwsem: Fold __down_{read,write}*()") Reported-by: Tim Murray Signed-off-by: John Stultz Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Waiman Long Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20230503023351.2832796-1-jstultz@google.com --- kernel/locking/rwsem.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index acb5a50309a1..9eabd585ce7a 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1240,7 +1240,7 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) /* * lock for reading */ -static inline int __down_read_common(struct rw_semaphore *sem, int state) +static __always_inline int __down_read_common(struct rw_semaphore *sem, int state) { int ret = 0; long count; @@ -1258,17 +1258,17 @@ out: return ret; } -static inline void __down_read(struct rw_semaphore *sem) +static __always_inline void __down_read(struct rw_semaphore *sem) { __down_read_common(sem, TASK_UNINTERRUPTIBLE); } -static inline int __down_read_interruptible(struct rw_semaphore *sem) +static __always_inline int __down_read_interruptible(struct rw_semaphore *sem) { return __down_read_common(sem, TASK_INTERRUPTIBLE); } -static inline int __down_read_killable(struct rw_semaphore *sem) +static __always_inline int __down_read_killable(struct rw_semaphore *sem) { return __down_read_common(sem, TASK_KILLABLE); } -- cgit v1.2.3 From 1d1bfe30dad50d4bea83cd38d73c441972ea0173 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Tue, 25 Apr 2023 10:32:17 +0000 Subject: perf/core: Fix perf_sample_data not properly initialized for different swevents in perf_tp_event() data->sample_flags may be modified in perf_prepare_sample(), in perf_tp_event(), different swevents use the same on-stack perf_sample_data, the previous swevent may change sample_flags in perf_prepare_sample(), as a result, some members of perf_sample_data are not correctly initialized when next swevent_event preparing sample (for example data->id, the value varies according to swevent). A simple scenario triggers this problem is as follows: # perf record -e sched:sched_switch --switch-output-event sched:sched_switch -a sleep 1 [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209014396 ] [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209014662 ] [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209014910 ] [ perf record: Woken up 0 times to write data ] [ perf record: Dump perf.data.2023041209015164 ] [ perf record: Captured and wrote 0.069 MB perf.data. ] # ls -l total 860 -rw------- 1 root root 95694 Apr 12 09:01 perf.data.2023041209014396 -rw------- 1 root root 606430 Apr 12 09:01 perf.data.2023041209014662 -rw------- 1 root root 82246 Apr 12 09:01 perf.data.2023041209014910 -rw------- 1 root root 82342 Apr 12 09:01 perf.data.2023041209015164 # perf script -i perf.data.2023041209014396 0x11d58 [0x80]: failed to process type: 9 [Bad address] Solution: Re-initialize perf_sample_data after each event is processed. Note that data->raw->frag.data may be accessed in perf_tp_event_match(). 
Therefore, need to init sample_data and then go through swevent hlist to prevent reference of NULL pointer, reported by [1]. After fix: # perf record -e sched:sched_switch --switch-output-event sched:sched_switch -a sleep 1 [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209442259 ] [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209442514 ] [ perf record: dump data: Woken up 0 times ] [ perf record: Dump perf.data.2023041209442760 ] [ perf record: Woken up 0 times to write data ] [ perf record: Dump perf.data.2023041209443003 ] [ perf record: Captured and wrote 0.069 MB perf.data. ] # ls -l total 864 -rw------- 1 root root 100166 Apr 12 09:44 perf.data.2023041209442259 -rw------- 1 root root 606438 Apr 12 09:44 perf.data.2023041209442514 -rw------- 1 root root 82246 Apr 12 09:44 perf.data.2023041209442760 -rw------- 1 root root 82342 Apr 12 09:44 perf.data.2023041209443003 # perf script -i perf.data.2023041209442259 | head -n 5 perf 232 [000] 66.846217: sched:sched_switch: prev_comm=perf prev_pid=232 prev_prio=120 prev_state=D ==> next_comm=perf next_pid=234 next_prio=120 perf 234 [000] 66.846449: sched:sched_switch: prev_comm=perf prev_pid=234 prev_prio=120 prev_state=S ==> next_comm=perf next_pid=232 next_prio=120 perf 232 [000] 66.846546: sched:sched_switch: prev_comm=perf prev_pid=232 prev_prio=120 prev_state=R ==> next_comm=perf next_pid=234 next_prio=120 perf 234 [000] 66.846606: sched:sched_switch: prev_comm=perf prev_pid=234 prev_prio=120 prev_state=S ==> next_comm=perf next_pid=232 next_prio=120 perf 232 [000] 66.846646: sched:sched_switch: prev_comm=perf prev_pid=232 prev_prio=120 prev_state=R ==> next_comm=perf next_pid=234 next_prio=120 [1] Link: https://lore.kernel.org/oe-lkp/202304250929.efef2caa-yujie.liu@intel.com Fixes: bb447c27a467 ("perf/core: Set data->sample_flags in perf_prepare_sample()") Signed-off-by: Yang Jihong Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20230425103217.130600-1-yangjihong1@huawei.com --- kernel/events/core.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 68baa8194d9f..db016e418931 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10150,8 +10150,20 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, perf_trace_buf_update(record, event_type); hlist_for_each_entry_rcu(event, head, hlist_entry) { - if (perf_tp_event_match(event, &data, regs)) + if (perf_tp_event_match(event, &data, regs)) { perf_swevent_event(event, count, &data, regs); + + /* + * Here use the same on-stack perf_sample_data, + * some members in data are event-specific and + * need to be re-computed for different sweveents. + * Re-initialize data->sample_flags safely to avoid + * the problem that next event skips preparing data + * because data->sample_flags is set. + */ + perf_sample_data_init(&data, 0, 0); + perf_sample_save_raw_data(&data, &raw); + } } /* -- cgit v1.2.3 From 0019a2d4b7e37a983d133d42b707b8a3018ae6f4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 27 Apr 2023 20:11:11 -0700 Subject: sched: fix cid_lock kernel-doc warnings Fix kernel-doc warnings for cid_lock and use_cid_lock. These comments are not in kernel-doc format. kernel/sched/core.c:11496: warning: Cannot understand * @cid_lock: Guarantee forward-progress of cid allocation. 
on line 11496 - I thought it was a doc line kernel/sched/core.c:11505: warning: Cannot understand * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. on line 11505 - I thought it was a doc line Fixes: 223baf9d17f2 ("sched: Fix performance regression introduced by mm_cid") Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20230428031111.322-1-rdunlap@infradead.org --- kernel/sched/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 944c3ae39861..a68d1276bab0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11492,7 +11492,7 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) #ifdef CONFIG_SCHED_MM_CID -/** +/* * @cid_lock: Guarantee forward-progress of cid allocation. * * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock @@ -11501,7 +11501,7 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count) */ DEFINE_RAW_SPINLOCK(cid_lock); -/** +/* * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. * * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is -- cgit v1.2.3 From f9d36cf445ffff0b913ba187a3eff78028f9b1fb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 6 May 2023 18:40:57 +0200 Subject: tick/broadcast: Make broadcast device replacement work correctly When a tick broadcast clockevent device is initialized for one shot mode then tick_broadcast_setup_oneshot() OR's the periodic broadcast mode cpumask into the oneshot broadcast cpumask. This is required when switching from periodic broadcast mode to oneshot broadcast mode to ensure that CPUs which are waiting for periodic broadcast are woken up on the next tick. But it is subtly broken, when an active broadcast device is replaced and the system is already in oneshot (NOHZ/HIGHRES) mode. Victor observed this and debugged the issue. Then the OR of the periodic broadcast CPU mask is wrong as the periodic cpumask bits are sticky after tick_broadcast_enable() set it for a CPU unless explicitly cleared via tick_broadcast_disable(). That means that this sets all other CPUs which have tick broadcasting enabled at that point unconditionally in the oneshot broadcast mask. If the affected CPUs were already idle and had their bits set in the oneshot broadcast mask then this does no harm. But for non idle CPUs which were not set this corrupts their state. On their next invocation of tick_broadcast_enable() they observe the bit set, which indicates that the broadcast for the CPU is already set up. As a consequence they fail to update the broadcast event even if their earliest expiring timer is before the actually programmed broadcast event. If the programmed broadcast event is far in the future, then this can cause stalls or trigger the hung task detector. Avoid this by telling tick_broadcast_setup_oneshot() explicitly whether this is the initial switch over from periodic to oneshot broadcast which must take the periodic broadcast mask into account. In the case of initialization of a replacement device this prevents that the broadcast oneshot mask is modified. There is a second problem with broadcast device replacement in this function. The broadcast device is only armed when the previous state of the device was periodic. 
That is correct for the switch from periodic broadcast mode to oneshot broadcast mode as the underlying broadcast device could operate in oneshot state already due to lack of periodic state in hardware. In that case it is already armed to expire at the next tick. For the replacement case this is wrong as the device is in shutdown state. That means that any already pending broadcast event will not be armed. This went unnoticed because any CPU which goes idle will observe that the broadcast device has an expiry time of KTIME_MAX and therefore any CPUs next timer event will be earlier and cause a reprogramming of the broadcast device. But that does not guarantee that the events of the CPUs which were already in idle are delivered on time. Fix this by arming the newly installed device for an immediate event which will reevaluate the per CPU expiry times and reprogram the broadcast device accordingly. This is simpler than caching the last expiry time in yet another place or saving it before the device exchange and handing it down to the setup function. Replacement of broadcast devices is not a frequent operation and usually happens once somewhere late in the boot process. Fixes: 9c336c9935cf ("tick/broadcast: Allow late registered device to enter oneshot mode") Reported-by: Victor Hassan Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/87pm7d2z1i.ffs@tglx --- kernel/time/tick-broadcast.c | 120 +++++++++++++++++++++++++++++++------------ 1 file changed, 88 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 93bf2b4e47e5..771d1e040303 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -35,14 +35,15 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); #ifdef CONFIG_TICK_ONESHOT static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device); -static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); +static void tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic); static void tick_broadcast_clear_oneshot(int cpu); static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); # ifdef CONFIG_HOTPLUG_CPU static void tick_broadcast_oneshot_offline(unsigned int cpu); # endif #else -static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } +static inline void +tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic) { BUG(); } static inline void tick_broadcast_clear_oneshot(int cpu) { } static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } # ifdef CONFIG_HOTPLUG_CPU @@ -264,7 +265,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) tick_broadcast_start_periodic(bc); else - tick_broadcast_setup_oneshot(bc); + tick_broadcast_setup_oneshot(bc, false); ret = 1; } else { /* @@ -500,7 +501,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) tick_broadcast_start_periodic(bc); else - tick_broadcast_setup_oneshot(bc); + tick_broadcast_setup_oneshot(bc, false); } } out: @@ -1020,48 +1021,101 @@ static inline ktime_t tick_get_next_period(void) /** * tick_broadcast_setup_oneshot - setup the broadcast device */ -static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) +static void tick_broadcast_setup_oneshot(struct 
clock_event_device *bc, + bool from_periodic) { int cpu = smp_processor_id(); + ktime_t nexttick = 0; if (!bc) return; - /* Set it up only once ! */ - if (bc->event_handler != tick_handle_oneshot_broadcast) { - int was_periodic = clockevent_state_periodic(bc); - - bc->event_handler = tick_handle_oneshot_broadcast; - + /* + * When the broadcast device was switched to oneshot by the first + * CPU handling the NOHZ change, the other CPUs will reach this + * code via hrtimer_run_queues() -> tick_check_oneshot_change() + * too. Set up the broadcast device only once! + */ + if (bc->event_handler == tick_handle_oneshot_broadcast) { /* - * We must be careful here. There might be other CPUs - * waiting for periodic broadcast. We need to set the - * oneshot_mask bits for those and program the - * broadcast device to fire. + * The CPU which switched from periodic to oneshot mode + * set the broadcast oneshot bit for all other CPUs which + * are in the general (periodic) broadcast mask to ensure + * that CPUs which wait for the periodic broadcast are + * woken up. + * + * Clear the bit for the local CPU as the set bit would + * prevent the first tick_broadcast_enter() after this CPU + * switched to oneshot state to program the broadcast + * device. + * + * This code can also be reached via tick_broadcast_control(), + * but this cannot avoid the tick_broadcast_clear_oneshot() + * as that would break the periodic to oneshot transition of + * secondary CPUs. But that's harmless as the below only + * clears already cleared bits. */ + tick_broadcast_clear_oneshot(cpu); + return; + } + + + bc->event_handler = tick_handle_oneshot_broadcast; + bc->next_event = KTIME_MAX; + + /* + * When the tick mode is switched from periodic to oneshot it must + * be ensured that CPUs which are waiting for periodic broadcast + * get their wake-up at the next tick. This is achieved by ORing + * tick_broadcast_mask into tick_broadcast_oneshot_mask. + * + * For other callers, e.g. broadcast device replacement, + * tick_broadcast_oneshot_mask must not be touched as this would + * set bits for CPUs which are already NOHZ, but not idle. Their + * next tick_broadcast_enter() would observe the bit set and fail + * to update the expiry time and the broadcast event device. + */ + if (from_periodic) { cpumask_copy(tmpmask, tick_broadcast_mask); + /* Remove the local CPU as it is obviously not idle */ cpumask_clear_cpu(cpu, tmpmask); - cpumask_or(tick_broadcast_oneshot_mask, - tick_broadcast_oneshot_mask, tmpmask); + cpumask_or(tick_broadcast_oneshot_mask, tick_broadcast_oneshot_mask, tmpmask); - if (was_periodic && !cpumask_empty(tmpmask)) { - ktime_t nextevt = tick_get_next_period(); + /* + * Ensure that the oneshot broadcast handler will wake the + * CPUs which are still waiting for periodic broadcast. + */ + nexttick = tick_get_next_period(); + tick_broadcast_init_next_event(tmpmask, nexttick); - clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT); - tick_broadcast_init_next_event(tmpmask, nextevt); - tick_broadcast_set_event(bc, cpu, nextevt); - } else - bc->next_event = KTIME_MAX; - } else { /* - * The first cpu which switches to oneshot mode sets - * the bit for all other cpus which are in the general - * (periodic) broadcast mask. So the bit is set and - * would prevent the first broadcast enter after this - * to program the bc device. + * If the underlying broadcast clock event device is + * already in oneshot state, then there is nothing to do. 
+ * The device was already armed for the next tick + * in tick_handle_broadcast_periodic() */ - tick_broadcast_clear_oneshot(cpu); + if (clockevent_state_oneshot(bc)) + return; } + + /* + * When switching from periodic to oneshot mode arm the broadcast + * device for the next tick. + * + * If the broadcast device has been replaced in oneshot mode and + * the oneshot broadcast mask is not empty, then arm it to expire + * immediately in order to reevaluate the next expiring timer. + * @nexttick is 0 and therefore in the past which will cause the + * clockevent code to force an event. + * + * For both cases the programming can be avoided when the oneshot + * broadcast mask is empty. + * + * tick_broadcast_set_event() implicitly switches the broadcast + * device to oneshot state. + */ + if (!cpumask_empty(tick_broadcast_oneshot_mask)) + tick_broadcast_set_event(bc, cpu, nexttick); } /* @@ -1070,14 +1124,16 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) void tick_broadcast_switch_to_oneshot(void) { struct clock_event_device *bc; + enum tick_device_mode oldmode; unsigned long flags; raw_spin_lock_irqsave(&tick_broadcast_lock, flags); + oldmode = tick_broadcast_device.mode; tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; bc = tick_broadcast_device.evtdev; if (bc) - tick_broadcast_setup_oneshot(bc); + tick_broadcast_setup_oneshot(bc, oldmode == TICKDEV_MODE_PERIODIC); raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } -- cgit v1.2.3 From e1505c1cc8d527fcc5bcaf9c1ad82eed817e3e10 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 5 May 2023 14:58:36 -0700 Subject: bpf: netdev: init the offload table earlier Some netdevices may get unregistered before late_initcall(), we have to move the hashtable init earlier. Fixes: f1fc43d03946 ("bpf: Move offload initialization into late_initcall") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217399 Signed-off-by: Jakub Kicinski Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230505215836.491485-1-kuba@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/offload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index d9c9f45e3529..8a26cd8814c1 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -859,4 +859,4 @@ static int __init bpf_offload_init(void) return rhashtable_init(&offdevs, &offdevs_params); } -late_initcall(bpf_offload_init); +core_initcall(bpf_offload_init); -- cgit v1.2.3 From 6049674b5720edbbb13a7cb3e2f3d2affaa40c19 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Sun, 9 Apr 2023 11:28:31 +0900 Subject: tracing: fprobe: Initialize ret valiable to fix smatch error The commit 39d954200bf6 ("fprobe: Skip exit_handler if entry_handler returns !0") introduced a hidden dependency of 'ret' local variable in the fprobe_handler(), Smatch warns the `ret` can be accessed without initialization. kernel/trace/fprobe.c:59 fprobe_handler() error: uninitialized symbol 'ret'. kernel/trace/fprobe.c 49 fpr->entry_ip = ip; 50 if (fp->entry_data_size) 51 entry_data = fpr->data; 52 } 53 54 if (fp->entry_handler) 55 ret = fp->entry_handler(fp, ip, ftrace_get_regs(fregs), entry_data); ret is only initialized if there is an ->entry_handler 56 57 /* If entry_handler returns !0, nmissed is not counted. */ 58 if (rh) { rh is only true if there is an ->exit_handler. 
Presumably if you have and ->exit_handler that means you also have a ->entry_handler but Smatch is not smart enough to figure it out. --> 59 if (ret) ^^^ Warning here. 60 rethook_recycle(rh); 61 else 62 rethook_hook(rh, ftrace_get_regs(fregs), true); 63 } 64 out: 65 ftrace_test_recursion_unlock(bit); 66 } Link: https://lore.kernel.org/all/168100731160.79534.374827110083836722.stgit@devnote2/ Reported-by: Dan Carpenter Link: https://lore.kernel.org/all/85429a5c-a4b9-499e-b6c0-cbd313291c49@kili.mountain Fixes: 39d954200bf6 ("fprobe: Skip exit_handler if entry_handler returns !0") Acked-by: Dan Carpenter Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/fprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 9abb3905bc8e..293184227394 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -27,7 +27,7 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, struct rethook_node *rh = NULL; struct fprobe *fp; void *entry_data = NULL; - int bit, ret; + int bit, ret = 0; fp = container_of(ops, struct fprobe, ops); if (fprobe_disabled(fp)) -- cgit v1.2.3 From be243bacfb25f5219f2396d787408e8cf1301dd1 Mon Sep 17 00:00:00 2001 From: Ze Gao Date: Wed, 17 May 2023 11:45:06 +0800 Subject: rethook: use preempt_{disable, enable}_notrace in rethook_trampoline_handler This patch replaces preempt_{disable, enable} with its corresponding notrace version in rethook_trampoline_handler so no worries about stack recursion or overflow introduced by preempt_count_{add, sub} under fprobe + rethook context. Link: https://lore.kernel.org/all/20230517034510.15639-2-zegao@tencent.com/ Fixes: 54ecbe6f1ed5 ("rethook: Add a generic return hook") Signed-off-by: Ze Gao Acked-by: Masami Hiramatsu (Google) Cc: Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/rethook.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c index 32c3dfdb4d6a..60f6cb2b486b 100644 --- a/kernel/trace/rethook.c +++ b/kernel/trace/rethook.c @@ -288,7 +288,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs, * These loops must be protected from rethook_free_rcu() because those * are accessing 'rhn->rethook'. */ - preempt_disable(); + preempt_disable_notrace(); /* * Run the handler on the shadow stack. Do not unlink the list here because @@ -321,7 +321,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs, first = first->next; rethook_recycle(rhn); } - preempt_enable(); + preempt_enable_notrace(); return correct_ret_addr; } -- cgit v1.2.3 From 3cc4e2c5fbae84e5033723fb7e350bc6c164e3a2 Mon Sep 17 00:00:00 2001 From: Ze Gao Date: Wed, 17 May 2023 11:45:07 +0800 Subject: fprobe: make fprobe_kprobe_handler recursion free Current implementation calls kprobe related functions before doing ftrace recursion check in fprobe_kprobe_handler, which opens door to kernel crash due to stack recursion if preempt_count_{add, sub} is traceable in kprobe_busy_{begin, end}. Things goes like this without this patch quoted from Steven: " fprobe_kprobe_handler() { kprobe_busy_begin() { preempt_disable() { preempt_count_add() { <-- trace fprobe_kprobe_handler() { [ wash, rinse, repeat, CRASH!!! ] " By refactoring the common part out of fprobe_kprobe_handler and fprobe_handler and call ftrace recursion detection at the very beginning, the whole fprobe_kprobe_handler is free from recursion. 
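For readers unfamiliar with the ftrace recursion API, the shape the refactored handlers converge on looks roughly like this; a sketch with made-up callback/helper names, not the actual diff below -- only ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() are the real core API:

	#include <linux/ftrace.h>

	static void my_ops_callback(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *ops, struct ftrace_regs *fregs)
	{
		int bit;

		/*
		 * The recursion check must run before anything traceable is
		 * called, otherwise the callback can re-enter itself.
		 */
		bit = ftrace_test_recursion_trylock(ip, parent_ip);
		if (bit < 0)
			return;		/* already running in this context, bail out */

		do_probe_work(ip, parent_ip, ops, fregs);	/* hypothetical helper */

		ftrace_test_recursion_unlock(bit);
	}
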
[ Fix the indentation of __fprobe_handler() parameters. ] Link: https://lore.kernel.org/all/20230517034510.15639-3-zegao@tencent.com/ Fixes: ab51e15d535e ("fprobe: Introduce FPROBE_FL_KPROBE_SHARED flag for fprobe") Signed-off-by: Ze Gao Acked-by: Masami Hiramatsu (Google) Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/fprobe.c | 59 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 293184227394..7a692c02f787 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -20,30 +20,22 @@ struct fprobe_rethook_node { char data[]; }; -static void fprobe_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct ftrace_regs *fregs) +static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) { struct fprobe_rethook_node *fpr; struct rethook_node *rh = NULL; struct fprobe *fp; void *entry_data = NULL; - int bit, ret = 0; + int ret = 0; fp = container_of(ops, struct fprobe, ops); - if (fprobe_disabled(fp)) - return; - - bit = ftrace_test_recursion_trylock(ip, parent_ip); - if (bit < 0) { - fp->nmissed++; - return; - } if (fp->exit_handler) { rh = rethook_try_get(fp->rethook); if (!rh) { fp->nmissed++; - goto out; + return; } fpr = container_of(rh, struct fprobe_rethook_node, node); fpr->entry_ip = ip; @@ -61,23 +53,60 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip, else rethook_hook(rh, ftrace_get_regs(fregs), true); } -out: +} + +static void fprobe_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct fprobe *fp; + int bit; + + fp = container_of(ops, struct fprobe, ops); + if (fprobe_disabled(fp)) + return; + + /* recursion detection has to go before any traceable function and + * all functions before this point should be marked as notrace + */ + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } + __fprobe_handler(ip, parent_ip, ops, fregs); ftrace_test_recursion_unlock(bit); + } NOKPROBE_SYMBOL(fprobe_handler); static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { - struct fprobe *fp = container_of(ops, struct fprobe, ops); + struct fprobe *fp; + int bit; + + fp = container_of(ops, struct fprobe, ops); + if (fprobe_disabled(fp)) + return; + + /* recursion detection has to go before any traceable function and + * all functions called before this point should be marked as notrace + */ + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } if (unlikely(kprobe_running())) { fp->nmissed++; return; } + kprobe_busy_begin(); - fprobe_handler(ip, parent_ip, ops, fregs); + __fprobe_handler(ip, parent_ip, ops, fregs); kprobe_busy_end(); + ftrace_test_recursion_unlock(bit); } static void fprobe_exit_handler(struct rethook_node *rh, void *data, -- cgit v1.2.3 From 2752741080f84f9b2fc93fa92735315d10a415bf Mon Sep 17 00:00:00 2001 From: Ze Gao Date: Wed, 17 May 2023 11:45:08 +0800 Subject: fprobe: add recursion detection in fprobe_exit_handler fprobe_hander and fprobe_kprobe_handler has guarded ftrace recursion detection but fprobe_exit_handler has not, which possibly introduce recursive calls if the fprobe exit callback calls any traceable functions. 
Checking in fprobe_hander or fprobe_kprobe_handler is not enough and misses this case. So add recursion free guard the same way as fprobe_hander. Since ftrace recursion check does not employ ip(s), so here use entry_ip and entry_parent_ip the same as fprobe_handler. Link: https://lore.kernel.org/all/20230517034510.15639-4-zegao@tencent.com/ Fixes: 5b0ab78998e3 ("fprobe: Add exit_handler support") Signed-off-by: Ze Gao Cc: stable@vger.kernel.org Acked-by: Masami Hiramatsu (Google) Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/fprobe.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 7a692c02f787..18d36842faf5 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -17,6 +17,7 @@ struct fprobe_rethook_node { struct rethook_node node; unsigned long entry_ip; + unsigned long entry_parent_ip; char data[]; }; @@ -39,6 +40,7 @@ static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip, } fpr = container_of(rh, struct fprobe_rethook_node, node); fpr->entry_ip = ip; + fpr->entry_parent_ip = parent_ip; if (fp->entry_data_size) entry_data = fpr->data; } @@ -114,14 +116,26 @@ static void fprobe_exit_handler(struct rethook_node *rh, void *data, { struct fprobe *fp = (struct fprobe *)data; struct fprobe_rethook_node *fpr; + int bit; if (!fp || fprobe_disabled(fp)) return; fpr = container_of(rh, struct fprobe_rethook_node, node); + /* + * we need to assure no calls to traceable functions in-between the + * end of fprobe_handler and the beginning of fprobe_exit_handler. + */ + bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip); + if (bit < 0) { + fp->nmissed++; + return; + } + fp->exit_handler(fp, fpr->entry_ip, regs, fp->entry_data_size ? (void *)fpr->data : NULL); + ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(fprobe_exit_handler); -- cgit v1.2.3 From 0613d8ca9ab382caabe9ed2dceb429e9781e443f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 18 May 2023 11:25:28 +0100 Subject: bpf: Fix mask generation for 32-bit narrow loads of 64-bit fields A narrow load from a 64-bit context field results in a 64-bit load followed potentially by a 64-bit right-shift and then a bitwise AND operation to extract the relevant data. In the case of a 32-bit access, an immediate mask of 0xffffffff is used to construct a 64-bit BPP_AND operation which then sign-extends the mask value and effectively acts as a glorified no-op. 
For example: 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) results in the following code generation for a 64-bit field: ldr x7, [x7] // 64-bit load mov x10, #0xffffffffffffffff and x7, x7, x10 Fix the mask generation so that narrow loads always perform a 32-bit AND operation: ldr x7, [x7] // 64-bit load mov w10, #0xffffffff and w7, w7, w10 Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: John Fastabend Cc: Krzesimir Nowak Cc: Andrey Ignatov Acked-by: Yonghong Song Fixes: 31fd85816dbe ("bpf: permits narrower load from bpf program context fields") Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230518102528.1341-1-will@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fbcf5a4e2fcd..5871aa78d01a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -17033,7 +17033,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift); - insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1ULL << size * 8) - 1); } } -- cgit v1.2.3 From b34ffb0c6d23583830f9327864b9c1f486003305 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 22 May 2023 15:45:58 +0000 Subject: bpf: fix a memory leak in the LRU and LRU_PERCPU hash maps The LRU and LRU_PERCPU maps allocate a new element on update before locking the target hash table bucket. Right after that the maps try to lock the bucket. If this fails, then maps return -EBUSY to the caller without releasing the allocated element. This makes the element untracked: it doesn't belong to either of free lists, and it doesn't belong to the hash table, so can't be re-used; this eventually leads to the permanent -ENOMEM on LRU map updates, which is unexpected. Fix this by returning the element to the local free list if bucket locking fails. 
Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked") Signed-off-by: Anton Protopopov Link: https://lore.kernel.org/r/20230522154558.2166815-1-aspsk@isovalent.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/hashtab.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 00c253b84bf5..9901efee4339 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1215,7 +1215,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value ret = htab_lock_bucket(htab, b, hash, &flags); if (ret) - return ret; + goto err_lock_bucket; l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1236,6 +1236,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value err: htab_unlock_bucket(htab, b, hash, flags); +err_lock_bucket: if (ret) htab_lru_push_free(htab, l_new); else if (l_old) @@ -1338,7 +1339,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ret = htab_lock_bucket(htab, b, hash, &flags); if (ret) - return ret; + goto err_lock_bucket; l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1361,6 +1362,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ret = 0; err: htab_unlock_bucket(htab, b, hash, flags); +err_lock_bucket: if (l_new) bpf_lru_push_free(&htab->lru, &l_new->lru_node); return ret; -- cgit v1.2.3 From d36f6efbe0cb422fe1e4475717d75f3737088832 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Thu, 27 Apr 2023 22:59:33 -0700 Subject: module: Fix use-after-free bug in read_file_mod_stats() Smatch warns: kernel/module/stats.c:394 read_file_mod_stats() warn: passing freed memory 'buf' We are passing 'buf' to simple_read_from_buffer() after freeing it. Fix this by changing the order of 'simple_read_from_buffer' and 'kfree'. Fixes: df3e764d8e5c ("module: add debug stats to help identify memory pressure") Signed-off-by: Harshit Mogalapalli Signed-off-by: Luis Chamberlain --- kernel/module/stats.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module/stats.c b/kernel/module/stats.c index ad7b6ada29f2..6ab2c94d6bc3 100644 --- a/kernel/module/stats.c +++ b/kernel/module/stats.c @@ -276,6 +276,7 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf, struct mod_fail_load *mod_fail; unsigned int len, size, count_failed = 0; char *buf; + int ret; u32 live_mod_count, fkreads, fdecompress, fbecoming, floads; unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes, idecompress_bytes, imod_bytes, total_virtual_lost; @@ -390,8 +391,9 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf, out_unlock: mutex_unlock(&module_mutex); out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + return ret; } #undef MAX_PREAMBLE #undef MAX_FAILED_MOD_PRINT -- cgit v1.2.3 From 335b4223466dd75f9f3ea4918187afbadd22e5c8 Mon Sep 17 00:00:00 2001 From: Maximilian Heyne Date: Wed, 3 May 2023 13:16:53 +0000 Subject: x86/pci/xen: populate MSI sysfs entries Commit bf5e758f02fc ("genirq/msi: Simplify sysfs handling") reworked the creation of sysfs entries for MSI IRQs. The creation used to be in msi_domain_alloc_irqs_descs_locked after calling ops->domain_alloc_irqs. Then it moved into __msi_domain_alloc_irqs which is an implementation of domain_alloc_irqs. 
However, Xen comes with the only other implementation of domain_alloc_irqs and hence doesn't run the sysfs population code anymore. Commit 6c796996ee70 ("x86/pci/xen: Fixup fallout from the PCI/MSI overhaul") set the flag MSI_FLAG_DEV_SYSFS for the xen msi_domain_info but that doesn't actually have an effect because Xen uses it's own domain_alloc_irqs implementation. Fix this by making use of the fallback functions for sysfs population. Fixes: bf5e758f02fc ("genirq/msi: Simplify sysfs handling") Signed-off-by: Maximilian Heyne Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/20230503131656.15928-1-mheyne@amazon.de Signed-off-by: Juergen Gross --- arch/x86/pci/xen.c | 8 +++++--- include/linux/msi.h | 9 ++++++++- kernel/irq/msi.c | 4 ++-- 3 files changed, 15 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 8babce71915f..014c508e914d 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -198,7 +198,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) i++; } kfree(v); - return 0; + return msi_device_populate_sysfs(&dev->dev); error: if (ret == -ENOSYS) @@ -254,7 +254,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) dev_dbg(&dev->dev, "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); } - return 0; + return msi_device_populate_sysfs(&dev->dev); error: dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n", @@ -346,7 +346,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (ret < 0) goto out; } - ret = 0; + ret = msi_device_populate_sysfs(&dev->dev); out: return ret; } @@ -394,6 +394,8 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) xen_destroy_irq(msidesc->irq + i); msidesc->irq = 0; } + + msi_device_destroy_sysfs(&dev->dev); } static void xen_pv_teardown_msi_irqs(struct pci_dev *dev) diff --git a/include/linux/msi.h b/include/linux/msi.h index cdb14a1ef268..a50ea79522f8 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -383,6 +383,13 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); +#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ + +/* + * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs + * entries of MSI IRQs. 
+ */ +#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) #ifdef CONFIG_SYSFS int msi_device_populate_sysfs(struct device *dev); void msi_device_destroy_sysfs(struct device *dev); @@ -390,7 +397,7 @@ void msi_device_destroy_sysfs(struct device *dev); static inline int msi_device_populate_sysfs(struct device *dev) { return 0; } static inline void msi_device_destroy_sysfs(struct device *dev) { } #endif /* !CONFIG_SYSFS */ -#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */ +#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */ /* * The restore hook is still available even for fully irq domain based diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 7a97bcb086bf..b4c31a5c1147 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -542,7 +542,7 @@ fail: return ret; } -#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS +#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN) /** * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device * @dev: The device (PCI, platform etc) which will get sysfs entries @@ -574,7 +574,7 @@ void msi_device_destroy_sysfs(struct device *dev) msi_for_each_desc(desc, dev, MSI_DESC_ALL) msi_sysfs_remove_desc(dev, desc); } -#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK */ +#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK || CONFIG_PCI_XEN */ #else /* CONFIG_SYSFS */ static inline int msi_sysfs_create_group(struct device *dev) { return 0; } static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; } -- cgit v1.2.3 From 9828ed3f695a138f7add89fa2a186ababceb8006 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 25 May 2023 09:32:25 -0700 Subject: module: error out early on concurrent load of the same module file It turns out that udev under certain circumstances will concurrently try to load the same modules over-and-over excessively. This isn't a kernel bug, but it ends up affecting the kernel, to the point that under certain circumstances we can fail to boot, because the kernel uses a lot of memory to read all the module data all at once. Note that it isn't a memory leak, it's just basically a thundering herd problem happening at bootup with a lot of CPUs, with the worst cases then being pretty bad. Admittedly the worst situations are somewhat contrived: lots and lots of CPUs, not a lot of memory, and KASAN enabled to make it all slower and as such (unintentionally) exacerbate the problem. Luis explains: [1] "My best assessment of the situation is that each CPU in udev ends up triggering a load of duplicate set of modules, not just one, but *a lot*. Not sure what heuristics udev uses to load a set of modules per CPU." Petr Pavlu chimes in: [2] "My understanding is that udev workers are forked. An initial kmod context is created by the main udevd process but no sharing happens after the fork. It means that the mentioned memory pool logic doesn't really kick in. Multiple parallel load requests come from multiple udev workers, for instance, each handling an udev event for one CPU device and making the exactly same requests as all others are doing at the same time. The optimization idea would be to recognize these duplicate requests at the udevd/kmod level and converge them" Note that module loading has tried to mitigate this issue before, see for example commit 064f4536d139 ("module: avoid allocation if module is already present and ready"), which has a few ASCII graphs on memory use due to this same issue. 
However, while that noticed that the module was already loaded, and exited with an error early before spending any more time on setting up the module, it didn't handle the case of multiple concurrent module loads all being active - but not complete - at the same time. Yes, one of them will eventually win the race and finalize its copy, and the others will then notice that the module already exists and error out, but while this all happens, we have tons of unnecessary concurrent work being done. Again, the real fix is for udev to not do that (maybe it should use threads instead of fork, and have actual shared data structures and not cause duplicate work). That real fix is apparently not trivial. But it turns out that the kernel already has a pretty good model for dealing with concurrent access to the same file: the i_writecount of the inode. In fact, the module loading already indirectly uses 'i_writecount' , because 'kernel_file_read()' will in fact do ret = deny_write_access(file); if (ret) return ret; ... allow_write_access(file); around the read of the file data. We do not allow concurrent writes to the file, and return -ETXTBUSY if the file was open for writing at the same time as the module data is loaded from it. And the solution to the reader concurrency problem is to simply extend this "no concurrent writers" logic to simply be "exclusive access". Note that "exclusive" in this context isn't really some absolute thing: it's only exclusion from writers and from other "special readers" that do this writer denial. So we simply introduce a variation of that "deny_write_access()" logic that not only denies write access, but also requires that this is the _only_ such access that denies write access. Which means that you can't start loading a module that is already being loaded as a module by somebody else, or you will get the same -ETXTBSY error that you would get if there were writers around. [ It also means that you can't try to load a currently executing executable as a module, for the same reason: executables do that same "deny_write_access()" thing, and that's obviously where the whole ETXTBSY logic traditionally came from. This is not a problem for kernel modules, since the set of normal executable files and kernel module files is entirely disjoint. ] This new function is called "exclusive_deny_write_access()", and the implementation is trivial, in that it's just an atomic decrement of i_writecount if it was 0 before. To use that new exclusivity check, all we then do is wrap the module loading with that exclusive_deny_write_access()() / allow_write_access() pair. The actual patch is a bit bigger than that, because we want to surround not just the "load file data" part, but the whole module setup, to get maximum exclusion. So this ends up splitting up "finit_module()" into a few helper functions to make it all very clear and legible. In Luis' test-case (bringing up 255 vcpu's in a virtual machine [3]), the "wasted vmalloc" space (ie module data read into a vmalloc'ed area in order to be loaded as a module, but then discarded because somebody else loaded the same module instead) dropped from 1.8GiB to 474kB. Yes, that's gigabytes to kilobytes. It doesn't drop completely to zero, because even with this change, you can still end up having completely serial pointless module loads, where one udev process has loaded a module fully (and thus the kernel has released that exclusive lock on the module file), and then another udev process tries to load the same module again. 
So while we cannot fully get rid of the fundamental bug in user space, we _can_ get rid of the excessive concurrent thundering herd effect. A couple of final side notes on this all: - This tweak only affects the "finit_module()" system call, which gives the kernel a file descriptor with the module data. You can also just feed the module data as raw data from user space with "init_module()" (note the lack of 'f' at the beginning), and obviously for that case we do _not_ have any "exclusive read" logic. So if you absolutely want to do things wrong in user space, and try to load the same module multiple times, and error out only later when the kernel ends up saying "you can't load the same module name twice", you can still do that. And in fact, some distros will do exactly that, because they will uncompress the kernel module data in user space before feeding it to the kernel (mainly because they haven't started using the new kernel side decompression yet). So this is not some absolute "you can't do concurrent loads of the same module". It's literally just a very simple heuristic that will catch it early in case you try to load the exact same module file at the same time, and in that case avoid a potentially nasty situation. - There is another user of "deny_write_access()": the verity code that enables fs-verity on a file (the FS_IOC_ENABLE_VERITY ioctl). If you use fs-verity and you care about verifying the kernel modules (which does make sense), you should do it *before* loading said kernel module. That may sound obvious, but now the implementation basically requires it. Because if you try to do it concurrently, the kernel may refuse to load the module file that is being set up by the fs-verity code. - This all will obviously mean that if you insist on loading the same module in parallel, only one module load will succeed, and the others will return with an error. That was true before too, but what is different is that the -ETXTBSY error can be returned *before* the success case of another process fully loading and instantiating the module. Again, that might sound obvious, and it is indeed the whole point of the whole change: we are much quicker to notice the whole "you're already in the process of loading this module". So it's very much intentional, but it does mean that if you just spray the kernel with "finit_module()", and expect that the module is immediately loaded afterwards without checking the return value, you are doing something horribly horribly wrong. I'd like to say that that would never happen, but the whole _reason_ for this commit is that udev is currently doing something horribly horribly wrong, so ... Link: https://lore.kernel.org/all/ZEGopJ8VAYnE7LQ2@bombadil.infradead.org/ [1] Link: https://lore.kernel.org/all/23bd0ce6-ef78-1cd8-1f21-0e706a00424a@suse.com/ [2] Link: https://lore.kernel.org/lkml/ZG%2Fa+nrt4%2FAAUi5z@bombadil.infradead.org/ [3] Cc: Greg Kroah-Hartman Cc: Lucas De Marchi Cc: Petr Pavlu Tested-by: Luis Chamberlain Signed-off-by: Linus Torvalds --- include/linux/fs.h | 6 ++++++ kernel/module/main.c | 58 ++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 49 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/include/linux/fs.h b/include/linux/fs.h index 133f0640fb24..86b50271b4f7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2566,6 +2566,12 @@ static inline int deny_write_access(struct file *file) struct inode *inode = file_inode(file); return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -ETXTBSY; } +static inline int exclusive_deny_write_access(struct file *file) +{ + int old = 0; + struct inode *inode = file_inode(file); + return atomic_try_cmpxchg(&inode->i_writecount, &old, -1) ? 0 : -ETXTBSY; +} static inline void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); diff --git a/kernel/module/main.c b/kernel/module/main.c index 044aa2c9e3cb..b4c7e925fdb0 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3057,25 +3057,13 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, return load_module(&info, uargs, 0); } -SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +static int file_init_module(struct file *file, const char __user * uargs, int flags) { struct load_info info = { }; void *buf = NULL; int len; - int err; - - err = may_init_module(); - if (err) - return err; - - pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); - if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS - |MODULE_INIT_IGNORE_VERMAGIC - |MODULE_INIT_COMPRESSED_FILE)) - return -EINVAL; - - len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL, + len = kernel_read_file(file, 0, &buf, INT_MAX, NULL, READING_MODULE); if (len < 0) { mod_stat_inc(&failed_kreads); @@ -3084,7 +3072,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) } if (flags & MODULE_INIT_COMPRESSED_FILE) { - err = module_decompress(&info, buf, len); + int err = module_decompress(&info, buf, len); vfree(buf); /* compressed data is no longer needed */ if (err) { mod_stat_inc(&failed_decompress); @@ -3099,6 +3087,46 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) return load_module(&info, uargs, flags); } +/* + * kernel_read_file() will already deny write access, but module + * loading wants _exclusive_ access to the file, so we do that + * here, along with basic sanity checks. + */ +static int prepare_file_for_module_load(struct file *file) +{ + if (!file || !(file->f_mode & FMODE_READ)) + return -EBADF; + if (!S_ISREG(file_inode(file)->i_mode)) + return -EINVAL; + return exclusive_deny_write_access(file); +} + +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +{ + struct fd f; + int err; + + err = may_init_module(); + if (err) + return err; + + pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); + + if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS + |MODULE_INIT_IGNORE_VERMAGIC + |MODULE_INIT_COMPRESSED_FILE)) + return -EINVAL; + + f = fdget(fd); + err = prepare_file_for_module_load(f.file); + if (!err) { + err = file_init_module(f.file, uargs, flags); + allow_write_access(f.file); + } + fdput(f); + return err; +} + /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ char *module_flags(struct module *mod, char *buf, bool show_state) { -- cgit v1.2.3 From 7cc148a32f1e7496e22c0005dd113a31d4a3b3d4 Mon Sep 17 00:00:00 2001 From: James Gowans Date: Thu, 8 Jun 2023 14:00:19 +0200 Subject: genirq: Expand doc for PENDING and REPLAY flags Adding a bit more info about what the flags are used for may help future code readers. 
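As a rough illustration of what those comments describe (paraphrased from the generic flow-handler and resend code, not part of this patch), the two flags are typically used like this:

	/* flow handler: the interrupt cannot be handled right now */
	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;	/* remember: resend it later */
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/* resend path: replay a pending interrupt at most once */
	if ((desc->istate & IRQS_PENDING) && !(desc->istate & IRQS_REPLAY)) {
		desc->istate |= IRQS_REPLAY;	/* cleared again once the handler has run */
		/* retrigger in hardware or queue on the software resend list */
	}
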
Signed-off-by: James Gowans Cc: Thomas Gleixner Cc: Marc Zyngier Cc: Liao Chang Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230608120021.3273400-2-jgowans@amazon.com --- kernel/irq/internals.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 5fdc0b557579..c443a0ddc07e 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -47,9 +47,12 @@ enum { * detection * IRQS_POLL_INPROGRESS - polling in progress * IRQS_ONESHOT - irq is not unmasked in primary handler - * IRQS_REPLAY - irq is replayed + * IRQS_REPLAY - irq has been resent and will not be resent + * again until the handler has run and cleared + * this flag. * IRQS_WAITING - irq is waiting - * IRQS_PENDING - irq is pending and replayed later + * IRQS_PENDING - irq needs to be resent and should be resent + * at the next available opportunity. * IRQS_SUSPENDED - irq is suspended * IRQS_NMI - irq line is used to deliver NMIs * IRQS_SYSFS - descriptor has been added to sysfs -- cgit v1.2.3 From 9c15eeb5362c48dd27d51bd72e8873341fa9383c Mon Sep 17 00:00:00 2001 From: James Gowans Date: Thu, 8 Jun 2023 14:00:20 +0200 Subject: genirq: Allow fasteoi handler to resend interrupts on concurrent handling There is a class of interrupt controllers out there that, once they have signalled a given interrupt number, will still signal incoming instances of the *same* interrupt despite the original interrupt not having been EOIed yet. As long as the new interrupt reaches the *same* CPU, nothing bad happens, as that CPU still has its interrupts globally disabled, and we will only take the new interrupt once the interrupt has been EOIed. However, things become more "interesting" if an affinity change comes in while the interrupt is being handled. More specifically, while the per-irq lock is being dropped. This results in the affinity change taking place immediately. At this point, there is nothing that prevents the interrupt from firing on the new target CPU. We end-up with the interrupt running concurrently on two CPUs, which isn't a good thing. And that's where things become worse: the new CPU notices that the interrupt handling is in progress (irq_may_run() return false), and *drops the interrupt on the floor*. The whole race looks like this: CPU 0 | CPU 1 -----------------------------|----------------------------- interrupt start | handle_fasteoi_irq | set_affinity(CPU 1) handler | ... | interrupt start ... | handle_fasteoi_irq -> early out handle_fasteoi_irq return | interrupt end interrupt end | If the interrupt was an edge, too bad. The interrupt is lost, and the system will eventually die one way or another. Not great. A way to avoid this situation is to detect this problem at the point we handle the interrupt on the new target. Instead of dropping the interrupt, use the resend mechanism to force it to be replayed. Also, in order to limit the impact of this workaround to the pathetic architectures that require it, gate it behind a new irq flag aptly named IRQD_RESEND_WHEN_IN_PROGRESS. 
Suggested-by: Marc Zyngier Signed-off-by: James Gowans Cc: Thomas Gleixner Cc: Marc Zyngier Cc: KarimAllah Raslan Cc: Yipeng Zou Cc: Zhang Jianhua [maz: reworded commit mesage] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230608120021.3273400-3-jgowans@amazon.com --- include/linux/irq.h | 13 +++++++++++++ kernel/irq/chip.c | 16 +++++++++++++++- kernel/irq/debugfs.c | 2 ++ 3 files changed, 30 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index d9c86db69982..d8a6fdce9373 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -223,6 +223,8 @@ struct irq_data { * irq_chip::irq_set_affinity() when deactivated. * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set. + * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which + * case it must be resent at the next available opportunity. */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -249,6 +251,7 @@ enum { IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28), IRQD_AFFINITY_ON_ACTIVATE = BIT(29), IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30), + IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d) return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; } +static inline void irqd_set_resend_when_in_progress(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS; +} + +static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 49e7bc871fec..57cd8f475302 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -692,8 +692,16 @@ void handle_fasteoi_irq(struct irq_desc *desc) raw_spin_lock(&desc->lock); - if (!irq_may_run(desc)) + /* + * When an affinity change races with IRQ handling, the next interrupt + * can arrive on the new CPU before the original CPU has completed + * handling the previous one - it may need to be resent. + */ + if (!irq_may_run(desc)) { + if (irqd_needs_resend_when_in_progress(&desc->irq_data)) + desc->istate |= IRQS_PENDING; goto out; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -715,6 +723,12 @@ void handle_fasteoi_irq(struct irq_desc *desc) cond_unmask_eoi_irq(desc, chip); + /* + * When the race described above happens this will resend the interrupt. 
+ */ + if (unlikely(desc->istate & IRQS_PENDING)) + check_irq_resend(desc, false); + raw_spin_unlock(&desc->lock); return; out: diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index bbcaac64038e..5971a66be034 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = { BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX), BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND), + + BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS), }; static const struct irq_bit_descr irqdesc_states[] = { -- cgit v1.2.3 From 8091f56ee4e51037662590edc0b0e44807fbab4d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 22:04:28 +0200 Subject: irqdomain: Include internals.h for function prototypes irq_domain_debugfs_init() is defined in irqdomain.c, but the declaration is in a header that is not included here: kernel/irq/irqdomain.c:1965:13: error: no previous prototype for 'irq_domain_debugfs_init' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230516200432.554240-1-arnd@kernel.org --- kernel/irq/irqdomain.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index f34760a1e222..5bd01624e447 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1915,6 +1915,8 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain) #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS +#include "internals.h" + static struct dentry *domain_dir; static void -- cgit v1.2.3
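
More generally, -Wmissing-prototypes is silenced once the translation unit that defines a function also sees a declaration for it; the fix follows the usual pattern (prototype shown for illustration only):

	/* kernel/irq/internals.h */
	void irq_domain_debugfs_init(struct dentry *root);

	/* kernel/irq/irqdomain.c */
	#include "internals.h"	/* the definition below now has a visible prototype */

	void irq_domain_debugfs_init(struct dentry *root)
	{
		/* ... */
	}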