From 976516404ff3fab2a8caa8bd6f5efc1437fed0b8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 14:02:57 +0200 Subject: y2038: remove unused time interfaces After many small patches, at least some of the deprecated interfaces have no remaining users any more and can be removed: current_kernel_time do_settimeofday get_monotonic_boottime get_monotonic_boottime64 get_monotonic_coarse get_monotonic_coarse64 getrawmonotonic64 ktime_get_real_ts timekeeping_clocktai timespec_trunc timespec_valid_strict time_to_tm For many of the remaining time functions, we are missing one or two patches that failed to make it into 4.19, they will be removed in the following merge window. The replacement functions for the removed interfaces are documented in Documentation/core-api/timekeeping.rst. Signed-off-by: Arnd Bergmann --- kernel/time/time.c | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'kernel') diff --git a/kernel/time/time.c b/kernel/time/time.c index ccdb351277ee..712543011106 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -342,30 +342,6 @@ unsigned int jiffies_to_usecs(const unsigned long j) } EXPORT_SYMBOL(jiffies_to_usecs); -/** - * timespec_trunc - Truncate timespec to a granularity - * @t: Timespec - * @gran: Granularity in ns. - * - * Truncate a timespec to a granularity. Always rounds down. gran must - * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). - */ -struct timespec timespec_trunc(struct timespec t, unsigned gran) -{ - /* Avoid division in the common cases 1 ns and 1 s. */ - if (gran == 1) { - /* nothing */ - } else if (gran == NSEC_PER_SEC) { - t.tv_nsec = 0; - } else if (gran > 1 && gran < NSEC_PER_SEC) { - t.tv_nsec -= t.tv_nsec % gran; - } else { - WARN(1, "illegal file time granularity: %u", gran); - } - return t; -} -EXPORT_SYMBOL(timespec_trunc); - /* * mktime64 - Converts date to seconds. * Converts Gregorian date to seconds since 1970-01-01 00:00:00. -- cgit v1.2.3 From 33e26418193f58d1895f2f968e1953b1caf8deb7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 15:18:20 +0200 Subject: y2038: make do_gettimeofday() and get_seconds() inline get_seconds() and do_gettimeofday() are only used by a few modules now any more (waiting for the respective patches to get accepted), and they are among the last holdouts of code that is not y2038 safe in the core kernel. Move the implementation into the timekeeping32.h header to clean up the core kernel and isolate the old interfaces further. Signed-off-by: Arnd Bergmann --- include/linux/timekeeping32.h | 15 +++++++++++++-- kernel/time/time.c | 15 +++++++++------ kernel/time/timekeeping.c | 24 ------------------------ 3 files changed, 22 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index a4a4991160fb..a502616f7e1c 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -6,8 +6,19 @@ * over time so we can remove the file here. 
*/ -extern void do_gettimeofday(struct timeval *tv); -unsigned long get_seconds(void); +static inline void do_gettimeofday(struct timeval *tv) +{ + struct timespec64 now; + + ktime_get_real_ts64(&now); + tv->tv_sec = now.tv_sec; + tv->tv_usec = now.tv_nsec/1000; +} + +static inline unsigned long get_seconds(void) +{ + return ktime_get_real_seconds(); +} static inline void getnstimeofday(struct timespec *ts) { diff --git a/kernel/time/time.c b/kernel/time/time.c index 712543011106..de332250d6fa 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -144,9 +144,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv, struct timezone __user *, tz) { if (likely(tv != NULL)) { - struct timeval ktv; - do_gettimeofday(&ktv); - if (copy_to_user(tv, &ktv, sizeof(ktv))) + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + if (put_user(ts.tv_sec, &tv->tv_sec) || + put_user(ts.tv_nsec / 1000, &tv->tv_usec)) return -EFAULT; } if (unlikely(tz != NULL)) { @@ -227,10 +229,11 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, struct timezone __user *, tz) { if (tv) { - struct timeval ktv; + struct timespec64 ts; - do_gettimeofday(&ktv); - if (compat_put_timeval(&ktv, tv)) + ktime_get_real_ts64(&ts); + if (put_user(ts.tv_sec, &tv->tv_sec) || + put_user(ts.tv_nsec / 1000, &tv->tv_usec)) return -EFAULT; } if (tz) { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f3b22f456fac..2d110c948805 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1211,22 +1211,6 @@ int get_device_system_crosststamp(int (*get_time_fn) } EXPORT_SYMBOL_GPL(get_device_system_crosststamp); -/** - * do_gettimeofday - Returns the time of day in a timeval - * @tv: pointer to the timeval to be set - * - * NOTE: Users should be converted to using getnstimeofday() - */ -void do_gettimeofday(struct timeval *tv) -{ - struct timespec64 now; - - getnstimeofday64(&now); - tv->tv_sec = now.tv_sec; - tv->tv_usec = now.tv_nsec/1000; -} -EXPORT_SYMBOL(do_gettimeofday); - /** * do_settimeofday64 - Sets the time of day. * @ts: pointer to the timespec64 variable containing the new time @@ -2174,14 +2158,6 @@ void getboottime64(struct timespec64 *ts) } EXPORT_SYMBOL_GPL(getboottime64); -unsigned long get_seconds(void) -{ - struct timekeeper *tk = &tk_core.timekeeper; - - return tk->xtime_sec; -} -EXPORT_SYMBOL(get_seconds); - void ktime_get_coarse_real_ts64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; -- cgit v1.2.3 From 9afc5eee65ca7d717a99d6fe8f4adfe32a40940a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 13 Jul 2018 12:52:28 +0200 Subject: y2038: globally rename compat_time to old_time32 Christoph Hellwig suggested a slightly different path for handling backwards compatibility with the 32-bit time_t based system calls: Rather than simply reusing the compat_sys_* entry points on 32-bit architectures unchanged, we get rid of those entry points and the compat_time types by renaming them to something that makes more sense on 32-bit architectures (which don't have a compat mode otherwise), and then share the entry points under the new name with the 64-bit architectures that use them for implementing the compatibility. 
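As a minimal sketch of what this means for a 32-bit architecture (hypothetical helper name; the real conversion routines are the ones renamed in the include/linux/time32.h and kernel/time/time.c hunks below), a handler that reads a 32-bit time_t timespec from user space now looks roughly like:

    #include <linux/time32.h>

    /*
     * Sketch: read a user-supplied 32-bit timespec using the renamed helper.
     * get_old_timespec32() is the former compat_get_timespec64(), and
     * struct old_timespec32 is the former struct compat_timespec.
     */
    static int example_get_user_ts32(const struct old_timespec32 __user *utp,
                                     struct timespec64 *ts)
    {
            if (get_old_timespec32(ts, utp))
                    return -EFAULT;
            return 0;
    }
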
The following types and interfaces are renamed here, and moved from linux/compat_time.h to linux/time32.h: old new --- --- compat_time_t old_time32_t struct compat_timeval struct old_timeval32 struct compat_timespec struct old_timespec32 struct compat_itimerspec struct old_itimerspec32 ns_to_compat_timeval() ns_to_old_timeval32() get_compat_itimerspec64() get_old_itimerspec32() put_compat_itimerspec64() put_old_itimerspec32() compat_get_timespec64() get_old_timespec32() compat_put_timespec64() put_old_timespec32() As we already have aliases in place, this patch addresses only the instances that are relevant to the system call interface in particular, not those that occur in device drivers and other modules. Those will get handled separately, while providing the 64-bit version of the respective interfaces. I'm not renaming the timex, rusage and itimerval structures, as we are still debating what the new interface will look like, and whether we will need a replacement at all. This also doesn't change the names of the syscall entry points, which can be done more easily when we actually switch over the 32-bit architectures to use them, at that point we need to change COMPAT_SYSCALL_DEFINEx to SYSCALL_DEFINEx with a new name, e.g. with a _time32 suffix. Suggested-by: Christoph Hellwig Link: https://lore.kernel.org/lkml/20180705222110.GA5698@infradead.org/ Signed-off-by: Arnd Bergmann --- arch/arm64/include/asm/compat.h | 6 +-- arch/arm64/include/asm/stat.h | 2 +- arch/mips/include/asm/compat.h | 6 +-- arch/mips/kernel/binfmt_elfn32.c | 14 +++--- arch/mips/kernel/binfmt_elfo32.c | 14 +++--- arch/parisc/include/asm/compat.h | 6 +-- arch/powerpc/include/asm/compat.h | 6 +-- arch/powerpc/kernel/asm-offsets.c | 8 ++-- arch/powerpc/oprofile/backtrace.c | 2 +- arch/sparc/include/asm/compat.h | 6 +-- fs/aio.c | 8 ++-- fs/compat_binfmt_elf.c | 2 +- fs/select.c | 20 ++++---- fs/timerfd.c | 12 ++--- fs/utimes.c | 12 ++--- include/linux/compat.h | 96 +++++++++++++++++++-------------------- include/linux/compat_time.h | 32 ------------- include/linux/elfcore-compat.h | 8 ++-- include/linux/restart_block.h | 4 +- include/linux/syscalls.h | 2 +- include/linux/time32.h | 48 +++++++++++++++----- ipc/mqueue.c | 8 ++-- ipc/msg.c | 6 +-- ipc/sem.c | 10 ++-- ipc/shm.c | 6 +-- ipc/syscall.c | 2 +- ipc/util.h | 2 +- kernel/compat.c | 8 ++-- kernel/futex_compat.c | 2 +- kernel/sched/core.c | 4 +- kernel/signal.c | 2 +- kernel/time/hrtimer.c | 8 ++-- kernel/time/posix-stubs.c | 18 ++++---- kernel/time/posix-timers.c | 30 ++++++------ kernel/time/time.c | 58 +++++++++++------------ net/compat.c | 4 +- 36 files changed, 237 insertions(+), 245 deletions(-) delete mode 100644 include/linux/compat_time.h (limited to 'kernel') diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 1a037b94eba1..4a89007db14a 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -86,11 +86,11 @@ struct compat_stat { compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; - compat_time_t st_atime; + old_time32_t st_atime; compat_ulong_t st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; compat_ulong_t st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; compat_ulong_t st_ctime_nsec; compat_ulong_t __unused4[2]; }; diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h index eab738019707..397c6ccd04e7 100644 --- a/arch/arm64/include/asm/stat.h +++ b/arch/arm64/include/asm/stat.h @@ -20,7 +20,7 @@ #ifdef CONFIG_COMPAT 
-#include +#include #include /* diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 78675f19440f..7dcbd855814e 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -59,11 +59,11 @@ struct compat_stat { s32 st_pad2[2]; compat_off_t st_size; s32 st_pad3; - compat_time_t st_atime; + old_time32_t st_atime; s32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; s32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; s32 st_ctime_nsec; s32 st_blksize; s32 st_blocks; diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 89b234844534..7a12763d553a 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -54,10 +54,10 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime;/* Cumulative user time */ - struct compat_timeval pr_cstime;/* Cumulative system time */ + struct old_timeval32 pr_utime; /* User time */ + struct old_timeval32 pr_stime; /* System time */ + struct old_timeval32 pr_cutime;/* Cumulative user time */ + struct old_timeval32 pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. */ }; @@ -81,9 +81,9 @@ struct elf_prpsinfo32 #define elf_caddr_t u32 #define init_elf_binfmt init_elfn32_binfmt -#define jiffies_to_timeval jiffies_to_compat_timeval +#define jiffies_to_timeval jiffies_to_old_timeval32 static __inline__ void -jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) { /* * Convert jiffies to nanoseconds and separate with @@ -101,6 +101,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #define TASK_SIZE TASK_SIZE32 #undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval +#define ns_to_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index a88c59db3d48..e6db06a1d31a 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -59,10 +59,10 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime;/* Cumulative user time */ - struct compat_timeval pr_cstime;/* Cumulative system time */ + struct old_timeval32 pr_utime; /* User time */ + struct old_timeval32 pr_stime; /* System time */ + struct old_timeval32 pr_cutime;/* Cumulative user time */ + struct old_timeval32 pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. 
*/ }; @@ -86,9 +86,9 @@ struct elf_prpsinfo32 #define elf_caddr_t u32 #define init_elf_binfmt init_elf32_binfmt -#define jiffies_to_timeval jiffies_to_compat_timeval +#define jiffies_to_timeval jiffies_to_old_timeval32 static inline void -jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) { /* * Convert jiffies to nanoseconds and separate with @@ -104,6 +104,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #define TASK_SIZE TASK_SIZE32 #undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval +#define ns_to_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index ab8a54771507..32dadaf2b534 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -48,11 +48,11 @@ struct compat_stat { u16 st_reserved2; /* old st_gid */ compat_dev_t st_rdev; compat_off_t st_size; - compat_time_t st_atime; + old_time32_t st_atime; u32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; u32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; u32 st_ctime_nsec; s32 st_blksize; s32 st_blocks; diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 85c8af2bb272..93f79d1a03c3 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -55,11 +55,11 @@ struct compat_stat { compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; - compat_time_t st_atime; + old_time32_t st_atime; u32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; u32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; u32 st_ctime_nsec; u32 __unused4[2]; }; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 89cf15566c4e..041a115789a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -387,12 +387,12 @@ int main(void) OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64); OFFSET(TVAL64_TV_SEC, timeval, tv_sec); OFFSET(TVAL64_TV_USEC, timeval, tv_usec); - OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec); - OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec); + OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec); + OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec); OFFSET(TSPC64_TV_SEC, timespec, tv_sec); OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec); - OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec); - OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec); + OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec); + OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec); #else OFFSET(TVAL32_TV_SEC, timeval, tv_sec); OFFSET(TVAL32_TV_USEC, timeval, tv_usec); diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index ad054dd0d666..5df6290d1ccc 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. 
**/ -#include +#include #include #include #include diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 4eb51d2dae98..7018cb60beef 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -47,11 +47,11 @@ struct compat_stat { __compat_gid_t st_gid; compat_dev_t st_rdev; compat_off_t st_size; - compat_time_t st_atime; + old_time32_t st_atime; compat_ulong_t st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; compat_ulong_t st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; compat_ulong_t st_ctime_nsec; compat_off_t st_blksize; compat_off_t st_blocks; diff --git a/fs/aio.c b/fs/aio.c index b9350f3360c6..301e6314183b 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -2135,12 +2135,12 @@ COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id, compat_long_t, min_nr, compat_long_t, nr, struct io_event __user *, events, - struct compat_timespec __user *, timeout) + struct old_timespec32 __user *, timeout) { struct timespec64 t; int ret; - if (timeout && compat_get_timespec64(&t, timeout)) + if (timeout && get_old_timespec32(&t, timeout)) return -EFAULT; ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); @@ -2160,7 +2160,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, compat_long_t, min_nr, compat_long_t, nr, struct io_event __user *, events, - struct compat_timespec __user *, timeout, + struct old_timespec32 __user *, timeout, const struct __compat_aio_sigset __user *, usig) { struct __compat_aio_sigset ksig = { NULL, }; @@ -2168,7 +2168,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents, struct timespec64 t; int ret; - if (timeout && compat_get_timespec64(&t, timeout)) + if (timeout && get_old_timespec32(&t, timeout)) return -EFAULT; if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c index 504b3c3539dc..15f6e96b3bd9 100644 --- a/fs/compat_binfmt_elf.c +++ b/fs/compat_binfmt_elf.c @@ -52,7 +52,7 @@ #define elf_prpsinfo compat_elf_prpsinfo #undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval +#define ns_to_timeval ns_to_old_timeval32 /* * To use this file, asm/elf.h must define compat_elf_check_arch. 
diff --git a/fs/select.c b/fs/select.c index 4a6b6e4b21cb..22b3bf89f051 100644 --- a/fs/select.c +++ b/fs/select.c @@ -1120,7 +1120,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user * ts.tv_sec = ts.tv_nsec = 0; if (timeval) { - struct compat_timeval rtv; + struct old_timeval32 rtv; rtv.tv_sec = ts.tv_sec; rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC; @@ -1128,7 +1128,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user * if (!copy_to_user(p, &rtv, sizeof(rtv))) return ret; } else { - if (!compat_put_timespec64(&ts, p)) + if (!put_old_timespec32(&ts, p)) return ret; } /* @@ -1257,10 +1257,10 @@ out_nofds: static int do_compat_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timeval __user *tvp) + struct old_timeval32 __user *tvp) { struct timespec64 end_time, *to = NULL; - struct compat_timeval tv; + struct old_timeval32 tv; int ret; if (tvp) { @@ -1282,7 +1282,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp, COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, - struct compat_timeval __user *, tvp) + struct old_timeval32 __user *, tvp) { return do_compat_select(n, inp, outp, exp, tvp); } @@ -1307,7 +1307,7 @@ COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg) static long do_compat_pselect(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask, + struct old_timespec32 __user *tsp, compat_sigset_t __user *sigmask, compat_size_t sigsetsize) { sigset_t ksigmask, sigsaved; @@ -1315,7 +1315,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, int ret; if (tsp) { - if (compat_get_timespec64(&ts, tsp)) + if (get_old_timespec32(&ts, tsp)) return -EFAULT; to = &end_time; @@ -1355,7 +1355,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp, COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp, compat_ulong_t __user *, outp, compat_ulong_t __user *, exp, - struct compat_timespec __user *, tsp, void __user *, sig) + struct old_timespec32 __user *, tsp, void __user *, sig) { compat_size_t sigsetsize = 0; compat_uptr_t up = 0; @@ -1373,7 +1373,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp, } COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, - unsigned int, nfds, struct compat_timespec __user *, tsp, + unsigned int, nfds, struct old_timespec32 __user *, tsp, const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize) { sigset_t ksigmask, sigsaved; @@ -1381,7 +1381,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, int ret; if (tsp) { - if (compat_get_timespec64(&ts, tsp)) + if (get_old_timespec32(&ts, tsp)) return -EFAULT; to = &end_time; diff --git a/fs/timerfd.c b/fs/timerfd.c index d69ad801eb80..803ca070d42e 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -561,29 +561,29 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags, - const struct compat_itimerspec __user *, utmr, - struct compat_itimerspec __user *, otmr) + const struct old_itimerspec32 __user *, utmr, + struct old_itimerspec32 __user *, otmr) { struct itimerspec64 new, old; int ret; - if (get_compat_itimerspec64(&new, utmr)) + if (get_old_itimerspec32(&new, 
utmr)) return -EFAULT; ret = do_timerfd_settime(ufd, flags, &new, &old); if (ret) return ret; - if (otmr && put_compat_itimerspec64(&old, otmr)) + if (otmr && put_old_itimerspec32(&old, otmr)) return -EFAULT; return ret; } COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd, - struct compat_itimerspec __user *, otmr) + struct old_itimerspec32 __user *, otmr) { struct itimerspec64 kotmr; int ret = do_timerfd_gettime(ufd, &kotmr); if (ret) return ret; - return put_compat_itimerspec64(&kotmr, otmr) ? -EFAULT : 0; + return put_old_itimerspec32(&kotmr, otmr) ? -EFAULT : 0; } #endif diff --git a/fs/utimes.c b/fs/utimes.c index 69d4b6ba1bfb..79a65c163f40 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -245,13 +245,13 @@ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename, return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0); } -COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags) +COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags) { struct timespec64 tv[2]; if (t) { - if (compat_get_timespec64(&tv[0], &t[0]) || - compat_get_timespec64(&tv[1], &t[1])) + if (get_old_timespec32(&tv[0], &t[0]) || + get_old_timespec32(&tv[1], &t[1])) return -EFAULT; if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) @@ -261,7 +261,7 @@ COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filena } static long do_compat_futimesat(unsigned int dfd, const char __user *filename, - struct compat_timeval __user *t) + struct old_timeval32 __user *t) { struct timespec64 tv[2]; @@ -282,12 +282,12 @@ static long do_compat_futimesat(unsigned int dfd, const char __user *filename, COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd, const char __user *, filename, - struct compat_timeval __user *, t) + struct old_timeval32 __user *, t) { return do_compat_futimesat(dfd, filename, t); } -COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t) +COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct old_timeval32 __user *, t) { return do_compat_futimesat(AT_FDCWD, filename, t); } diff --git a/include/linux/compat.h b/include/linux/compat.h index 1a3c4f37e908..0e058792ecf6 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include /* for HZ */ @@ -116,13 +116,13 @@ struct compat_sel_arg_struct; struct rusage; struct compat_utimbuf { - compat_time_t actime; - compat_time_t modtime; + old_time32_t actime; + old_time32_t modtime; }; struct compat_itimerval { - struct compat_timeval it_interval; - struct compat_timeval it_value; + struct old_timeval32 it_interval; + struct old_timeval32 it_value; }; struct itimerval; @@ -146,7 +146,7 @@ struct compat_timex { compat_long_t constant; compat_long_t precision; compat_long_t tolerance; - struct compat_timeval time; + struct old_timeval32 time; compat_long_t tick; compat_long_t ppsfreq; compat_long_t jitter; @@ -307,8 +307,8 @@ struct compat_rlimit { }; struct compat_rusage { - struct compat_timeval ru_utime; - struct compat_timeval ru_stime; + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; compat_long_t ru_maxrss; compat_long_t ru_ixrss; compat_long_t ru_idrss; @@ -457,8 +457,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *fr int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); -static 
inline int compat_timeval_compare(struct compat_timeval *lhs, - struct compat_timeval *rhs) +static inline int old_timeval32_compare(struct old_timeval32 *lhs, + struct old_timeval32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; @@ -467,8 +467,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs, return lhs->tv_usec - rhs->tv_usec; } -static inline int compat_timespec_compare(struct compat_timespec *lhs, - struct compat_timespec *rhs) +static inline int old_timespec32_compare(struct old_timespec32 *lhs, + struct old_timespec32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; @@ -552,12 +552,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, - struct compat_timespec __user *timeout); + struct old_timespec32 __user *timeout); asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, - struct compat_timespec __user *timeout, + struct old_timespec32 __user *timeout, const struct __compat_aio_sigset __user *usig); /* fs/cookies.c */ @@ -642,11 +642,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timespec __user *tsp, + struct old_timespec32 __user *tsp, void __user *sig); asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, - struct compat_timespec __user *tsp, + struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); @@ -671,15 +671,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd, /* fs/timerfd.c */ asmlinkage long compat_sys_timerfd_gettime(int ufd, - struct compat_itimerspec __user *otmr); + struct old_itimerspec32 __user *otmr); asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, - const struct compat_itimerspec __user *utmr, - struct compat_itimerspec __user *otmr); + const struct old_itimerspec32 __user *utmr, + struct old_itimerspec32 __user *otmr); /* fs/utimes.c */ asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, - struct compat_timespec __user *t, + struct old_timespec32 __user *t, int flags); /* kernel/exit.c */ @@ -691,7 +691,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t, /* kernel/futex.c */ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, - struct compat_timespec __user *utime, u32 __user *uaddr2, + struct old_timespec32 __user *utime, u32 __user *uaddr2, u32 val3); asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, @@ -701,8 +701,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); /* kernel/hrtimer.c */ -asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, - struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/itimer.c */ asmlinkage long compat_sys_getitimer(int which, @@ -722,19 +722,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); asmlinkage long compat_sys_timer_gettime(timer_t timer_id, - struct compat_itimerspec __user *setting); + struct old_itimerspec32 __user *setting); asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, 
- struct compat_itimerspec __user *new, - struct compat_itimerspec __user *old); + struct old_itimerspec32 __user *new, + struct old_itimerspec32 __user *old); asmlinkage long compat_sys_clock_settime(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_getres(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, - struct compat_timespec __user *rqtp, - struct compat_timespec __user *rmtp); + struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/ptrace.c */ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, @@ -748,7 +748,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval); + struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, @@ -768,7 +768,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, - struct compat_timespec __user *uts, compat_size_t sigsetsize); + struct old_timespec32 __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); /* No generic prototype for rt_sigreturn */ @@ -782,9 +782,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); /* kernel/time.c */ -asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, +asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); -asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, +asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); @@ -798,11 +798,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, compat_size_t msg_len, unsigned int msg_prio, - const struct compat_timespec __user *u_abs_timeout); + const struct old_timespec32 __user *u_abs_timeout); asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, compat_size_t msg_len, unsigned int __user *u_msg_prio, - const struct compat_timespec __user *u_abs_timeout); + const struct old_timespec32 __user *u_abs_timeout); asmlinkage long compat_sys_mq_notify(mqd_t mqdes, const struct compat_sigevent __user *u_notification); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, @@ -819,7 +819,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, /* ipc/sem.c */ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsems, const struct compat_timespec __user *timeout); + unsigned nsems, const struct old_timespec32 __user *timeout); /* ipc/shm.c */ 
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); @@ -876,7 +876,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, struct compat_siginfo __user *uinfo); asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, - struct compat_timespec __user *timeout); + struct old_timespec32 __user *timeout); asmlinkage long compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru); @@ -928,7 +928,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd, asmlinkage long compat_sys_open(const char __user *filename, int flags, umode_t mode); asmlinkage long compat_sys_utimes(const char __user *filename, - struct compat_timeval __user *t); + struct old_timeval32 __user *t); /* __ARCH_WANT_SYSCALL_NO_FLAGS */ asmlinkage long compat_sys_signalfd(int ufd, @@ -942,15 +942,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename, struct compat_stat __user *statbuf); /* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_time(old_time32_t __user *tloc); asmlinkage long compat_sys_utime(const char __user *filename, struct compat_utimbuf __user *t); asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, - struct compat_timeval __user *t); + struct old_timeval32 __user *t); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timeval __user *tvp); + struct old_timeval32 __user *tvp); asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, unsigned flags); @@ -983,7 +983,7 @@ asmlinkage long compat_sys_sigaction(int sig, #endif /* obsolete: kernel/time/time.c */ -asmlinkage long compat_sys_stime(compat_time_t __user *tptr); +asmlinkage long compat_sys_stime(old_time32_t __user *tptr); /* obsolete: net/socket.c */ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); @@ -1002,15 +1002,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); } #endif /** - * ns_to_compat_timeval - Compat version of ns_to_timeval + * ns_to_old_timeval32 - Compat version of ns_to_timeval * @nsec: the nanoseconds value to be converted * - * Returns the compat_timeval representation of the nsec parameter. + * Returns the old_timeval32 representation of the nsec parameter. 
*/ -static inline struct compat_timeval ns_to_compat_timeval(s64 nsec) +static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec) { struct timeval tv; - struct compat_timeval ctv; + struct old_timeval32 ctv; tv = ns_to_timeval(nsec); ctv.tv_sec = tv.tv_sec; diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h deleted file mode 100644 index e70bfd1d2c3f..000000000000 --- a/include/linux/compat_time.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_COMPAT_TIME_H -#define _LINUX_COMPAT_TIME_H - -#include -#include - -typedef s32 compat_time_t; - -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - -struct compat_itimerspec { - struct compat_timespec it_interval; - struct compat_timespec it_value; -}; - -extern int compat_get_timespec64(struct timespec64 *, const void __user *); -extern int compat_put_timespec64(const struct timespec64 *, void __user *); -extern int get_compat_itimerspec64(struct itimerspec64 *its, - const struct compat_itimerspec __user *uits); -extern int put_compat_itimerspec64(const struct itimerspec64 *its, - struct compat_itimerspec __user *uits); - -#endif /* _LINUX_COMPAT_TIME_H */ diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index b5f2efdd05e0..7a37f4ce9fd2 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -27,10 +27,10 @@ struct compat_elf_prstatus compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; - struct compat_timeval pr_utime; - struct compat_timeval pr_stime; - struct compat_timeval pr_cutime; - struct compat_timeval pr_cstime; + struct old_timeval32 pr_utime; + struct old_timeval32 pr_stime; + struct old_timeval32 pr_cutime; + struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; #ifdef CONFIG_BINFMT_ELF_FDPIC compat_ulong_t pr_exec_fdpic_loadmap; diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 5d83d0c1d06c..bba2920e9c05 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -10,7 +10,7 @@ #include struct timespec; -struct compat_timespec; +struct old_timespec32; struct pollfd; enum timespec_type { @@ -40,7 +40,7 @@ struct restart_block { enum timespec_type type; union { struct __kernel_timespec __user *rmtp; - struct compat_timespec __user *compat_rmtp; + struct old_timespec32 __user *compat_rmtp; }; u64 expires; } nanosleep; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2ff814c92f7f..b3e27e5ee322 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -60,7 +60,7 @@ struct tms; struct utimbuf; struct mq_attr; struct compat_stat; -struct compat_timeval; +struct old_timeval32; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; diff --git a/include/linux/time32.h b/include/linux/time32.h index 92c0ca092d93..0e0d7304d1a8 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -13,6 +13,31 @@ #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +extern int get_old_timespec32(struct timespec64 *, const void __user *); +extern int put_old_timespec32(const struct timespec64 *, void __user *); +extern int 
get_old_itimerspec32(struct itimerspec64 *its, + const struct old_itimerspec32 __user *uits); +extern int put_old_itimerspec32(const struct itimerspec64 *its, + struct old_itimerspec32 __user *uits); + + #if __BITS_PER_LONG == 64 /* timespec64 is defined as timespec here */ @@ -183,18 +208,17 @@ extern struct timeval ns_to_timeval(const s64 nsec); extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); /* - * New aliases for compat time functions. These will be used to replace - * the compat code so it can be shared between 32-bit and 64-bit builds - * both of which provide compatibility with old 32-bit tasks. + * Old names for the 32-bit time_t interfaces, these will be removed + * when everything uses the new names. */ -#define old_time32_t compat_time_t -#define old_timeval32 compat_timeval -#define old_timespec32 compat_timespec -#define old_itimerspec32 compat_itimerspec -#define ns_to_old_timeval32 ns_to_compat_timeval -#define get_old_itimerspec32 get_compat_itimerspec64 -#define put_old_itimerspec32 put_compat_itimerspec64 -#define get_old_timespec32 compat_get_timespec64 -#define put_old_timespec32 compat_put_timespec64 +#define compat_time_t old_time32_t +#define compat_timeval old_timeval32 +#define compat_timespec old_timespec32 +#define compat_itimerspec old_itimerspec32 +#define ns_to_compat_timeval ns_to_old_timeval32 +#define get_compat_itimerspec64 get_old_itimerspec32 +#define put_compat_itimerspec64 put_old_itimerspec32 +#define compat_get_timespec64 get_old_timespec32 +#define compat_put_timespec64 put_old_timespec32 #endif diff --git a/ipc/mqueue.c b/ipc/mqueue.c index c0d58f390c3b..db7833370351 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -1461,10 +1461,10 @@ COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, #endif #ifdef CONFIG_COMPAT_32BIT_TIME -static int compat_prepare_timeout(const struct compat_timespec __user *p, +static int compat_prepare_timeout(const struct old_timespec32 __user *p, struct timespec64 *ts) { - if (compat_get_timespec64(ts, p)) + if (get_old_timespec32(ts, p)) return -EFAULT; if (!timespec64_valid(ts)) return -EINVAL; @@ -1474,7 +1474,7 @@ static int compat_prepare_timeout(const struct compat_timespec __user *p, COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int, msg_prio, - const struct compat_timespec __user *, u_abs_timeout) + const struct old_timespec32 __user *, u_abs_timeout) { struct timespec64 ts, *p = NULL; if (u_abs_timeout) { @@ -1489,7 +1489,7 @@ COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, compat_size_t, msg_len, unsigned int __user *, u_msg_prio, - const struct compat_timespec __user *, u_abs_timeout) + const struct old_timespec32 __user *, u_abs_timeout) { struct timespec64 ts, *p = NULL; if (u_abs_timeout) { diff --git a/ipc/msg.c b/ipc/msg.c index 883642cf2b27..0833c6405915 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -622,9 +622,9 @@ struct compat_msqid_ds { struct compat_ipc_perm msg_perm; compat_uptr_t msg_first; compat_uptr_t msg_last; - compat_time_t msg_stime; - compat_time_t msg_rtime; - compat_time_t msg_ctime; + old_time32_t msg_stime; + old_time32_t msg_rtime; + old_time32_t msg_ctime; compat_ulong_t msg_lcbytes; compat_ulong_t msg_lqbytes; unsigned short msg_cbytes; diff --git a/ipc/sem.c b/ipc/sem.c index 26f8e37fcdcb..745dc6187e84 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1698,8 +1698,8 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, 
cmd, unsigned long, arg) struct compat_semid_ds { struct compat_ipc_perm sem_perm; - compat_time_t sem_otime; - compat_time_t sem_ctime; + old_time32_t sem_otime; + old_time32_t sem_ctime; compat_uptr_t sem_base; compat_uptr_t sem_pending; compat_uptr_t sem_pending_last; @@ -2214,11 +2214,11 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, #ifdef CONFIG_COMPAT_32BIT_TIME long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, - const struct compat_timespec __user *timeout) + const struct old_timespec32 __user *timeout) { if (timeout) { struct timespec64 ts; - if (compat_get_timespec64(&ts, timeout)) + if (get_old_timespec32(&ts, timeout)) return -EFAULT; return do_semtimedop(semid, tsems, nsops, &ts); } @@ -2227,7 +2227,7 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems, unsigned int, nsops, - const struct compat_timespec __user *, timeout) + const struct old_timespec32 __user *, timeout) { return compat_ksys_semtimedop(semid, tsems, nsops, timeout); } diff --git a/ipc/shm.c b/ipc/shm.c index b0eb3757ab89..2657692199eb 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1201,9 +1201,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) struct compat_shmid_ds { struct compat_ipc_perm shm_perm; int shm_segsz; - compat_time_t shm_atime; - compat_time_t shm_dtime; - compat_time_t shm_ctime; + old_time32_t shm_atime; + old_time32_t shm_dtime; + old_time32_t shm_ctime; compat_ipc_pid_t shm_cpid; compat_ipc_pid_t shm_lpid; unsigned short shm_nattch; diff --git a/ipc/syscall.c b/ipc/syscall.c index 65d405f1ba0c..1ac06e3983c0 100644 --- a/ipc/syscall.c +++ b/ipc/syscall.c @@ -35,7 +35,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, (const struct __kernel_timespec __user *)fifth); else if (IS_ENABLED(CONFIG_COMPAT_32BIT_TIME)) return compat_ksys_semtimedop(first, ptr, second, - (const struct compat_timespec __user *)fifth); + (const struct old_timespec32 __user *)fifth); else return -ENOSYS; diff --git a/ipc/util.h b/ipc/util.h index 0a159f69b3bb..1ee81bce25e9 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -266,7 +266,7 @@ long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); /* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, - const struct compat_timespec __user *timeout); + const struct old_timespec32 __user *timeout); #ifdef CONFIG_COMPAT long compat_ksys_semctl(int semid, int semnum, int cmd, int arg); long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr); diff --git a/kernel/compat.c b/kernel/compat.c index 8e40efc2928a..089d00d0da9c 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -93,28 +93,28 @@ int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc) return 0; } -static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv) +static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv) { return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) || __get_user(tv->tv_sec, &ctv->tv_sec) || __get_user(tv->tv_usec, &ctv->tv_usec)) ? 
-EFAULT : 0; } -static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv) +static int __compat_put_timeval(const struct timeval *tv, struct old_timeval32 __user *ctv) { return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) || __put_user(tv->tv_sec, &ctv->tv_sec) || __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; } -static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts) +static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts) { return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || __get_user(ts->tv_sec, &cts->tv_sec) || __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } -static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts) +static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts) { return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) || __put_user(ts->tv_sec, &cts->tv_sec) || diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 83f830acbb5f..410a77a8f6e2 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -173,7 +173,7 @@ err_unlock: } COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, - struct compat_timespec __user *, utime, u32 __user *, uaddr2, + struct old_timespec32 __user *, utime, u32 __user *, uaddr2, u32, val3) { struct timespec ts; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 625bc9897f62..8287b75ed961 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5257,13 +5257,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, compat_pid_t, pid, - struct compat_timespec __user *, interval) + struct old_timespec32 __user *, interval) { struct timespec64 t; int retval = sched_rr_get_interval(pid, &t); if (retval == 0) - retval = compat_put_timespec64(&t, interval); + retval = put_old_timespec32(&t, interval); return retval; } #endif diff --git a/kernel/signal.c b/kernel/signal.c index 5843c541fda9..a4db724e14c1 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3173,7 +3173,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, struct compat_siginfo __user *, uinfo, - struct compat_timespec __user *, uts, compat_size_t, sigsetsize) + struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) { sigset_t s; struct timespec t; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index e1a549c9e399..9cdd74bd2d27 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1660,7 +1660,7 @@ int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) switch(restart->nanosleep.type) { #ifdef CONFIG_COMPAT_32BIT_TIME case TT_COMPAT: - if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp)) + if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp)) return -EFAULT; break; #endif @@ -1780,12 +1780,12 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp, #ifdef CONFIG_COMPAT_32BIT_TIME -COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, - struct compat_timespec __user *, rmtp) +COMPAT_SYSCALL_DEFINE2(nanosleep, struct old_timespec32 __user *, rqtp, + struct old_timespec32 __user *, rmtp) { struct timespec64 tu; - if (compat_get_timespec64(&tu, rqtp)) + if (get_old_timespec32(&tu, rqtp)) return -EFAULT; if (!timespec64_valid(&tu)) diff --git 
a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index 2c6847d5d69b..989ccf028bde 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -162,20 +162,20 @@ COMPAT_SYS_NI(setitimer); #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { struct timespec64 new_tp; if (which_clock != CLOCK_REALTIME) return -EINVAL; - if (compat_get_timespec64(&new_tp, tp)) + if (get_old_timespec32(&new_tp, tp)) return -EFAULT; return do_sys_settimeofday64(&new_tp, NULL); } COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { int ret; struct timespec64 kernel_tp; @@ -184,13 +184,13 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, if (ret) return ret; - if (compat_put_timespec64(&kernel_tp, tp)) + if (put_old_timespec32(&kernel_tp, tp)) return -EFAULT; return 0; } COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { struct timespec64 rtn_tp = { .tv_sec = 0, @@ -201,7 +201,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_BOOTTIME: - if (compat_put_timespec64(&rtn_tp, tp)) + if (put_old_timespec32(&rtn_tp, tp)) return -EFAULT; return 0; default: @@ -210,8 +210,8 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, } COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, - struct compat_timespec __user *, rqtp, - struct compat_timespec __user *, rmtp) + struct old_timespec32 __user *, rqtp, + struct old_timespec32 __user *, rmtp) { struct timespec64 t; @@ -224,7 +224,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, return -EINVAL; } - if (compat_get_timespec64(&t, rqtp)) + if (get_old_timespec32(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 4b9127e95430..3e71921668ba 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -755,13 +755,13 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, - struct compat_itimerspec __user *, setting) + struct old_itimerspec32 __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { - if (put_compat_itimerspec64(&cur_setting, setting)) + if (put_old_itimerspec32(&cur_setting, setting)) ret = -EFAULT; } return ret; @@ -928,8 +928,8 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, - struct compat_itimerspec __user *, new, - struct compat_itimerspec __user *, old) + struct old_itimerspec32 __user *, new, + struct old_itimerspec32 __user *, old) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old ? 
&old_spec : NULL; @@ -937,12 +937,12 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, if (!new) return -EINVAL; - if (get_compat_itimerspec64(&new_spec, new)) + if (get_old_itimerspec32(&new_spec, new)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old) { - if (put_compat_itimerspec64(&old_spec, old)) + if (put_old_itimerspec32(&old_spec, old)) error = -EFAULT; } return error; @@ -1115,7 +1115,7 @@ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; @@ -1123,14 +1123,14 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock, if (!kc || !kc->clock_set) return -EINVAL; - if (compat_get_timespec64(&ts, tp)) + if (get_old_timespec32(&ts, tp)) return -EFAULT; return kc->clock_set(which_clock, &ts); } COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; @@ -1141,7 +1141,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock, err = kc->clock_get(which_clock, &ts); - if (!err && compat_put_timespec64(&ts, tp)) + if (!err && put_old_timespec32(&ts, tp)) err = -EFAULT; return err; @@ -1180,7 +1180,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, - struct compat_timespec __user *, tp) + struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; @@ -1190,7 +1190,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock, return -EINVAL; err = kc->clock_getres(which_clock, &ts); - if (!err && tp && compat_put_timespec64(&ts, tp)) + if (!err && tp && put_old_timespec32(&ts, tp)) return -EFAULT; return err; @@ -1237,8 +1237,8 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, - struct compat_timespec __user *, rqtp, - struct compat_timespec __user *, rmtp) + struct old_timespec32 __user *, rqtp, + struct old_timespec32 __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; @@ -1248,7 +1248,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, if (!kc->nsleep) return -EOPNOTSUPP; - if (compat_get_timespec64(&t, rqtp)) + if (get_old_timespec32(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) diff --git a/kernel/time/time.c b/kernel/time/time.c index de332250d6fa..f1983f468fe3 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -104,12 +104,12 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr) #ifdef CONFIG_COMPAT #ifdef __ARCH_WANT_COMPAT_SYS_TIME -/* compat_time_t is a 32 bit "long" and needs to get converted. */ -COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc) +/* old_time32_t is a 32 bit "long" and needs to get converted. 
*/ +COMPAT_SYSCALL_DEFINE1(time, old_time32_t __user *, tloc) { - compat_time_t i; + old_time32_t i; - i = (compat_time_t)ktime_get_real_seconds(); + i = (old_time32_t)ktime_get_real_seconds(); if (tloc) { if (put_user(i,tloc)) @@ -119,7 +119,7 @@ COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc) return i; } -COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr) +COMPAT_SYSCALL_DEFINE1(stime, old_time32_t __user *, tptr) { struct timespec64 tv; int err; @@ -225,7 +225,7 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, } #ifdef CONFIG_COMPAT -COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, +COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { @@ -244,7 +244,7 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv, return 0; } -COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv, +COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec64 new_ts; @@ -863,10 +863,10 @@ int put_timespec64(const struct timespec64 *ts, } EXPORT_SYMBOL_GPL(put_timespec64); -int __compat_get_timespec64(struct timespec64 *ts64, - const struct compat_timespec __user *cts) +int __get_old_timespec32(struct timespec64 *ts64, + const struct old_timespec32 __user *cts) { - struct compat_timespec ts; + struct old_timespec32 ts; int ret; ret = copy_from_user(&ts, cts, sizeof(ts)); @@ -879,33 +879,33 @@ int __compat_get_timespec64(struct timespec64 *ts64, return 0; } -int __compat_put_timespec64(const struct timespec64 *ts64, - struct compat_timespec __user *cts) +int __put_old_timespec32(const struct timespec64 *ts64, + struct old_timespec32 __user *cts) { - struct compat_timespec ts = { + struct old_timespec32 ts = { .tv_sec = ts64->tv_sec, .tv_nsec = ts64->tv_nsec }; return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0; } -int compat_get_timespec64(struct timespec64 *ts, const void __user *uts) +int get_old_timespec32(struct timespec64 *ts, const void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0; else - return __compat_get_timespec64(ts, uts); + return __get_old_timespec32(ts, uts); } -EXPORT_SYMBOL_GPL(compat_get_timespec64); +EXPORT_SYMBOL_GPL(get_old_timespec32); -int compat_put_timespec64(const struct timespec64 *ts, void __user *uts) +int put_old_timespec32(const struct timespec64 *ts, void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_to_user(uts, ts, sizeof(*ts)) ? 
-EFAULT : 0; else - return __compat_put_timespec64(ts, uts); + return __put_old_timespec32(ts, uts); } -EXPORT_SYMBOL_GPL(compat_put_timespec64); +EXPORT_SYMBOL_GPL(put_old_timespec32); int get_itimerspec64(struct itimerspec64 *it, const struct __kernel_itimerspec __user *uit) @@ -937,23 +937,23 @@ int put_itimerspec64(const struct itimerspec64 *it, } EXPORT_SYMBOL_GPL(put_itimerspec64); -int get_compat_itimerspec64(struct itimerspec64 *its, - const struct compat_itimerspec __user *uits) +int get_old_itimerspec32(struct itimerspec64 *its, + const struct old_itimerspec32 __user *uits) { - if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) || - __compat_get_timespec64(&its->it_value, &uits->it_value)) + if (__get_old_timespec32(&its->it_interval, &uits->it_interval) || + __get_old_timespec32(&its->it_value, &uits->it_value)) return -EFAULT; return 0; } -EXPORT_SYMBOL_GPL(get_compat_itimerspec64); +EXPORT_SYMBOL_GPL(get_old_itimerspec32); -int put_compat_itimerspec64(const struct itimerspec64 *its, - struct compat_itimerspec __user *uits) +int put_old_itimerspec32(const struct itimerspec64 *its, + struct old_itimerspec32 __user *uits) { - if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) || - __compat_put_timespec64(&its->it_value, &uits->it_value)) + if (__put_old_timespec32(&its->it_interval, &uits->it_interval) || + __put_old_timespec32(&its->it_value, &uits->it_value)) return -EFAULT; return 0; } -EXPORT_SYMBOL_GPL(put_compat_itimerspec64); +EXPORT_SYMBOL_GPL(put_old_itimerspec32); diff --git a/net/compat.c b/net/compat.c index 3b2105f6549d..2ef160876bc1 100644 --- a/net/compat.c +++ b/net/compat.c @@ -812,7 +812,7 @@ COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, - struct compat_timespec __user *timeout) + struct old_timespec32 __user *timeout) { int datagrams; struct timespec ktspec; @@ -834,7 +834,7 @@ static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, - struct compat_timespec __user *, timeout) + struct old_timespec32 __user *, timeout) { return __compat_sys_recvmmsg(fd, mmsg, vlen, flags, timeout); } -- cgit v1.2.3 From e06fa9c16ce4b740996189fa5610eabcee734e6c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 24 Aug 2018 22:08:50 +0200 Subject: bpf, sockmap: fix potential use after free in bpf_tcp_close bpf_tcp_close() we pop the psock linkage to a map via psock_map_pop(). A parallel update on the sock hash map can happen between psock_map_pop() and lookup_elem_raw() where we override the element under link->hash / link->key. In bpf_tcp_close()'s lookup_elem_raw() we subsequently only test whether an element is present, but we do not test whether the element is infact the element we were looking for. We lock the sock in bpf_tcp_close() during that time, so do we hold the lock in sock_hash_update_elem(). However, the latter locks the sock which is newly updated, not the one we're purging from the hash table. This means that while one CPU is doing the lookup from bpf_tcp_close(), another CPU is doing the map update in parallel, dropped our sock from the hlist and released the psock. Subsequently the first CPU will find the new sock and attempts to drop and release the old sock yet another time. 
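To make the race concrete, here is a minimal user-space sketch (not part of the patch; struct elem, lookup() and close_one() are made-up stand-ins for the sockhash internals) of why freeing whatever the lookup returns is unsafe once a parallel update may have replaced the element under the same key:

#include <stdio.h>

/* Illustrative element type; the real code deals in struct htab_elem. */
struct elem {
        int key;
        struct elem *next;
};

/* Toy lookup: returns whichever element currently occupies 'key'. */
static struct elem *lookup(struct elem *head, int key)
{
        struct elem *e;

        for (e = head; e; e = e->next)
                if (e->key == key)
                        return e;
        return NULL;
}

/*
 * Teardown path: a parallel update may have replaced our element with a
 * new one under the same key, so freeing "whatever lookup returned" could
 * free somebody else's element. Only proceed when the looked-up element
 * is the one this socket linked earlier.
 */
static void close_one(struct elem *head, struct elem *link)
{
        struct elem *l = lookup(head, link->key);

        if (l && l == link)             /* same shape as the check in the patch */
                printf("freeing our own element %d\n", link->key);
        else
                printf("element was replaced concurrently, skip the free\n");
}

int main(void)
{
        struct elem newer = { .key = 1, .next = NULL }; /* concurrent replacement */
        struct elem ours  = { .key = 1, .next = NULL }; /* what we linked earlier */

        close_one(&newer, &ours);       /* lookup finds 'newer', not 'ours' */
        return 0;
}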
Fix is that we need to check the elements for a match after lookup, similar as we do in the sock map. Note that the hash tab elems are freed via RCU, so access to their link->hash / link->key is fine since we're under RCU read side there. Fixes: e9db4ef6bf4c ("bpf: sockhash fix omitted bucket lock in sock_close") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- kernel/bpf/sockmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index cf5195c7c331..01879e4d599a 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout) /* If another thread deleted this object skip deletion. * The refcnt on psock may or may not be zero. */ - if (l) { + if (l && l == link) { hlist_del_rcu(&link->hash_node); smap_release_sock(psock, link->sk); free_htab_elem(htab, link); -- cgit v1.2.3 From 15c480efab01197c965ce0562a43ffedd852b8f9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 24 Aug 2018 22:08:51 +0200 Subject: bpf, sockmap: fix psock refcount leak in bpf_tcp_recvmsg In bpf_tcp_recvmsg() we first took a reference on the psock, however once we find that there are skbs in the normal socket's receive queue we return with processing them through tcp_recvmsg(). Problem is that we leak the taken reference on the psock in that path. Given we don't really do anything with the psock at this point, move the skb_queue_empty() test before we fetch the psock to fix this case. Fixes: 8934ce2fd081 ("bpf: sockmap redirect ingress support") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- kernel/bpf/sockmap.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 01879e4d599a..26d8a3053407 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -912,6 +912,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); rcu_read_lock(); psock = smap_psock_sk(sk); @@ -922,9 +924,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out; rcu_read_unlock(); - if (!skb_queue_empty(&sk->sk_receive_queue)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - lock_sock(sk); bytes_ready: while (copied != len) { -- cgit v1.2.3 From 501ca81760c204ec59b73e4a00bee5971fc0f1b1 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 24 Aug 2018 17:37:00 -0700 Subject: bpf: sockmap, decrement copied count correctly in redirect error case Currently, when a redirect occurs in sockmap and an error occurs in the redirect call we unwind the scatterlist once in the error path of bpf_tcp_sendmsg_do_redirect() and then again in sendmsg(). Then in the error path of sendmsg we decrement the copied count by the send size. However, its possible we partially sent data before the error was generated. This can happen if do_tcp_sendpages() partially sends the scatterlist before encountering a memory pressure error. If this happens we need to decrement the copied value (the value tracking how many bytes were actually sent to TCP stack) by the number of remaining bytes _not_ the entire send size. Otherwise we risk confusing userspace. 
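The accounting being fixed is plain arithmetic; a hedged, standalone sketch of it follows (adjust_copied() and its variable names are illustrative, not kernel identifiers):

#include <stdio.h>

/*
 * 'send' bytes were handed to the redirect path but only 'sent' of them
 * reached the TCP stack before the error. Only the unsent remainder --
 * what free_start_sg() reports back -- may be subtracted from 'copied',
 * otherwise userspace is told fewer bytes succeeded than really did.
 */
static int adjust_copied(int copied, int send, int sent)
{
        int remaining = send - sent;

        return copied - remaining;
}

int main(void)
{
        /* 8192 bytes submitted, 3072 already pushed before -ENOMEM. */
        printf("copied reported to userspace: %d\n",
               adjust_copied(8192, 8192, 3072));
        return 0;
}

When nothing was sent at all, the remainder equals the whole send size, which matches the old behaviour.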
Also we don't need two calls to free the scatterlist one is good enough. So remove the one in bpf_tcp_sendmsg_do_redirect() and then properly reduce copied by the number of remaining bytes which may in fact be the entire send size if no bytes were sent. To do this use bool to indicate if free_start_sg() should do mem accounting or not. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 26d8a3053407..ce63e5801746 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk) } static void smap_release_sock(struct smap_psock *psock, struct sock *sock); -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md); +static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); static void bpf_tcp_release(struct sock *sk) { @@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk) goto out; if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); psock->cork = NULL; } @@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout) close_fun = psock->save_close; if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); psock->cork = NULL; } list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { list_del(&md->list); - free_start_sg(psock->sock, md); + free_start_sg(psock->sock, md, true); kfree(md); } @@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes, md->sg_start = i; } -static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) +static int free_sg(struct sock *sk, int start, + struct sk_msg_buff *md, bool charge) { struct scatterlist *sg = md->sg_data; int i = start, free = 0; while (sg[i].length) { free += sg[i].length; - sk_mem_uncharge(sk, sg[i].length); + if (charge) + sk_mem_uncharge(sk, sg[i].length); if (!md->skb) put_page(sg_page(&sg[i])); sg[i].length = 0; @@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) return free; } -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) +static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge) { - int free = free_sg(sk, md->sg_start, md); + int free = free_sg(sk, md->sg_start, md, charge); md->sg_start = md->sg_end; return free; @@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) { - return free_sg(sk, md->sg_curr, md); + return free_sg(sk, md->sg_curr, md, true); } static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) @@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, list_add_tail(&r->list, &psock->ingress); sk->sk_data_ready(sk); } else { - free_start_sg(sk, r); + free_start_sg(sk, r, true); kfree(r); } @@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, release_sock(sk); } smap_release_sock(psock, sk); - if (unlikely(err)) - goto out; - return 0; + return err; out_rcu: rcu_read_unlock(); -out: - free_bytes_sg(NULL, send, md, false); - return err; + return 0; } static inline void bpf_md_init(struct smap_psock *psock) @@ -822,7 +820,7 @@ more_data: case __SK_PASS: err = bpf_tcp_push(sk, send, m, 
flags, true); if (unlikely(err)) { - *copied -= free_start_sg(sk, m); + *copied -= free_start_sg(sk, m, true); break; } @@ -845,16 +843,17 @@ more_data: lock_sock(sk); if (unlikely(err < 0)) { - free_start_sg(sk, m); + int free = free_start_sg(sk, m, false); + psock->sg_size = 0; if (!cork) - *copied -= send; + *copied -= free; } else { psock->sg_size -= send; } if (cork) { - free_start_sg(sk, m); + free_start_sg(sk, m, true); psock->sg_size = 0; kfree(m); m = NULL; @@ -1121,7 +1120,7 @@ wait_for_memory: err = sk_stream_wait_memory(sk, &timeo); if (err) { if (m && m != psock->cork) - free_start_sg(sk, m); + free_start_sg(sk, m, true); goto out_err; } } @@ -1580,13 +1579,13 @@ static void smap_gc_work(struct work_struct *w) bpf_prog_put(psock->bpf_tx_msg); if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); } list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { list_del(&md->list); - free_start_sg(psock->sock, md); + free_start_sg(psock->sock, md, true); kfree(md); } -- cgit v1.2.3 From 743f5cdb6cec33d2300922f6b1b1670a572595ad Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Wed, 29 Aug 2018 20:50:52 +0800 Subject: y2038: __get_old_timespec32() can be static The kbuild test robot reports two new warnings with the previous patch: kernel/time/time.c:866:5: sparse: symbol '__get_old_timespec32' was not declared. Should it be static? kernel/time/time.c:882:5: sparse: symbol '__put_old_timespec32' was not declared. Should it be static? These are actually older bugs, but came up now after the symbol got renamed. Fortunately, commit afef05cf238c ("time: Enable get/put_compat_itimerspec64 always") makes the two functions (__compat_get_timespec64/__compat_get_timespec64) local to time.c already, so we can mark them as 'static'. Fixes: ee16c8f415e4 ("y2038: Globally rename compat_time to old_time32") Signed-off-by: kbuild test robot [arnd: added changelog text] Signed-off-by: Arnd Bergmann --- kernel/time/time.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/time.c b/kernel/time/time.c index f1983f468fe3..e3a7f7fd3abc 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -863,7 +863,7 @@ int put_timespec64(const struct timespec64 *ts, } EXPORT_SYMBOL_GPL(put_timespec64); -int __get_old_timespec32(struct timespec64 *ts64, +static int __get_old_timespec32(struct timespec64 *ts64, const struct old_timespec32 __user *cts) { struct old_timespec32 ts; @@ -879,7 +879,7 @@ int __get_old_timespec32(struct timespec64 *ts64, return 0; } -int __put_old_timespec32(const struct timespec64 *ts64, +static int __put_old_timespec32(const struct timespec64 *ts64, struct old_timespec32 __user *cts) { struct old_timespec32 ts = { -- cgit v1.2.3 From 474b9c777b20b8340a6ee0f7ba6ebbd6a4bf47e2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 17 Apr 2018 21:59:47 +0200 Subject: y2038: sched: Change sched_rr_get_interval to use __kernel_timespec This is a preparation patch for converting sys_sched_rr_get_interval to work with 64-bit time_t on 32-bit architectures. The 'interval' argument is changed to struct __kernel_timespec, which will be redefined using 64-bit time_t in the future. The compat version of the system call in turn is enabled for compilation with CONFIG_COMPAT_32BIT_TIME so the individual 32-bit architectures can share the handling of the traditional argument with 64-bit architectures providing it for their compat mode. 
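For background on why the argument width matters at all: a signed 32-bit seconds counter runs out early in 2038. A small standalone program (plain user space, not kernel code) prints the last second such a counter can represent, assuming the build host itself has a 64-bit time_t:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
        /* Largest value an old 32-bit time_t (old_time32_t) can hold. */
        time_t last = (time_t)INT32_MAX;
        struct tm tm;

        gmtime_r(&last, &tm);
        printf("32-bit time_t ends at %04d-%02d-%02d %02d:%02d:%02d UTC\n",
               tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
               tm.tm_hour, tm.tm_min, tm.tm_sec);
        return 0;
}

Widening the syscall argument to struct __kernel_timespec keeps the interface usable past that date.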
Signed-off-by: Arnd Bergmann --- include/linux/syscalls.h | 2 +- kernel/sched/core.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 95e795fb0593..5642016a312d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -614,7 +614,7 @@ asmlinkage long sys_sched_yield(void); asmlinkage long sys_sched_get_priority_max(int policy); asmlinkage long sys_sched_get_priority_min(int policy); asmlinkage long sys_sched_rr_get_interval(pid_t pid, - struct timespec __user *interval); + struct __kernel_timespec __user *interval); /* kernel/signal.c */ asmlinkage long sys_restart_syscall(void); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8287b75ed961..39af2bec2b39 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5243,7 +5243,7 @@ out_unlock: * an error code. */ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, - struct timespec __user *, interval) + struct __kernel_timespec __user *, interval) { struct timespec64 t; int retval = sched_rr_get_interval(pid, &t); @@ -5254,7 +5254,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, return retval; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, compat_pid_t, pid, struct old_timespec32 __user *, interval) -- cgit v1.2.3 From 49c39f8464a9af702e9d45800c00a572753aeb06 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Apr 2018 15:56:13 +0200 Subject: y2038: signal: Change rt_sigtimedwait to use __kernel_timespec This changes sys_rt_sigtimedwait() to use get_timespec64(), changing the timeout type to __kernel_timespec, which will be changed to use a 64-bit time_t in the future. Since the do_sigtimedwait() core function changes, we also have to modify the compat version of this system call in the same way. 
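With this change do_sigtimedwait() validates the timeout via timespec64_valid(), whose rule is just a range check on the two fields. A rough user-space sketch of that check (struct ts64 and ts64_valid() are stand-in names, not the kernel definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts64 {                   /* stand-in for struct timespec64 */
        int64_t tv_sec;
        long    tv_nsec;
};

/* Roughly what timespec64_valid() demands of a user-supplied timeout. */
static bool ts64_valid(const struct ts64 *ts)
{
        if (ts->tv_sec < 0)
                return false;   /* negative timeouts are rejected */
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return false;   /* nanoseconds must stay below one second */
        return true;
}

int main(void)
{
        struct ts64 ok  = { .tv_sec = 5, .tv_nsec = 250000000 };
        struct ts64 bad = { .tv_sec = 5, .tv_nsec = NSEC_PER_SEC };

        printf("ok=%d bad=%d\n", ts64_valid(&ok), ts64_valid(&bad));
        return 0;
}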
Signed-off-by: Arnd Bergmann --- include/linux/syscalls.h | 2 +- kernel/signal.c | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index fb3a05fab8aa..2ac3d13a915b 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -635,7 +635,7 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set, asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, siginfo_t __user *uinfo, - const struct timespec __user *uts, + const struct __kernel_timespec __user *uts, size_t sigsetsize); asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); diff --git a/kernel/signal.c b/kernel/signal.c index a4db724e14c1..0831d56a731a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3082,7 +3082,7 @@ int copy_siginfo_from_user32(struct siginfo *to, * @ts: upper bound on process time suspension */ static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, - const struct timespec *ts) + const struct timespec64 *ts) { ktime_t *to = NULL, timeout = KTIME_MAX; struct task_struct *tsk = current; @@ -3090,9 +3090,9 @@ static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, int sig, ret = 0; if (ts) { - if (!timespec_valid(ts)) + if (!timespec64_valid(ts)) return -EINVAL; - timeout = timespec_to_ktime(*ts); + timeout = timespec64_to_ktime(*ts); to = &timeout; } @@ -3140,11 +3140,12 @@ static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, * @sigsetsize: size of sigset_t type */ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, - siginfo_t __user *, uinfo, const struct timespec __user *, uts, + siginfo_t __user *, uinfo, + const struct __kernel_timespec __user *, uts, size_t, sigsetsize) { sigset_t these; - struct timespec ts; + struct timespec64 ts; siginfo_t info; int ret; @@ -3156,7 +3157,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, return -EFAULT; if (uts) { - if (copy_from_user(&ts, uts, sizeof(ts))) + if (get_timespec64(&ts, uts)) return -EFAULT; } @@ -3176,7 +3177,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) { sigset_t s; - struct timespec t; + struct timespec64 t; siginfo_t info; long ret; @@ -3187,7 +3188,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, return -EFAULT; if (uts) { - if (compat_get_timespec(&t, uts)) + if (get_old_timespec32(&t, uts)) return -EFAULT; } -- cgit v1.2.3 From efbaec89c642cd1d4977fc6df9923697e1598d4e Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 28 Aug 2018 07:42:32 +0000 Subject: bpf: remove duplicated include from syscall.c Remove duplicated include. Signed-off-by: YueHaibing Signed-off-by: Daniel Borkmann --- kernel/bpf/syscall.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8339d81cba1d..3c9636f03bb2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ -- cgit v1.2.3 From 444da518fd554eb1b9875dc97fac6ec249cee330 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 4 Jul 2018 14:14:42 -0700 Subject: rcutorture: Force occasional reader waits Deferred quiescent states can interact with the scheduler, but rcu_torture_reader() does not force such interaction all that frequently. This commit therefore blocks for one jiffy after ten jiffies of read-side runtime. This has the beneficial effect of being most likely to block just after long-running readers, and it is exactly these readers that are most likely to have been preempted (in CONFIG_PREEMPT=y kernels). This in turn helps increase the probability that a deferred quiescent state will be seen by RCU's context-switch hooks. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index c596c6f1e457..50a4f0ed4ebf 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1387,6 +1387,7 @@ static void rcu_torture_timer(struct timer_list *unused) static int rcu_torture_reader(void *arg) { + unsigned long lastsleep = jiffies; DEFINE_TORTURE_RANDOM(rand); struct timer_list t; @@ -1402,6 +1403,10 @@ rcu_torture_reader(void *arg) } if (!rcu_torture_one_read(&rand)) schedule_timeout_interruptible(HZ); + if (time_after(jiffies, lastsleep)) { + schedule_timeout_interruptible(1); + lastsleep = jiffies + 10; + } stutter_wait("rcu_torture_reader"); } while (!torture_must_stop()); if (irqreader && cur_ops->irq_capable) { -- cgit v1.2.3 From e746b558572efbad250e35e582a32ecabc9e9316 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 17:35:22 -0700 Subject: rcutorture: Warn on bad torture type for built-in tests When running a built-in rcutorture test, specifying an invalid torture type results in what looks like a hard hang, with the error messages hidden by other boot-time output. This commit therefore executes a WARN_ON() in this case so that the splat appears just following the error messages. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 50a4f0ed4ebf..5df2411f7aee 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1968,6 +1968,7 @@ rcu_torture_init(void) for (i = 0; i < ARRAY_SIZE(torture_ops); i++) pr_cont(" %s", torture_ops[i]->name); pr_cont("\n"); + WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); firsterr = -EINVAL; goto unwind; } -- cgit v1.2.3 From f0288064425ff9a5e05c8c0fdba6ec7681dd3330 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:26:50 -0700 Subject: rcuperf: Warn on bad perf type for built-in tests When running a built-in rcuperf test, specifying an invalid perf type results in what looks like a hard hang, with the error messages hidden by other boot-time output. This commit therefore executes a WARN_ON() in this case so that the splat appears just following the error messages. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcuperf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 34244523550e..832ce68fd45f 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -680,6 +680,7 @@ rcu_perf_init(void) for (i = 0; i < ARRAY_SIZE(perf_ops); i++) pr_cont(" %s", perf_ops[i]->name); pr_cont("\n"); + WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST)); firsterr = -EINVAL; goto unwind; } -- cgit v1.2.3 From 1b27291b1ea4f1f2090fb07c3425db474cdb99ba Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 18 Jul 2018 14:32:31 -0700 Subject: rcutorture: Add forward-progress tests for RCU grace periods This commit adds a kthread that loops going into and out of RCU read-side critical sections, but also including a cond_resched(), optionally guarded by a check of need_resched(), in that same loop. This commit relies solely on rcu_torture_writer() progress to judge the forward progress of grace periods. Note that Tasks RCU and SRCU are exempted from forward-progress testing due their (intentionally) less-robust forward-progress guarantees. Signed-off-by: Paul E. McKenney --- include/linux/rcutiny.h | 1 + kernel/rcu/rcutorture.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++- kernel/rcu/update.c | 1 + 3 files changed, 74 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8d9a0ea8f0b5..a6353f3d6094 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -108,6 +108,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) */ static inline void rcu_virt_note_context_switch(int cpu) { } static inline void rcu_cpu_stall_reset(void) { } +static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } static inline void rcu_idle_enter(void) { } static inline void rcu_idle_exit(void) { } static inline void rcu_irq_enter(void) { } diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 5df2411f7aee..fd3ce6cc8eea 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -89,6 +89,12 @@ torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable"); torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); +torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); +torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); +torture_param(int, fwd_progress_holdoff, 60, + "Time between forward-progress tests (s)"); +torture_param(bool, fwd_progress_need_resched, 1, + "Hide cond_resched() behind need_resched()"); torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); torture_param(bool, gp_normal, false, @@ -137,6 +143,7 @@ static struct task_struct **cbflood_task; static struct task_struct *fqs_task; static struct task_struct *boost_tasks[NR_CPUS]; static struct task_struct *stall_task; +static struct task_struct *fwd_prog_task; static struct task_struct **barrier_cbs_tasks; static struct task_struct *barrier_task; @@ -291,6 +298,7 @@ struct rcu_torture_ops { void (*cb_barrier)(void); void (*fqs)(void); void (*stats)(void); + int (*stall_dur)(void); int irq_capable; int can_boost; int extendables; @@ -429,6 +437,7 @@ static struct rcu_torture_ops rcu_ops = { .cb_barrier = rcu_barrier, .fqs = rcu_force_quiescent_state, .stats = NULL, + .stall_dur = rcu_jiffies_till_stall_check, 
.irq_capable = 1, .can_boost = rcu_can_boost(), .name = "rcu" @@ -1116,7 +1125,8 @@ rcu_torture_writer(void *arg) break; } } - rcu_torture_current_version++; + WRITE_ONCE(rcu_torture_current_version, + rcu_torture_current_version + 1); /* Cycle through nesting levels of rcu_expedite_gp() calls. */ if (can_expedite && !(torture_random(&rand) & 0xff & (!!expediting - 1))) { @@ -1660,6 +1670,63 @@ static int __init rcu_torture_stall_init(void) return torture_create_kthread(rcu_torture_stall, NULL, stall_task); } +/* Carry out grace-period forward-progress testing. */ +static int rcu_torture_fwd_prog(void *args) +{ + unsigned long cvar; + int idx; + unsigned long stopat; + bool tested = false; + + VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); + do { + schedule_timeout_interruptible(fwd_progress_holdoff * HZ); + cvar = READ_ONCE(rcu_torture_current_version); + stopat = jiffies + cur_ops->stall_dur() / fwd_progress_div; + while (time_before(jiffies, stopat) && !torture_must_stop()) { + idx = cur_ops->readlock(); + udelay(10); + cur_ops->readunlock(idx); + if (!fwd_progress_need_resched || need_resched()) + cond_resched(); + } + if (!time_before(jiffies, stopat) && !torture_must_stop()) { + tested = true; + WARN_ON_ONCE(cvar == + READ_ONCE(rcu_torture_current_version)); + } + /* Avoid slow periods, better to test when busy. */ + stutter_wait("rcu_torture_fwd_prog"); + } while (!torture_must_stop()); + WARN_ON(!tested); + torture_kthread_stopping("rcu_torture_fwd_prog"); + return 0; +} + +/* If forward-progress checking is requested and feasible, spawn the thread. */ +static int __init rcu_torture_fwd_prog_init(void) +{ + if (!fwd_progress) + return 0; /* Not requested, so don't do it. */ + if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) { + VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); + return 0; + } + if (stall_cpu > 0) { + VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); + if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) + return -EINVAL; /* In module, can fail back to user. */ + WARN_ON(1); /* Make sure rcutorture notices conflict. */ + return 0; + } + if (fwd_progress_holdoff <= 0) + fwd_progress_holdoff = 1; + if (fwd_progress_div <= 0) + fwd_progress_div = 4; + return torture_create_kthread(rcu_torture_fwd_prog, + NULL, fwd_prog_task); +} + /* Callback function for RCU barrier testing. */ static void rcu_torture_barrier_cbf(struct rcu_head *rcu) { @@ -1833,6 +1900,7 @@ rcu_torture_cleanup(void) } rcu_torture_barrier_cleanup(); + torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); torture_stop_kthread(rcu_torture_stall, stall_task); torture_stop_kthread(rcu_torture_writer, writer_task); @@ -2104,6 +2172,9 @@ rcu_torture_init(void) if (firsterr) goto unwind; firsterr = rcu_torture_stall_init(); + if (firsterr) + goto unwind; + firsterr = rcu_torture_fwd_prog_init(); if (firsterr) goto unwind; firsterr = rcu_torture_barrier_init(); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 39cb23d22109..a6b860422d18 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -472,6 +472,7 @@ int rcu_jiffies_till_stall_check(void) } return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; } +EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check); void rcu_sysrq_start(void) { -- cgit v1.2.3 From 119248bec9d318ae41da8ab8f400f07e7a610cc3 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 18 Jul 2018 15:39:37 -0700 Subject: rcutorture: Also use GP sequence to judge forward progress Currently, rcutorture relies solely on the progress of rcu_torture_writer() to judge grace-period forward progress. In theory, this is the gold standard of forward progress, but in practice rcutorture separately detects and reports rcu_torture_writer() stalls. This commit therefore adds the grace-period sequence number (when provided) to the judgment of grace-period forward progress, which makes it easier to distinguish between failure of actual grace periods to progress on the one hand and downstream forward-progress failures on the other. For example, given this change, if rcu_torture_writer() stalls, but rcu_torture_fwd_prog() does not complain, then the grace-period computation is working, which is a hint that the failure lies in callback processing, wakeup of the rcu_torture_writer() kthread, or similar. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index fd3ce6cc8eea..dee7b45b2186 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1673,7 +1673,8 @@ static int __init rcu_torture_stall_init(void) /* Carry out grace-period forward-progress testing. */ static int rcu_torture_fwd_prog(void *args) { - unsigned long cvar; + unsigned long cver; + unsigned long gps; int idx; unsigned long stopat; bool tested = false; @@ -1681,7 +1682,8 @@ static int rcu_torture_fwd_prog(void *args) VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); do { schedule_timeout_interruptible(fwd_progress_holdoff * HZ); - cvar = READ_ONCE(rcu_torture_current_version); + cver = READ_ONCE(rcu_torture_current_version); + gps = cur_ops->get_gp_seq(); stopat = jiffies + cur_ops->stall_dur() / fwd_progress_div; while (time_before(jiffies, stopat) && !torture_must_stop()) { idx = cur_ops->readlock(); @@ -1692,8 +1694,9 @@ static int rcu_torture_fwd_prog(void *args) } if (!time_before(jiffies, stopat) && !torture_must_stop()) { tested = true; - WARN_ON_ONCE(cvar == - READ_ONCE(rcu_torture_current_version)); + cver = cver == READ_ONCE(rcu_torture_current_version); + gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); + WARN_ON_ONCE(cver && gps < 2); } /* Avoid slow periods, better to test when busy. */ stutter_wait("rcu_torture_fwd_prog"); -- cgit v1.2.3 From 152f4afbfd58f8ada7591113129aa6ba7fe114c5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Jul 2018 10:57:58 -0700 Subject: rcutorture: Avoid no-test complaint if too few forward-progress tries In a too-short test, random delays can cause each attempt to do forward-progress testing to fail to complete, thus resulting in spurious splats. This commit therefore requires at least five tries before complaining about rcutorture runs that failed to produce at least one valid forward-progress testing attempt. Note that actual forward-progress failures will splat regardless of the number of tries. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index dee7b45b2186..8ab23143c244 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1678,6 +1678,7 @@ static int rcu_torture_fwd_prog(void *args) int idx; unsigned long stopat; bool tested = false; + int tested_tries = 0; VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); do { @@ -1692,6 +1693,7 @@ static int rcu_torture_fwd_prog(void *args) if (!fwd_progress_need_resched || need_resched()) cond_resched(); } + tested_tries++; if (!time_before(jiffies, stopat) && !torture_must_stop()) { tested = true; cver = cver == READ_ONCE(rcu_torture_current_version); @@ -1701,7 +1703,8 @@ static int rcu_torture_fwd_prog(void *args) /* Avoid slow periods, better to test when busy. */ stutter_wait("rcu_torture_fwd_prog"); } while (!torture_must_stop()); - WARN_ON(!tested); + /* Short runs might not contain a valid forward-progress attempt. */ + WARN_ON(!tested && tested_tries >= 5); torture_kthread_stopping("rcu_torture_fwd_prog"); return 0; } -- cgit v1.2.3 From 08a7a2ec68348ebc6d8bf5f20df23815fc0d332b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Jul 2018 13:07:20 -0700 Subject: rcutorture: Vary forward-progress test interval Some of the Linux kernel's RCU implementations provide several mechanisms to promote forward progress that operate over different timeframes. This commit therefore causes rcu_torture_fwd_prog() to vary the duration of its forward-progress testing in order to test each such mechanism. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 8ab23143c244..89cc4d9c9a0c 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1676,16 +1676,21 @@ static int rcu_torture_fwd_prog(void *args) unsigned long cver; unsigned long gps; int idx; + int sd; + int sd4; unsigned long stopat; bool tested = false; int tested_tries = 0; + static DEFINE_TORTURE_RANDOM(trs); VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); do { schedule_timeout_interruptible(fwd_progress_holdoff * HZ); cver = READ_ONCE(rcu_torture_current_version); gps = cur_ops->get_gp_seq(); - stopat = jiffies + cur_ops->stall_dur() / fwd_progress_div; + sd = cur_ops->stall_dur() + 1; + sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; + stopat = jiffies + sd4 + torture_random(&trs) % (sd - sd4); while (time_before(jiffies, stopat) && !torture_must_stop()) { idx = cur_ops->readlock(); udelay(10); -- cgit v1.2.3 From 9fdcb9afe082794c6dcf2b79b3070ef5dafc8a8f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Jul 2018 13:36:00 -0700 Subject: rcutorture: Add self-propagating callback to forward-progress testing If rcutorture is run on a quiet system with the rcutorture.stutter module parameter set high, then there can legitimately be an extended period during which no RCU forward progress takes place. This can result in false-positive no-forward-progress splats. This commit therefore makes rcu_torture_fwd_prog() create a self-propagating RCU callback to ensure that grace periods are in progress for the duration of the forward-progress test. Note that the RCU flavor under test must define ->call(), ->sync(), and ->cb_barrier() for this self-propagating callback to be created. 
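The callback itself is a small state machine driven by a stop flag (0 = keep re-posting, 1 = stop requested, 2 = stop acknowledged). A hedged user-space simulation of that pattern, with a trivial 'posted' flag standing in for ->call() queueing (struct fwd_cb, post_cb() and fwd_cb_fn() are illustrative names):

#include <stdio.h>

struct fwd_cb {
        int stop;       /* 0 = keep going, 1 = stop requested, 2 = stopped */
        int posted;     /* stands in for "callback is queued" */
};

/* Stand-in for cur_ops->call(): just mark the callback as queued again. */
static void post_cb(struct fwd_cb *f)
{
        f->posted = 1;
}

/* The callback body: acknowledge a stop request or re-post itself. */
static void fwd_cb_fn(struct fwd_cb *f)
{
        if (f->stop) {
                f->stop = 2;    /* tell the requester the flag was seen */
                return;
        }
        post_cb(f);
}

int main(void)
{
        struct fwd_cb f = { .stop = 0 };
        int invocations = 0;

        post_cb(&f);
        while (f.posted) {              /* toy callback-processing loop */
                f.posted = 0;
                fwd_cb_fn(&f);
                if (++invocations == 5)
                        f.stop = 1;     /* like WRITE_ONCE(fcs.stop, 1) */
        }
        printf("ran %d times, final stop state %d\n", invocations, f.stop);
        return 0;
}

In the real test the re-posting happens from callback context via cur_ops->call(), and ->sync() plus ->cb_barrier() are what make the stop handshake safe to wait for.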
If one or more of those rcu_torture_ops fields are NULL, then the rcu_torture_fwd_prog() function will silently proceed without creating the self-propagating callback. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 89cc4d9c9a0c..316083687fd7 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1670,20 +1670,49 @@ static int __init rcu_torture_stall_init(void) return torture_create_kthread(rcu_torture_stall, NULL, stall_task); } +/* State structure for forward-progress self-propagating RCU callback. */ +struct fwd_cb_state { + struct rcu_head rh; + int stop; +}; + +/* + * Forward-progress self-propagating RCU callback function. Because + * callbacks run from softirq, this function is an implicit RCU read-side + * critical section. + */ +static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) +{ + struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); + + if (READ_ONCE(fcsp->stop)) { + WRITE_ONCE(fcsp->stop, 2); + return; + } + cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); +} + /* Carry out grace-period forward-progress testing. */ static int rcu_torture_fwd_prog(void *args) { unsigned long cver; + struct fwd_cb_state fcs = { .stop = 0 }; unsigned long gps; int idx; int sd; int sd4; + bool selfpropcb = false; unsigned long stopat; bool tested = false; int tested_tries = 0; static DEFINE_TORTURE_RANDOM(trs); VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); + if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { + init_rcu_head_on_stack(&fcs.rh); + cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); + selfpropcb = true; + } do { schedule_timeout_interruptible(fwd_progress_holdoff * HZ); cver = READ_ONCE(rcu_torture_current_version); @@ -1708,6 +1737,13 @@ static int rcu_torture_fwd_prog(void *args) /* Avoid slow periods, better to test when busy. */ stutter_wait("rcu_torture_fwd_prog"); } while (!torture_must_stop()); + if (selfpropcb) { + WRITE_ONCE(fcs.stop, 1); + cur_ops->sync(); /* Wait for running callback to complete. */ + cur_ops->cb_barrier(); /* Wait for queued callbacks. */ + WARN_ON(READ_ONCE(fcs.stop) != 2); + destroy_rcu_head_on_stack(&fcs.rh); + } /* Short runs might not contain a valid forward-progress attempt. */ WARN_ON(!tested && tested_tries >= 5); torture_kthread_stopping("rcu_torture_fwd_prog"); -- cgit v1.2.3 From 3cff54a830f760eafc9c20191ce1d4b8c356d002 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Jul 2018 15:25:57 -0700 Subject: rcutorture: Increase rcu_read_delay() longdelay_ms RCU now takes certain actions 100 and 200 milliseconds into a grace period by default, but rcutorture only runs RCU read-side critical sections with durations up to 50 milliseconds. This commit therefore increases test coverage by increasing the maximum critical-section duration to 300 milliseconds. Note that the existing code automatically dials down the probability of long delays based on the maximum duration, which means that this change should not significantly change the rate of execution of RCU read-side critical sections. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 316083687fd7..b98bb11d47a2 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -323,7 +323,7 @@ static void rcu_read_delay(struct torture_random_state *rrsp) unsigned long started; unsigned long completed; const unsigned long shortdelay_us = 200; - const unsigned long longdelay_ms = 50; + const unsigned long longdelay_ms = 300; unsigned long long ts; /* We want a short delay sometimes to make a reader delay the grace -- cgit v1.2.3 From 1e69676592edaf81eed88ba53f5239d84fae4e67 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 20 Jul 2018 12:04:12 -0700 Subject: rcutorture: Limit reader duration if irq or bh disabled There are debug checks in some environments that will complain if the duration of a bh-disabled region of code exceeds about 50 milliseconds. Because rcu_read_delay() can produce a 50-millisecond delay and because there could be up to eight reader segments with such delays, this commit limits the maximum delay to 10 milliseconds if either interrupts or softirqs are disabled. Reported-by: Thomas Gleixner Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index b98bb11d47a2..9622192ec5c9 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -323,7 +323,7 @@ static void rcu_read_delay(struct torture_random_state *rrsp) unsigned long started; unsigned long completed; const unsigned long shortdelay_us = 200; - const unsigned long longdelay_ms = 300; + unsigned long longdelay_ms = 300; unsigned long long ts; /* We want a short delay sometimes to make a reader delay the grace @@ -333,6 +333,8 @@ static void rcu_read_delay(struct torture_random_state *rrsp) if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); + if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) + longdelay_ms = 5; /* Avoid triggering BH limits. */ mdelay(longdelay_ms); completed = cur_ops->get_gp_seq(); do_trace_rcu_torture_read(cur_ops->name, NULL, ts, -- cgit v1.2.3 From fecad5091f35425246316ab25c8a9f2aa44a7051 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 20 Jul 2018 12:18:11 -0700 Subject: rcutorture: Reduce priority of forward-progress testing On !SMP tests, the forward-progress kthread might prevent RCU's grace-period kthread from running, which would defeat RCU's forward-progress measures. On PREEMPT tests without RCU priority boosting, the forward-progress kthread might preempt a reader for an extended time period, which would also defeat RCU's forward-progress measures. This commit therefore reduced rcutorture's forward-progress kthread's priority in those cases. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 9622192ec5c9..ac487ea8d245 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1710,6 +1710,8 @@ static int rcu_torture_fwd_prog(void *args) static DEFINE_TORTURE_RANDOM(trs); VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); + if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) + set_user_nice(current, MAX_NICE); if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { init_rcu_head_on_stack(&fcs.rh); cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); -- cgit v1.2.3 From c04dd09bd38c0df1aa6318164a51eccbc3a9fa5e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 23 Jul 2018 14:16:47 -0700 Subject: rcutorture: Adjust number of reader kthreads per CPU-hotplug operations Currently, rcutorture provisions rcu_torture_reader() kthreads based on the initial number of CPUs. This can be problematic when CPU hotplug is enabled, as a system with a very large number of CPUs will provision a very large number of rcu_torture_reader() kthreads. All of these kthreads will continue running even if the CPU-hotplug operations result in only one remaining online CPU. This can result in all sorts of strange artifacts due simply to massive overload. This commit therefore causes the rcu_torture_reader() kthreads to start blocking as the number of online CPUs decreases. This is accomplished by numbering these kthreads, and having each check to make sure that the number of online CPUs is at least as large as its assigned number. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index ac487ea8d245..50015b78a43f 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1400,6 +1400,8 @@ static int rcu_torture_reader(void *arg) { unsigned long lastsleep = jiffies; + long myid = (long)arg; + int mynumonline = myid; DEFINE_TORTURE_RANDOM(rand); struct timer_list t; @@ -1419,6 +1421,8 @@ rcu_torture_reader(void *arg) schedule_timeout_interruptible(1); lastsleep = jiffies + 10; } + while (num_online_cpus() < mynumonline && !torture_must_stop()) + schedule_timeout_interruptible(HZ / 5); stutter_wait("rcu_torture_reader"); } while (!torture_must_stop()); if (irqreader && cur_ops->irq_capable) { @@ -2063,7 +2067,7 @@ static void rcu_test_debug_objects(void) static int __init rcu_torture_init(void) { - int i; + long i; int cpu; int firsterr = 0; static struct rcu_torture_ops *torture_ops[] = { @@ -2169,7 +2173,7 @@ rcu_torture_init(void) goto unwind; } for (i = 0; i < nrealreaders; i++) { - firsterr = torture_create_kthread(rcu_torture_reader, NULL, + firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, reader_tasks[i]); if (firsterr) goto unwind; -- cgit v1.2.3 From f4de46ed5bbc8ba9acebc8ac75809751b716e470 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Jul 2018 20:50:40 -0700 Subject: rcutorture: Print forward-progress test interval on error This commit prints the duration of the forward-progress test interval in the case that no forward progress was observed as an aid to debugging. When forward progress does happen, it prints out the number of rcu_torture_writer() versions and grace periods that elapsed during the forward-progress test. 
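Those two numbers come from a snapshot-and-subtract pattern: record the writer version and grace-period sequence before the test window, then report the deltas afterwards. A minimal user-space sketch of that bookkeeping (the counters and run_test_window() are stand-ins, and plain subtraction replaces rcutorture_seq_diff()):

#include <stdio.h>

static unsigned long writer_version;    /* stands in for rcu_torture_current_version */
static unsigned long gp_seq;            /* stands in for cur_ops->get_gp_seq() */

static void run_test_window(void)
{
        /* Pretend some writer passes and grace periods completed meanwhile. */
        writer_version += 3;
        gp_seq += 7;
}

int main(void)
{
        unsigned long cver = writer_version;    /* snapshot before the window */
        unsigned long gps = gp_seq;

        run_test_window();

        cver = writer_version - cver;           /* deltas over the window */
        gps = gp_seq - gps;
        if (!cver && gps < 2)
                fprintf(stderr, "no forward progress observed\n");
        printf("cver %lu gps %lu\n", cver, gps);
        return 0;
}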
At the end of the run, it also prints the number of attempted and actual forward-progress tests. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 50015b78a43f..7df8142a6a22 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1702,6 +1702,7 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) static int rcu_torture_fwd_prog(void *args) { unsigned long cver; + unsigned long dur; struct fwd_cb_state fcs = { .stop = 0 }; unsigned long gps; int idx; @@ -1709,7 +1710,7 @@ static int rcu_torture_fwd_prog(void *args) int sd4; bool selfpropcb = false; unsigned long stopat; - bool tested = false; + int tested = 0; int tested_tries = 0; static DEFINE_TORTURE_RANDOM(trs); @@ -1727,7 +1728,8 @@ static int rcu_torture_fwd_prog(void *args) gps = cur_ops->get_gp_seq(); sd = cur_ops->stall_dur() + 1; sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; - stopat = jiffies + sd4 + torture_random(&trs) % (sd - sd4); + dur = sd4 + torture_random(&trs) % (sd - sd4); + stopat = jiffies + dur; while (time_before(jiffies, stopat) && !torture_must_stop()) { idx = cur_ops->readlock(); udelay(10); @@ -1737,10 +1739,11 @@ static int rcu_torture_fwd_prog(void *args) } tested_tries++; if (!time_before(jiffies, stopat) && !torture_must_stop()) { - tested = true; - cver = cver == READ_ONCE(rcu_torture_current_version); + tested++; + cver = READ_ONCE(rcu_torture_current_version) - cver; gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); - WARN_ON_ONCE(cver && gps < 2); + WARN_ON(!cver && gps < 2); + pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); } /* Avoid slow periods, better to test when busy. */ stutter_wait("rcu_torture_fwd_prog"); @@ -1754,6 +1757,7 @@ static int rcu_torture_fwd_prog(void *args) } /* Short runs might not contain a valid forward-progress attempt. */ WARN_ON(!tested && tested_tries >= 5); + pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); torture_kthread_stopping("rcu_torture_fwd_prog"); return 0; } -- cgit v1.2.3 From 474e59b476b3390ef9f730515439f21640b61623 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 7 Aug 2018 14:34:44 -0700 Subject: rcutorture: Check GP completion at stutter end The rcu_torture_writer() function invokes stutter_wait() at the end of each writer pass, which occasionally blocks for an extended time period in order to ensure that RCU can handle intermittent loads. But part of handling a busy period is invoking all the callbacks before the end of the idle period induced by stutter_wait(). This commit therefore adds a return value to stutter_wait() indicating whether stutter_wait() actually waited. In addition, this commit causes rcu_torture_writer() to test this value and if set, checks that all the elements of the rcu_tortures[] array have been freed up. Signed-off-by: Paul E. McKenney --- include/linux/torture.h | 2 +- kernel/rcu/rcutorture.c | 5 ++++- kernel/torture.c | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/torture.h b/include/linux/torture.h index 61dfd93b6ee4..48fad21109fc 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title); int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. 
*/ -void stutter_wait(const char *title); +bool stutter_wait(const char *title); int torture_stutter_init(int s); /* Initialization and cleanup. */ diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 7df8142a6a22..ae10ad531993 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1144,7 +1144,10 @@ rcu_torture_writer(void *arg) !rcu_gp_is_normal(); } rcu_torture_writer_state = RTWS_STUTTER; - stutter_wait("rcu_torture_writer"); + if (stutter_wait("rcu_torture_writer")) + for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) + if (list_empty(&rcu_tortures[i].rtort_free)) + WARN_ON_ONCE(1); } while (!torture_must_stop()); /* Reset expediting back to unexpedited. */ if (expediting > 0) diff --git a/kernel/torture.c b/kernel/torture.c index 1ac24a826589..17d91f5fba2a 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -573,7 +573,7 @@ static int stutter; * Block until the stutter interval ends. This must be called periodically * by all running kthreads that need to be subject to stuttering. */ -void stutter_wait(const char *title) +bool stutter_wait(const char *title) { int spt; @@ -590,6 +590,7 @@ void stutter_wait(const char *title) } torture_shutdown_absorb(title); } + return !!spt; } EXPORT_SYMBOL_GPL(stutter_wait); -- cgit v1.2.3 From 7c590fcca66b58957f8e34acdb0587cd1eeed35b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 7 Aug 2018 16:42:42 -0700 Subject: rcutorture: Maintain self-propagating CB only during forward-progress test The current forward-progress testing maintains a self-propagating callback during the full test. This could result in false negatives for stutter-end checking, where it might appear that RCU was clearing out old callbacks only because it was being continually motivated by the self-propagating callback. This commit therefore shuts down the self-propagating callback at the end of each forward-progress test interval. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index ae10ad531993..a02a2f21386b 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1706,7 +1706,7 @@ static int rcu_torture_fwd_prog(void *args) { unsigned long cver; unsigned long dur; - struct fwd_cb_state fcs = { .stop = 0 }; + struct fwd_cb_state fcs; unsigned long gps; int idx; int sd; @@ -1722,11 +1722,14 @@ static int rcu_torture_fwd_prog(void *args) set_user_nice(current, MAX_NICE); if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { init_rcu_head_on_stack(&fcs.rh); - cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); selfpropcb = true; } do { schedule_timeout_interruptible(fwd_progress_holdoff * HZ); + if (selfpropcb) { + WRITE_ONCE(fcs.stop, 0); + cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); + } cver = READ_ONCE(rcu_torture_current_version); gps = cur_ops->get_gp_seq(); sd = cur_ops->stall_dur() + 1; @@ -1748,13 +1751,15 @@ static int rcu_torture_fwd_prog(void *args) WARN_ON(!cver && gps < 2); pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); } + if (selfpropcb) { + WRITE_ONCE(fcs.stop, 1); + cur_ops->sync(); /* Wait for running CB to complete. */ + cur_ops->cb_barrier(); /* Wait for queued callbacks. */ + } /* Avoid slow periods, better to test when busy. */ stutter_wait("rcu_torture_fwd_prog"); } while (!torture_must_stop()); if (selfpropcb) { - WRITE_ONCE(fcs.stop, 1); - cur_ops->sync(); /* Wait for running callback to complete. 
*/ - cur_ops->cb_barrier(); /* Wait for queued callbacks. */ WARN_ON(READ_ONCE(fcs.stop) != 2); destroy_rcu_head_on_stack(&fcs.rh); } -- cgit v1.2.3 From 679c782de14bd48c19dd74cd1af20a2bc05dd936 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 22 Aug 2018 20:02:19 +0100 Subject: bpf/verifier: per-register parent pointers By giving each register its own liveness chain, we elide the skip_callee() logic. Instead, each register's parent is the state it inherits from; both check_func_call() and prepare_func_exit() automatically connect reg states to the correct chain since when they copy the reg state across (r1-r5 into the callee as args, and r0 out as the return value) they also copy the parent pointer. Signed-off-by: Edward Cree Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 8 +- kernel/bpf/verifier.c | 184 +++++++++++-------------------------------- 2 files changed, 47 insertions(+), 145 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 38b04f559ad3..b42b60a83e19 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -41,6 +41,7 @@ enum bpf_reg_liveness { }; struct bpf_reg_state { + /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ @@ -59,7 +60,6 @@ struct bpf_reg_state { * came from, when one is tested for != NULL. */ u32 id; - /* Ordering of fields matters. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -76,15 +76,15 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like * R1=fp-8 and R2=fp-8, but one of them points to this function stack * while another to the caller's stack. To differentiate them 'frameno' * is used which is an index in bpf_verifier_state->frame[] array * pointing to bpf_func_state. - * This field must be second to last, for states_equal() reasons. */ u32 frameno; - /* This field must be last, for states_equal() reasons. */ enum bpf_reg_liveness live; }; @@ -107,7 +107,6 @@ struct bpf_stack_state { */ struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; - struct bpf_verifier_state *parent; /* index of call instruction that called into this func */ int callsite; /* stack frame number of this function state from pov of @@ -129,7 +128,6 @@ struct bpf_func_state { struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; - struct bpf_verifier_state *parent; u32 curframe; }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 92246117d2b0..68568d22d6bd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -380,9 +380,9 @@ static int copy_stack_state(struct bpf_func_state *dst, /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. - * Note there is a non-zero 'parent' pointer inside bpf_verifier_state - * which this function copies over. 
It points to previous bpf_verifier_state - * which is never reallocated + * Note there is a non-zero parent pointer inside each reg of bpf_verifier_state + * which this function copies over. It points to corresponding reg in previous + * bpf_verifier_state which is never reallocated */ static int realloc_func_state(struct bpf_func_state *state, int size, bool copy_old) @@ -466,7 +466,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->frame[i] = NULL; } dst_state->curframe = src->curframe; - dst_state->parent = src->parent; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -732,6 +731,7 @@ static void init_reg_state(struct bpf_verifier_env *env, for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; + regs[i].parent = NULL; } /* frame pointer */ @@ -876,74 +876,21 @@ next: return 0; } -static -struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) -{ - struct bpf_verifier_state *tmp = NULL; - - /* 'parent' could be a state of caller and - * 'state' could be a state of callee. In such case - * parent->curframe < state->curframe - * and it's ok for r1 - r5 registers - * - * 'parent' could be a callee's state after it bpf_exit-ed. - * In such case parent->curframe > state->curframe - * and it's ok for r0 only - */ - if (parent->curframe == state->curframe || - (parent->curframe < state->curframe && - regno >= BPF_REG_1 && regno <= BPF_REG_5) || - (parent->curframe > state->curframe && - regno == BPF_REG_0)) - return parent; - - if (parent->curframe > state->curframe && - regno >= BPF_REG_6) { - /* for callee saved regs we have to skip the whole chain - * of states that belong to callee and mark as LIVE_READ - * the registers before the call - */ - tmp = parent; - while (tmp && tmp->curframe != state->curframe) { - tmp = tmp->parent; - } - if (!tmp) - goto bug; - parent = tmp; - } else { - goto bug; - } - return parent; -bug: - verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); - verbose(env, "regno %d parent frame %d current frame %d\n", - regno, parent->curframe, state->curframe); - return NULL; -} - +/* Parentage chain of this register (or stack slot) should take care of all + * issues like callee-saved registers, stack slot allocation time, etc. + */ static int mark_reg_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - u32 regno) + const struct bpf_reg_state *state, + struct bpf_reg_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ - if (regno == BPF_REG_FP) - /* We don't need to worry about FP liveness because it's read-only */ - return 0; - while (parent) { /* if read wasn't screened by an earlier write ... */ - if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) + if (writes && state->live & REG_LIVE_WRITTEN) break; - parent = skip_callee(env, state, parent, regno); - if (!parent) - return -EFAULT; /* ... 
then we depend on parent's value */ - parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; + parent->live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; @@ -969,7 +916,10 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, verbose(env, "R%d !read_ok\n", regno); return -EACCES; } - return mark_reg_read(env, vstate, vstate->parent, regno); + /* We don't need to worry about FP liveness because it's read-only */ + if (regno != BPF_REG_FP) + return mark_reg_read(env, ®s[regno], + regs[regno].parent); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { @@ -1080,8 +1030,8 @@ static int check_stack_write(struct bpf_verifier_env *env, } else { u8 type = STACK_MISC; - /* regular write of data into stack */ - state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; + /* regular write of data into stack destroys any spilled ptr */ + state->stack[spi].spilled_ptr.type = NOT_INIT; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -1106,61 +1056,6 @@ static int check_stack_write(struct bpf_verifier_env *env, return 0; } -/* registers of every function are unique and mark_reg_read() propagates - * the liveness in the following cases: - * - from callee into caller for R1 - R5 that were used as arguments - * - from caller into callee for R0 that used as result of the call - * - from caller to the same caller skipping states of the callee for R6 - R9, - * since R6 - R9 are callee saved by implicit function prologue and - * caller's R6 != callee's R6, so when we propagate liveness up to - * parent states we need to skip callee states for R6 - R9. - * - * stack slot marking is different, since stacks of caller and callee are - * accessible in both (since caller can pass a pointer to caller's stack to - * callee which can pass it to another function), hence mark_stack_slot_read() - * has to propagate the stack liveness to all parent states at given frame number. - * Consider code: - * f1() { - * ptr = fp - 8; - * *ptr = ctx; - * call f2 { - * .. = *ptr; - * } - * .. = *ptr; - * } - * First *ptr is reading from f1's stack and mark_stack_slot_read() has - * to mark liveness at the f1's frame and not f2's frame. - * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has - * to propagate liveness to f2 states at f1's frame level and further into - * f1 states at f1's frame level until write into that stack slot - */ -static void mark_stack_slot_read(struct bpf_verifier_env *env, - const struct bpf_verifier_state *state, - struct bpf_verifier_state *parent, - int slot, int frameno) -{ - bool writes = parent == state->parent; /* Observe write marks */ - - while (parent) { - if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE) - /* since LIVE_WRITTEN mark is only done for full 8-byte - * write the read marks are conservative and parent - * state may not even have the stack allocated. In such case - * end the propagation, since the loop reached beginning - * of the function - */ - break; - /* if read wasn't screened by an earlier write ... */ - if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) - break; - /* ... 
then we depend on parent's value */ - parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ; - state = parent; - parent = state->parent; - writes = true; - } -} - static int check_stack_read(struct bpf_verifier_env *env, struct bpf_func_state *reg_state /* func where register points to */, int off, int size, int value_regno) @@ -1198,8 +1093,8 @@ static int check_stack_read(struct bpf_verifier_env *env, */ state->regs[value_regno].live |= REG_LIVE_WRITTEN; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); + mark_reg_read(env, ®_state->stack[spi].spilled_ptr, + reg_state->stack[spi].spilled_ptr.parent); return 0; } else { int zeros = 0; @@ -1215,8 +1110,8 @@ static int check_stack_read(struct bpf_verifier_env *env, off, i, size); return -EACCES; } - mark_stack_slot_read(env, vstate, vstate->parent, spi, - reg_state->frameno); + mark_reg_read(env, ®_state->stack[spi].spilled_ptr, + reg_state->stack[spi].spilled_ptr.parent); if (value_regno >= 0) { if (zeros == size) { /* any size read into register is zero extended, @@ -1908,8 +1803,8 @@ mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ - mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, - spi, state->frameno); + mark_reg_read(env, &state->stack[spi].spilled_ptr, + state->stack[spi].spilled_ptr.parent); } return update_stack_depth(env, state, off); } @@ -2366,11 +2261,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); - /* copy r1 - r5 args that callee can access */ + /* copy r1 - r5 args that callee can access. The copy includes parent + * pointers, which connects us up to the liveness chain + */ for (i = BPF_REG_1; i <= BPF_REG_5; i++) callee->regs[i] = caller->regs[i]; - /* after the call regsiters r0 - r5 were scratched */ + /* after the call registers r0 - r5 were scratched */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, caller->regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); @@ -4370,7 +4267,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, /* explored state didn't use this */ return true; - equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; + equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to @@ -4603,7 +4500,7 @@ static bool states_equal(struct bpf_verifier_env *env, * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison - * in mark_reg_read() and mark_stack_slot_read() is for. + * in mark_reg_read() is for. 
*/ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, @@ -4624,7 +4521,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { - err = mark_reg_read(env, vstate, vparent, i); + err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], + &vparent->frame[vstate->curframe]->regs[i]); if (err) return err; } @@ -4639,7 +4537,8 @@ static int propagate_liveness(struct bpf_verifier_env *env, if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) - mark_stack_slot_read(env, vstate, vparent, i, frame); + mark_reg_read(env, &state->stack[i].spilled_ptr, + &parent->stack[i].spilled_ptr); } } return err; @@ -4649,7 +4548,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; - struct bpf_verifier_state *cur = env->cur_state; + struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err; sl = env->explored_states[insn_idx]; @@ -4691,16 +4590,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return -ENOMEM; /* add new state to the head of linked list */ - err = copy_verifier_state(&new_sl->state, cur); + new = &new_sl->state; + err = copy_verifier_state(new, cur); if (err) { - free_verifier_state(&new_sl->state, false); + free_verifier_state(new, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ - cur->parent = &new_sl->state; + for (i = 0; i < BPF_REG_FP; i++) + cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i]; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark @@ -4713,9 +4614,13 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; + struct bpf_func_state *newframe = new->frame[j]; - for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) + for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; + frame->stack[i].spilled_ptr.parent = + &newframe->stack[i].spilled_ptr; + } } return 0; } @@ -4734,7 +4639,6 @@ static int do_check(struct bpf_verifier_env *env) if (!state) return -ENOMEM; state->curframe = 0; - state->parent = NULL; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); -- cgit v1.2.3 From 8efea21d333d21e1f9177579ffdc69556314f603 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 22 Aug 2018 20:02:44 +0100 Subject: bpf/verifier: display non-spill stack slot types in print_verifier_state If a stack slot does not hold a spilled register (STACK_SPILL), then each of its eight bytes could potentially have a different slot_type. This information can be important for debugging, and previously we either did not print anything for the stack slot, or just printed fp-X=0 in the case where its first byte was STACK_ZERO. Instead, print eight characters with either 0 (STACK_ZERO), m (STACK_MISC) or ? 
(STACK_INVALID) for any stack slot which is neither STACK_SPILL nor entirely STACK_INVALID. Signed-off-by: Edward Cree Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 68568d22d6bd..f4ff0c569e54 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -263,6 +263,13 @@ static const char * const reg_type_str[] = { [PTR_TO_PACKET_END] = "pkt_end", }; +static char slot_type_char[] = { + [STACK_INVALID] = '?', + [STACK_SPILL] = 'r', + [STACK_MISC] = 'm', + [STACK_ZERO] = '0', +}; + static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { @@ -349,15 +356,26 @@ static void print_verifier_state(struct bpf_verifier_env *env, } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] == STACK_SPILL) { - verbose(env, " fp%d", - (-i - 1) * BPF_REG_SIZE); - print_liveness(env, state->stack[i].spilled_ptr.live); + char types_buf[BPF_REG_SIZE + 1]; + bool valid = false; + int j; + + for (j = 0; j < BPF_REG_SIZE; j++) { + if (state->stack[i].slot_type[j] != STACK_INVALID) + valid = true; + types_buf[j] = slot_type_char[ + state->stack[i].slot_type[j]]; + } + types_buf[BPF_REG_SIZE] = 0; + if (!valid) + continue; + verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); + print_liveness(env, state->stack[i].spilled_ptr.live); + if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); - } - if (state->stack[i].slot_type[0] == STACK_ZERO) - verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE); + else + verbose(env, "=%s", types_buf); } verbose(env, "\n"); } -- cgit v1.2.3 From c7b27c37af3da5a63f32b0bc99569e3069e4d9c1 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 29 Aug 2018 14:43:13 -0700 Subject: bpf: add bpffs pretty print for percpu arraymap/hash/lru_hash Added bpffs pretty print for percpu arraymap, percpu hashmap and percpu lru hashmap. For each map pair, the format is: : { cpu0: cpu1: ... cpun: } For example, on my VM, there are 4 cpus, and for test_btf test in the next patch: cat /sys/fs/bpf/pprint_test_percpu_hash You may get: ... 43602: { cpu0: {43602,0,-43602,0x3,0xaa52,0x3,{43602|[82,170,0,0,0,0,0,0]},ENUM_TWO} cpu1: {43602,0,-43602,0x3,0xaa52,0x3,{43602|[82,170,0,0,0,0,0,0]},ENUM_TWO} cpu2: {43602,0,-43602,0x3,0xaa52,0x3,{43602|[82,170,0,0,0,0,0,0]},ENUM_TWO} cpu3: {43602,0,-43602,0x3,0xaa52,0x3,{43602|[82,170,0,0,0,0,0,0]},ENUM_TWO} } 72847: { cpu0: {72847,0,-72847,0x3,0x11c8f,0x3,{72847|[143,28,1,0,0,0,0,0]},ENUM_THREE} cpu1: {72847,0,-72847,0x3,0x11c8f,0x3,{72847|[143,28,1,0,0,0,0,0]},ENUM_THREE} cpu2: {72847,0,-72847,0x3,0x11c8f,0x3,{72847|[143,28,1,0,0,0,0,0]},ENUM_THREE} cpu3: {72847,0,-72847,0x3,0x11c8f,0x3,{72847|[143,28,1,0,0,0,0,0]},ENUM_THREE} } ... 
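For reference (illustration only, not part of this patch): the pretty-printed
view above is only reachable once user space has pinned the map into bpffs.
A minimal sketch using libbpf's bpf_obj_pin(), where "map_fd" is assumed to be
the BTF-annotated percpu hash created by the test loader:

	if (bpf_obj_pin(map_fd, "/sys/fs/bpf/pprint_test_percpu_hash") < 0)
		perror("bpf_obj_pin");	/* pin the map so bpffs can show it */
	/* afterwards: cat /sys/fs/bpf/pprint_test_percpu_hash */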
Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/arraymap.c | 24 ++++++++++++++++++++++++ kernel/bpf/hashtab.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 0c17aab3ce5f..f9d24121be99 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -358,6 +358,29 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + void __percpu *pptr; + int cpu; + + rcu_read_lock(); + + seq_printf(m, "%u: {\n", *(u32 *)key); + pptr = array->pptrs[index & array->index_mask]; + for_each_possible_cpu(cpu) { + seq_printf(m, "\tcpu%d: ", cpu); + btf_type_seq_show(map->btf, map->btf_value_type_id, + per_cpu_ptr(pptr, cpu), m); + seq_puts(m, "\n"); + } + seq_puts(m, "}\n"); + + rcu_read_unlock(); +} + static int array_map_check_btf(const struct bpf_map *map, const struct btf_type *key_type, const struct btf_type *value_type) @@ -398,6 +421,7 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_lookup_elem = percpu_array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, + .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, }; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 03cc59ee9c95..2c1790288138 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1285,6 +1285,35 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, return ret; } +static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + struct htab_elem *l; + void __percpu *pptr; + int cpu; + + rcu_read_lock(); + + l = __htab_map_lookup_elem(map, key); + if (!l) { + rcu_read_unlock(); + return; + } + + btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); + seq_puts(m, ": {\n"); + pptr = htab_elem_get_ptr(l, map->key_size); + for_each_possible_cpu(cpu) { + seq_printf(m, "\tcpu%d: ", cpu); + btf_type_seq_show(map->btf, map->btf_value_type_id, + per_cpu_ptr(pptr, cpu), m); + seq_puts(m, "\n"); + } + seq_puts(m, "}\n"); + + rcu_read_unlock(); +} + const struct bpf_map_ops htab_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1293,6 +1322,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_lookup_elem = htab_percpu_map_lookup_elem, .map_update_elem = htab_percpu_map_update_elem, .map_delete_elem = htab_map_delete_elem, + .map_seq_show_elem = htab_percpu_map_seq_show_elem, }; const struct bpf_map_ops htab_lru_percpu_map_ops = { @@ -1303,6 +1333,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_lookup_elem = htab_lru_percpu_map_lookup_elem, .map_update_elem = htab_lru_percpu_map_update_elem, .map_delete_elem = htab_lru_map_delete_elem, + .map_seq_show_elem = htab_percpu_map_seq_show_elem, }; static int fd_htab_map_alloc_check(union bpf_attr *attr) -- cgit v1.2.3 From bd14406b78e6daa1ea3c1673bda1ffc9efdeead0 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:25 +0200 Subject: perf/hw_breakpoint: Modify breakpoint even if the new attr has disabled set We need to change the breakpoint even if the attr with new fields has disabled set to true. 
Current code prevents following user code to change the breakpoint address: ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[0]), addr_1) ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[0]), addr_2) ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[7]), dr7) The first PTRACE_POKEUSER creates the breakpoint with attr.disabled set to true: ptrace_set_breakpoint_addr(nr = 0) struct perf_event *bp = t->ptrace_bps[nr]; ptrace_register_breakpoint(..., disabled = true) ptrace_fill_bp_fields(..., disabled) register_user_hw_breakpoint So the second PTRACE_POKEUSER will be omitted: ptrace_set_breakpoint_addr(nr = 0) struct perf_event *bp = t->ptrace_bps[nr]; struct perf_event_attr attr = bp->attr; modify_user_hw_breakpoint(bp, &attr) if (!attr->disabled) modify_user_hw_breakpoint_check Reported-by: Milind Chabbi Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-3-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index b3814fce5ecb..fb229d9c7f3c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a */ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { + int err; + /* * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it * will not be possible to raise IPIs that invoke __perf_event_disable. @@ -520,11 +522,11 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att else perf_event_disable(bp); - if (!attr->disabled) { - int err = modify_user_hw_breakpoint_check(bp, attr, false); + err = modify_user_hw_breakpoint_check(bp, attr, false); + if (err) + return err; - if (err) - return err; + if (!attr->disabled) { perf_event_enable(bp); bp->attr.disabled = 0; } -- cgit v1.2.3 From cb45302d7c5e20f0c0598cdbd7753fa44daceb2a Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:26 +0200 Subject: perf/hw_breakpoint: Remove superfluous bp->attr.disabled = 0 Once the breakpoint was succesfully modified, the attr->disabled value is in bp->attr.disabled. So there's no reason to set it again, removing that. 
Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-4-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index fb229d9c7f3c..3e560d7609fd 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -526,10 +526,9 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att if (err) return err; - if (!attr->disabled) { + if (!attr->disabled) perf_event_enable(bp); - bp->attr.disabled = 0; - } + return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); -- cgit v1.2.3 From 969558371bf926258241727ebb994f516f2e6f61 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:27 +0200 Subject: perf/hw_breakpoint: Enable breakpoint in modify_user_hw_breakpoint Currently we enable the breakpoint back only if the breakpoint modification was successful. If it fails we can leave the breakpoint in disabled state with attr->disabled == 0. We can safely enable the breakpoint back for both the fail and success paths by checking the bp->attr.disabled, which either holds the new 'requested' disabled state or the original breakpoint state. Suggested-by: Oleg Nesterov Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-5-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 3e560d7609fd..d6b56180827c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -523,13 +523,11 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att perf_event_disable(bp); err = modify_user_hw_breakpoint_check(bp, attr, false); - if (err) - return err; - if (!attr->disabled) + if (!bp->attr.disabled) perf_event_enable(bp); - return 0; + return err; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); -- cgit v1.2.3 From bf06278c3fdf8909c3a9283e2c270b0fc170fa90 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:28 +0200 Subject: perf/hw_breakpoint: Simplify breakpoint enable in perf_event_modify_breakpoint We can safely enable the breakpoint back for both the fail and success paths by checking only the bp->attr.disabled, which either holds the new 'requested' disabled state or the original breakpoint state. 
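A sketch of that reasoning, mirroring the change below (the comments are
illustrative, not part of the patch):

	err = modify_user_hw_breakpoint_check(bp, attr, true);
	/* success: bp->attr.disabled now holds attr->disabled (new state) */
	/* failure: bp->attr.disabled still holds the original state       */
	if (!bp->attr.disabled)
		_perf_event_enable(bp);	/* correct on both paths */

	return err;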
Committer testing: At the end of the series, the 'perf test' entry introduced as the first patch now runs to completion without finding the fixed issues: # perf test "bp modify" 62: x86 bp modify : Ok # In verbose mode: # perf test -v "bp modify" 62: x86 bp modify : --- start --- test child forked, pid 5161 rip 5950a0, bp_1 0x5950a0 in bp_1 rip 5950a0, bp_1 0x5950a0 in bp_1 test child finished with 0 ---- end ---- x86 bp modify: Ok Suggested-by: Oleg Nesterov Acked-by: Oleg Nesterov Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-6-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index f6ea33a9f904..22ede28ec07d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp, _perf_event_disable(bp); err = modify_user_hw_breakpoint_check(bp, attr, true); - if (err) { - if (!bp->attr.disabled) - _perf_event_enable(bp); - return err; - } - - if (!attr->disabled) + if (!bp->attr.disabled) _perf_event_enable(bp); - return 0; + + return err; } static int perf_event_modify_attr(struct perf_event *event, -- cgit v1.2.3 From cf7614e13c8fcaf290c5ffaa04b2e1b4f704a52a Mon Sep 17 00:00:00 2001 From: Byungchul Park Date: Fri, 22 Jun 2018 15:12:06 +0900 Subject: rcu: Refactor rcu_{nmi,irq}_{enter,exit}() When entering or exiting irq or NMI handlers, the current code uses ->dynticks_nmi_nesting to detect if it is in the outermost handler, that is, the one interrupting or returning to an RCU-idle context (the idle loop or nohz_full usermode execution). When entering the outermost handler via an interrupt (as opposed to NMI), it is necessary to invoke rcu_dynticks_task_exit() just before the CPU is marked non-idle from an RCU perspective and to invoke rcu_cleanup_after_idle() just after the CPU is marked non-idle. Similarly, when exiting the outermost handler via an interrupt, it is necessary to invoke rcu_prepare_for_idle() just before marking the CPU idle and to invoke rcu_dynticks_task_enter() just after marking the CPU idle. The decision to execute these four functions is currently taken in rcu_irq_enter() and rcu_irq_exit() as follows: rcu_irq_enter() /* A conditional branch with ->dynticks_nmi_nesting */ rcu_nmi_enter() /* A conditional branch with ->dynticks */ /* A conditional branch with ->dynticks_nmi_nesting */ rcu_irq_exit() /* A conditional branch with ->dynticks_nmi_nesting */ rcu_nmi_exit() /* A conditional branch with ->dynticks_nmi_nesting */ /* A conditional branch with ->dynticks_nmi_nesting */ rcu_nmi_enter() /* A conditional branch with ->dynticks */ rcu_nmi_exit() /* A conditional branch with ->dynticks_nmi_nesting */ This works, but the conditional branches in rcu_irq_enter() and rcu_irq_exit() are redundant with those in rcu_nmi_enter() and rcu_nmi_exit(), respectively. 
Redundant branches are not something we want in the to/from-idle fastpaths, so this commit refactors rcu_{nmi,irq}_{enter,exit}() so they use a common inlined function passed a constant argument as follows: rcu_irq_enter() inlining rcu_nmi_enter_common(irq=true) /* A conditional branch with ->dynticks */ rcu_irq_exit() inlining rcu_nmi_exit_common(irq=true) /* A conditional branch with ->dynticks_nmi_nesting */ rcu_nmi_enter() inlining rcu_nmi_enter_common(irq=false) /* A conditional branch with ->dynticks */ rcu_nmi_exit() inlining rcu_nmi_exit_common(irq=false) /* A conditional branch with ->dynticks_nmi_nesting */ The combination of the constant function argument and the inlining allows the compiler to discard the conditionals that previously controlled execution of rcu_dynticks_task_exit(), rcu_cleanup_after_idle(), rcu_prepare_for_idle(), and rcu_dynticks_task_enter(). This reduces both the to-idle and from-idle path lengths by two conditional branches each, and improves readability as well. This commit also changes order of execution from this: rcu_dynticks_task_exit(); rcu_dynticks_eqs_exit(); trace_rcu_dyntick(); rcu_cleanup_after_idle(); To this: rcu_dynticks_task_exit(); rcu_dynticks_eqs_exit(); rcu_cleanup_after_idle(); trace_rcu_dyntick(); In other words, the calls to rcu_cleanup_after_idle() and trace_rcu_dyntick() are reversed. This has no functional effect because the real concern is whether a given call is before or after the call to rcu_dynticks_eqs_exit(), and this patch does not change that. Before the call to rcu_dynticks_eqs_exit(), RCU is not yet watching the current CPU and after that call RCU is watching. A similar switch in calling order happens on the idle-entry path, with similar lack of effect for the same reasons. Suggested-by: Paul E. McKenney Signed-off-by: Byungchul Park Signed-off-by: Paul E. McKenney [ paulmck: Applied Steven Rostedt feedback. ] Reviewed-by: Steven Rostedt (VMware) --- kernel/rcu/tree.c | 66 ++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0b760c1369f7..36786789b625 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -770,18 +770,16 @@ void rcu_user_enter(void) } #endif /* CONFIG_NO_HZ_FULL */ -/** - * rcu_nmi_exit - inform RCU of exit from NMI context - * +/* * If we are returning from the outermost NMI handler that interrupted an * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting * to let the RCU grace-period handling know that the CPU is back to * being RCU-idle. * - * If you add or remove a call to rcu_nmi_exit(), be sure to test + * If you add or remove a call to rcu_nmi_exit_common(), be sure to test * with CONFIG_RCU_EQS_DEBUG=y. */ -void rcu_nmi_exit(void) +static __always_inline void rcu_nmi_exit_common(bool irq) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); @@ -807,7 +805,26 @@ void rcu_nmi_exit(void) /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks); WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ + + if (irq) + rcu_prepare_for_idle(); + rcu_dynticks_eqs_enter(); + + if (irq) + rcu_dynticks_task_enter(); +} + +/** + * rcu_nmi_exit - inform RCU of exit from NMI context + * @irq: Is this call from rcu_irq_exit? + * + * If you add or remove a call to rcu_nmi_exit(), be sure to test + * with CONFIG_RCU_EQS_DEBUG=y. 
+ */ +void rcu_nmi_exit(void) +{ + rcu_nmi_exit_common(false); } /** @@ -831,14 +848,8 @@ void rcu_nmi_exit(void) */ void rcu_irq_exit(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - lockdep_assert_irqs_disabled(); - if (rdtp->dynticks_nmi_nesting == 1) - rcu_prepare_for_idle(); - rcu_nmi_exit(); - if (rdtp->dynticks_nmi_nesting == 0) - rcu_dynticks_task_enter(); + rcu_nmi_exit_common(true); } /* @@ -921,7 +932,8 @@ void rcu_user_exit(void) #endif /* CONFIG_NO_HZ_FULL */ /** - * rcu_nmi_enter - inform RCU of entry to NMI context + * rcu_nmi_enter_common - inform RCU of entry to NMI context + * @irq: Is this call from rcu_irq_enter? * * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know @@ -929,10 +941,10 @@ void rcu_user_exit(void) * long as the nesting level does not overflow an int. (You will probably * run out of stack space first.) * - * If you add or remove a call to rcu_nmi_enter(), be sure to test + * If you add or remove a call to rcu_nmi_enter_common(), be sure to test * with CONFIG_RCU_EQS_DEBUG=y. */ -void rcu_nmi_enter(void) +static __always_inline void rcu_nmi_enter_common(bool irq) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); long incby = 2; @@ -949,7 +961,15 @@ void rcu_nmi_enter(void) * period (observation due to Andy Lutomirski). */ if (rcu_dynticks_curr_cpu_in_eqs()) { + + if (irq) + rcu_dynticks_task_exit(); + rcu_dynticks_eqs_exit(); + + if (irq) + rcu_cleanup_after_idle(); + incby = 1; } trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), @@ -960,6 +980,14 @@ void rcu_nmi_enter(void) barrier(); } +/** + * rcu_nmi_enter - inform RCU of entry to NMI context + */ +void rcu_nmi_enter(void) +{ + rcu_nmi_enter_common(false); +} + /** * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle * @@ -984,14 +1012,8 @@ void rcu_nmi_enter(void) */ void rcu_irq_enter(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); - lockdep_assert_irqs_disabled(); - if (rdtp->dynticks_nmi_nesting == 0) - rcu_dynticks_task_exit(); - rcu_nmi_enter(); - if (rdtp->dynticks_nmi_nesting == 1) - rcu_cleanup_after_idle(); + rcu_nmi_enter_common(true); } /* -- cgit v1.2.3 From 3e31009898699dfca823893054748d85048dc7b3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 21 Jun 2018 12:50:01 -0700 Subject: rcu: Defer reporting RCU-preempt quiescent states when disabled This commit defers reporting of RCU-preempt quiescent states at rcu_read_unlock_special() time when any of interrupts, softirq, or preemption are disabled. These deferred quiescent states are reported at a later RCU_SOFTIRQ, context switch, idle entry, or CPU-hotplug offline operation. Of course, if another RCU read-side critical section has started in the meantime, the reporting of the quiescent state will be further deferred. This also means that disabling preemption, interrupts, and/or softirqs will act as an RCU-preempt read-side critical section. This is enforced by checking preempt_count() as needed. Some special cases must be handled on an ad-hoc basis, for example, context switch is a quiescent state even though both the scheduler and do_exit() disable preemption. In these cases, additional calls to rcu_preempt_deferred_qs() override the preemption disabling. Similar logic overrides disabled interrupts in rcu_preempt_check_callbacks() because in this case the quiescent state happened just before the corresponding scheduling-clock interrupt. 
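Roughly, as an illustrative sketch (not taken from the patch itself):

	rcu_read_lock();
	local_irq_disable();
	rcu_read_unlock();	/* irqs disabled: QS cannot be reported here */
	local_irq_enable();	/* QS stays deferred and is reported from a
				 * later RCU_SOFTIRQ, context switch, idle
				 * entry, or CPU-hotplug offline operation. */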
In theory, this change lifts a long-standing restriction that required that if interrupts were disabled across a call to rcu_read_unlock() that the matching rcu_read_lock() also be contained within that interrupts-disabled region of code. Because the reporting of the corresponding RCU-preempt quiescent state is now deferred until after interrupts have been enabled, it is no longer possible for this situation to result in deadlocks involving the scheduler's runqueue and priority-inheritance locks. This may allow some code simplification that might reduce interrupt latency a bit. Unfortunately, in practice this would also defer deboosting a low-priority task that had been subjected to RCU priority boosting, so real-time-response considerations might well force this restriction to remain in place. Because RCU-preempt grace periods are now blocked not only by RCU read-side critical sections, but also by disabling of interrupts, preemption, and softirqs, it will be possible to eliminate RCU-bh and RCU-sched in favor of RCU-preempt in CONFIG_PREEMPT=y kernels. This may require some additional plumbing to provide the network denial-of-service guarantees that have been traditionally provided by RCU-bh. Once these are in place, CONFIG_PREEMPT=n kernels will be able to fold RCU-bh into RCU-sched. This would mean that all kernels would have but one flavor of RCU, which would open the door to significant code cleanup. Moving to a single flavor of RCU would also have the beneficial effect of reducing the NOCB kthreads by at least a factor of two. Signed-off-by: Paul E. McKenney [ paulmck: Apply rcu_read_unlock_special() preempt_count() feedback from Joel Fernandes. ] [ paulmck: Adjust rcu_eqs_enter() call to rcu_preempt_deferred_qs() in response to bug reports from kbuild test robot. ] [ paulmck: Fix bug located by kbuild test robot involving recursion via rcu_preempt_deferred_qs(). ] --- .../RCU/Design/Requirements/Requirements.html | 50 ++++--- include/linux/rcutiny.h | 5 + kernel/rcu/tree.c | 9 ++ kernel/rcu/tree.h | 3 + kernel/rcu/tree_exp.h | 71 +++++++--- kernel/rcu/tree_plugin.h | 144 ++++++++++++++++----- 6 files changed, 205 insertions(+), 77 deletions(-) (limited to 'kernel') diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html index 49690228b1c6..038714475edb 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.html +++ b/Documentation/RCU/Design/Requirements/Requirements.html @@ -2394,30 +2394,9 @@ when invoked from a CPU-hotplug notifier.

RCU depends on the scheduler, and the scheduler uses RCU to protect some of its data structures. -This means the scheduler is forbidden from acquiring -the runqueue locks and the priority-inheritance locks -in the middle of an outermost RCU read-side critical section unless either -(1) it releases them before exiting that same -RCU read-side critical section, or -(2) interrupts are disabled across -that entire RCU read-side critical section. -This same prohibition also applies (recursively!) to any lock that is acquired -while holding any lock to which this prohibition applies. -Adhering to this rule prevents preemptible RCU from invoking -rcu_read_unlock_special() while either runqueue or -priority-inheritance locks are held, thus avoiding deadlock. - -

-Prior to v4.4, it was only necessary to disable preemption across -RCU read-side critical sections that acquired scheduler locks. -In v4.4, expedited grace periods started using IPIs, and these -IPIs could force a rcu_read_unlock() to take the slowpath. -Therefore, this expedited-grace-period change required disabling of -interrupts, not just preemption. - -

-For RCU's part, the preemptible-RCU rcu_read_unlock() -implementation must be written carefully to avoid similar deadlocks. +The preemptible-RCU rcu_read_unlock() +implementation must therefore be written carefully to avoid deadlocks +involving the scheduler's runqueue and priority-inheritance locks. In particular, rcu_read_unlock() must tolerate an interrupt where the interrupt handler invokes both rcu_read_lock() and rcu_read_unlock(). @@ -2426,7 +2405,7 @@ negative nesting levels to avoid destructive recursion via interrupt handler's use of RCU.

-This pair of mutual scheduler-RCU requirements came as a +This scheduler-RCU requirement came as a complete surprise.

@@ -2437,9 +2416,28 @@ when running context-switch-heavy workloads when built with CONFIG_NO_HZ_FULL=y did come as a surprise [PDF]. RCU has made good progress towards meeting this requirement, even -for context-switch-have CONFIG_NO_HZ_FULL=y workloads, +for context-switch-heavy CONFIG_NO_HZ_FULL=y workloads, but there is room for further improvement. +

+In the past, it was forbidden to disable interrupts across an +rcu_read_unlock() unless that interrupt-disabled region +of code also included the matching rcu_read_lock(). +Violating this restriction could result in deadlocks involving the +scheduler's runqueue and priority-inheritance spinlocks. +This restriction was lifted when interrupt-disabled calls to +rcu_read_unlock() started deferring the reporting of +the resulting RCU-preempt quiescent state until the end of that +interrupts-disabled region. +This deferred reporting means that the scheduler's runqueue and +priority-inheritance locks cannot be held while reporting an RCU-preempt +quiescent state, which lifts the earlier restriction, at least from +a deadlock perspective. +Unfortunately, real-time systems using RCU priority boosting may +need this restriction to remain in effect because deferred +quiescent-state reporting also defers deboosting, which in turn +degrades real-time latencies. +

Tracing and RCU

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8d9a0ea8f0b5..f617ab19bb51 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -115,6 +115,11 @@ static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } static inline void exit_rcu(void) { } +static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) +{ + return false; +} +static inline void rcu_preempt_deferred_qs(struct task_struct *t) { } #ifdef CONFIG_SRCU void rcu_scheduler_starting(void); #else /* #ifndef CONFIG_SRCU */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 36786789b625..346624716d6e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -422,6 +422,7 @@ static void rcu_momentary_dyntick_idle(void) special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); /* It is illegal to call this from idle state. */ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); + rcu_preempt_deferred_qs(current); } /* @@ -729,6 +730,7 @@ static void rcu_eqs_enter(bool user) do_nocb_deferred_wakeup(rdp); } rcu_prepare_for_idle(); + rcu_preempt_deferred_qs(current); WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ rcu_dynticks_eqs_enter(); rcu_dynticks_task_enter(); @@ -2850,6 +2852,12 @@ __rcu_process_callbacks(struct rcu_state *rsp) WARN_ON_ONCE(!rdp->beenonline); + /* Report any deferred quiescent states if preemption enabled. */ + if (!(preempt_count() & PREEMPT_MASK)) + rcu_preempt_deferred_qs(current); + else if (rcu_preempt_need_deferred_qs(current)) + resched_cpu(rdp->cpu); /* Provoke future context switch. */ + /* Update RCU state based on any recent quiescent states. */ rcu_check_quiescent_state(rsp, rdp); @@ -3823,6 +3831,7 @@ void rcu_report_dead(unsigned int cpu) rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda), true); preempt_enable(); + rcu_preempt_deferred_qs(current); for_each_rcu_flavor(rsp) rcu_cleanup_dying_idle_cpu(cpu, rsp); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4e74df768c57..025bd2e5592b 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -195,6 +195,7 @@ struct rcu_data { bool core_needs_qs; /* Core waits for quiesc state. */ bool beenonline; /* CPU online at least once. */ bool gpwrap; /* Possible ->gp_seq wrap. */ + bool deferred_qs; /* This CPU awaiting a deferred QS? */ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ unsigned long grpmask; /* Mask to apply to leaf qsmask. 
*/ unsigned long ticks_this_gp; /* The number of scheduling-clock */ @@ -461,6 +462,8 @@ static void rcu_cleanup_after_idle(void); static void rcu_prepare_for_idle(void); static void rcu_idle_count_callbacks_posted(void); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); +static bool rcu_preempt_need_deferred_qs(struct task_struct *t); +static void rcu_preempt_deferred_qs(struct task_struct *t); static void print_cpu_stall_info_begin(void); static void print_cpu_stall_info(struct rcu_state *rsp, int cpu); static void print_cpu_stall_info_end(void); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0b2c2ad69629..f9d5bbd8adce 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -262,6 +262,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp, bool wake) { + WRITE_ONCE(rdp->deferred_qs, false); rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake); } @@ -735,32 +736,70 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited); */ static void sync_rcu_exp_handler(void *info) { - struct rcu_data *rdp; + unsigned long flags; struct rcu_state *rsp = info; + struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_node *rnp = rdp->mynode; struct task_struct *t = current; /* - * Within an RCU read-side critical section, request that the next - * rcu_read_unlock() report. Unless this RCU read-side critical - * section has already blocked, in which case it is already set - * up for the expedited grace period to wait on it. + * First, the common case of not being in an RCU read-side + * critical section. If also enabled or idle, immediately + * report the quiescent state, otherwise defer. */ - if (t->rcu_read_lock_nesting > 0 && - !t->rcu_read_unlock_special.b.blocked) { - t->rcu_read_unlock_special.b.exp_need_qs = true; + if (!t->rcu_read_lock_nesting) { + if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || + rcu_dynticks_curr_cpu_in_eqs()) { + rcu_report_exp_rdp(rsp, rdp, true); + } else { + rdp->deferred_qs = true; + resched_cpu(rdp->cpu); + } return; } /* - * We are either exiting an RCU read-side critical section (negative - * values of t->rcu_read_lock_nesting) or are not in one at all - * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU - * read-side critical section that blocked before this expedited - * grace period started. Either way, we can immediately report - * the quiescent state. + * Second, the less-common case of being in an RCU read-side + * critical section. In this case we can count on a future + * rcu_read_unlock(). However, this rcu_read_unlock() might + * execute on some other CPU, but in that case there will be + * a future context switch. Either way, if the expedited + * grace period is still waiting on this CPU, set ->deferred_qs + * so that the eventual quiescent state will be reported. + * Note that there is a large group of race conditions that + * can have caused this quiescent state to already have been + * reported, so we really do need to check ->expmask. 
*/ - rdp = this_cpu_ptr(rsp->rda); - rcu_report_exp_rdp(rsp, rdp, true); + if (t->rcu_read_lock_nesting > 0) { + raw_spin_lock_irqsave_rcu_node(rnp, flags); + if (rnp->expmask & rdp->grpmask) + rdp->deferred_qs = true; + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + + /* + * The final and least likely case is where the interrupted + * code was just about to or just finished exiting the RCU-preempt + * read-side critical section, and no, we can't tell which. + * So either way, set ->deferred_qs to flag later code that + * a quiescent state is required. + * + * If the CPU is fully enabled (or if some buggy RCU-preempt + * read-side critical section is being used from idle), just + * invoke rcu_preempt_defer_qs() to immediately report the + * quiescent state. We cannot use rcu_read_unlock_special() + * because we are in an interrupt handler, which will cause that + * function to take an early exit without doing anything. + * + * Otherwise, use resched_cpu() to force a context switch after + * the CPU enables everything. + */ + rdp->deferred_qs = true; + if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || + WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) + rcu_preempt_deferred_qs(t); + else + resched_cpu(rdp->cpu); } /** diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index a97c20ea9bce..542791361908 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -371,6 +371,9 @@ static void rcu_preempt_note_context_switch(bool preempt) * behalf of preempted instance of __rcu_read_unlock(). */ rcu_read_unlock_special(t); + rcu_preempt_deferred_qs(t); + } else { + rcu_preempt_deferred_qs(t); } /* @@ -464,54 +467,51 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp) } /* - * Handle special cases during rcu_read_unlock(), such as needing to - * notify RCU core processing or task having blocked during the RCU - * read-side critical section. + * Report deferred quiescent states. The deferral time can + * be quite short, for example, in the case of the call from + * rcu_read_unlock_special(). */ -static void rcu_read_unlock_special(struct task_struct *t) +static void +rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) { bool empty_exp; bool empty_norm; bool empty_exp_now; - unsigned long flags; struct list_head *np; bool drop_boost_mutex = false; struct rcu_data *rdp; struct rcu_node *rnp; union rcu_special special; - /* NMI handlers cannot block and cannot safely manipulate state. */ - if (in_nmi()) - return; - - local_irq_save(flags); - /* * If RCU core is waiting for this CPU to exit its critical section, * report the fact that it has exited. Because irqs are disabled, * t->rcu_read_unlock_special cannot change. */ special = t->rcu_read_unlock_special; + rdp = this_cpu_ptr(rcu_state_p->rda); + if (!special.s && !rdp->deferred_qs) { + local_irq_restore(flags); + return; + } if (special.b.need_qs) { rcu_preempt_qs(); t->rcu_read_unlock_special.b.need_qs = false; - if (!t->rcu_read_unlock_special.s) { + if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) { local_irq_restore(flags); return; } } /* - * Respond to a request for an expedited grace period, but only if - * we were not preempted, meaning that we were running on the same - * CPU throughout. If we were preempted, the exp_need_qs flag - * would have been cleared at the time of the first preemption, - * and the quiescent state would be reported when we were dequeued. + * Respond to a request by an expedited grace period for a + * quiescent state from this CPU. 
Note that requests from + * tasks are handled when removing the task from the + * blocked-tasks list below. */ - if (special.b.exp_need_qs) { - WARN_ON_ONCE(special.b.blocked); + if (special.b.exp_need_qs || rdp->deferred_qs) { t->rcu_read_unlock_special.b.exp_need_qs = false; - rdp = this_cpu_ptr(rcu_state_p->rda); + rdp->deferred_qs = false; rcu_report_exp_rdp(rcu_state_p, rdp, true); if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); @@ -519,19 +519,6 @@ static void rcu_read_unlock_special(struct task_struct *t) } } - /* Hardware IRQ handlers cannot block, complain if they get here. */ - if (in_irq() || in_serving_softirq()) { - lockdep_rcu_suspicious(__FILE__, __LINE__, - "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); - pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", - t->rcu_read_unlock_special.s, - t->rcu_read_unlock_special.b.blocked, - t->rcu_read_unlock_special.b.exp_need_qs, - t->rcu_read_unlock_special.b.need_qs); - local_irq_restore(flags); - return; - } - /* Clean up if blocked during RCU read-side critical section. */ if (special.b.blocked) { t->rcu_read_unlock_special.b.blocked = false; @@ -602,6 +589,72 @@ static void rcu_read_unlock_special(struct task_struct *t) } } +/* + * Is a deferred quiescent-state pending, and are we also not in + * an RCU read-side critical section? It is the caller's responsibility + * to ensure it is otherwise safe to report any deferred quiescent + * states. The reason for this is that it is safe to report a + * quiescent state during context switch even though preemption + * is disabled. This function cannot be expected to understand these + * nuances, so the caller must handle them. + */ +static bool rcu_preempt_need_deferred_qs(struct task_struct *t) +{ + return (this_cpu_ptr(&rcu_preempt_data)->deferred_qs || + READ_ONCE(t->rcu_read_unlock_special.s)) && + !t->rcu_read_lock_nesting; +} + +/* + * Report a deferred quiescent state if needed and safe to do so. + * As with rcu_preempt_need_deferred_qs(), "safe" involves only + * not being in an RCU read-side critical section. The caller must + * evaluate safety in terms of interrupt, softirq, and preemption + * disabling. + */ +static void rcu_preempt_deferred_qs(struct task_struct *t) +{ + unsigned long flags; + bool couldrecurse = t->rcu_read_lock_nesting >= 0; + + if (!rcu_preempt_need_deferred_qs(t)) + return; + if (couldrecurse) + t->rcu_read_lock_nesting -= INT_MIN; + local_irq_save(flags); + rcu_preempt_deferred_qs_irqrestore(t, flags); + if (couldrecurse) + t->rcu_read_lock_nesting += INT_MIN; +} + +/* + * Handle special cases during rcu_read_unlock(), such as needing to + * notify RCU core processing or task having blocked during the RCU + * read-side critical section. + */ +static void rcu_read_unlock_special(struct task_struct *t) +{ + unsigned long flags; + bool preempt_bh_were_disabled = + !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)); + bool irqs_were_disabled; + + /* NMI handlers cannot block and cannot safely manipulate state. */ + if (in_nmi()) + return; + + local_irq_save(flags); + irqs_were_disabled = irqs_disabled_flags(flags); + if ((preempt_bh_were_disabled || irqs_were_disabled) && + t->rcu_read_unlock_special.b.blocked) { + /* Need to defer quiescent state until everything is enabled. 
*/ + raise_softirq_irqoff(RCU_SOFTIRQ); + local_irq_restore(flags); + return; + } + rcu_preempt_deferred_qs_irqrestore(t, flags); +} + /* * Dump detailed information for all tasks blocking the current RCU * grace period on the specified rcu_node structure. @@ -737,10 +790,20 @@ static void rcu_preempt_check_callbacks(void) struct rcu_state *rsp = &rcu_preempt_state; struct task_struct *t = current; - if (t->rcu_read_lock_nesting == 0) { - rcu_preempt_qs(); + if (t->rcu_read_lock_nesting > 0 || + (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) { + /* No QS, force context switch if deferred. */ + if (rcu_preempt_need_deferred_qs(t)) + resched_cpu(smp_processor_id()); + } else if (rcu_preempt_need_deferred_qs(t)) { + rcu_preempt_deferred_qs(t); /* Report deferred QS. */ + return; + } else if (!t->rcu_read_lock_nesting) { + rcu_preempt_qs(); /* Report immediate QS. */ return; } + + /* If GP is oldish, ask for help from rcu_read_unlock_special(). */ if (t->rcu_read_lock_nesting > 0 && __this_cpu_read(rcu_data_p->core_needs_qs) && __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) && @@ -859,6 +922,7 @@ void exit_rcu(void) barrier(); t->rcu_read_unlock_special.b.blocked = true; __rcu_read_unlock(); + rcu_preempt_deferred_qs(current); } /* @@ -940,6 +1004,16 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp) return false; } +/* + * Because there is no preemptible RCU, there can be no deferred quiescent + * states. + */ +static bool rcu_preempt_need_deferred_qs(struct task_struct *t) +{ + return false; +} +static void rcu_preempt_deferred_qs(struct task_struct *t) { } + /* * Because preemptible RCU does not exist, we never have to check for * tasks blocked within RCU read-side critical sections. -- cgit v1.2.3 From c0335743c5d80233753d81a4c7d22b7437363a8f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 21 Jun 2018 16:17:46 -0700 Subject: rcutorture: Test extended "rcu" read-side critical sections This commit makes the "rcu" torture type test extended read-side critical sections in order to test the deferral of RCU-preempt quiescent-state testing. In CONFIG_PREEMPT=n kernels, this simply duplicates the setup already in place for the "sched" torture type. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index c596c6f1e457..c55d1483886e 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -431,6 +431,7 @@ static struct rcu_torture_ops rcu_ops = { .stats = NULL, .irq_capable = 1, .can_boost = rcu_can_boost(), + .extendables = RCUTORTURE_MAX_EXTEND, .name = "rcu" }; -- cgit v1.2.3 From 27c744e32a9a4066daca0ee7496819bff78c1b37 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 27 Jun 2018 21:48:00 -0700 Subject: rcu: Allow processing deferred QSes for exiting RCU-preempt readers If an RCU-preempt read-side critical section is exiting, that is, ->rcu_read_lock_nesting is negative, then it is a good time to look at the possibility of reporting deferred quiescent states. This commit therefore updates the checks in rcu_preempt_need_deferred_qs() to allow exiting critical sections to report deferred quiescent states. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 542791361908..24c209676d20 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -602,7 +602,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) { return (this_cpu_ptr(&rcu_preempt_data)->deferred_qs || READ_ONCE(t->rcu_read_unlock_special.s)) && - !t->rcu_read_lock_nesting; + t->rcu_read_lock_nesting <= 0; } /* -- cgit v1.2.3 From fcc878e4dfb70128a73857c609d70570629b0d9e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 28 Jun 2018 07:39:59 -0700 Subject: rcu: Remove now-unused ->b.exp_need_qs field from the rcu_special union The ->b.exp_need_qs field is now set only to false, so this commit removes it. The job this field used to do is now done by the rcu_data structure's ->deferred_qs field, which is a consequence of a better split between task-based (the rcu_node structure's ->exp_tasks field) and CPU-based (the aforementioned rcu_data structure's ->deferred_qs field) tracking of quiescent states for RCU-preempt expedited grace periods. Signed-off-by: Paul E. McKenney --- include/linux/sched.h | 6 +----- kernel/rcu/tree_plugin.h | 13 ++++--------- 2 files changed, 5 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 977cb57d7bc9..004ca21f7e80 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -571,12 +571,8 @@ union rcu_special { struct { u8 blocked; u8 need_qs; - u8 exp_need_qs; - - /* Otherwise the compiler can store garbage here: */ - u8 pad; } b; /* Bits. */ - u32 s; /* Set of bits. */ + u16 s; /* Set of bits. */ }; enum perf_event_task_context { diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 24c209676d20..527a52792dce 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -284,13 +284,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * no need to check for a subsequent expedited GP. (Though we are * still in a quiescent state in any case.) */ - if (blkd_state & RCU_EXP_BLKD && - t->rcu_read_unlock_special.b.exp_need_qs) { - t->rcu_read_unlock_special.b.exp_need_qs = false; + if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs) rcu_report_exp_rdp(rdp->rsp, rdp, true); - } else { - WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs); - } + else + WARN_ON_ONCE(rdp->deferred_qs); } /* @@ -509,9 +506,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * tasks are handled when removing the task from the * blocked-tasks list below. */ - if (special.b.exp_need_qs || rdp->deferred_qs) { - t->rcu_read_unlock_special.b.exp_need_qs = false; - rdp->deferred_qs = false; + if (rdp->deferred_qs) { rcu_report_exp_rdp(rcu_state_p, rdp, true); if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); -- cgit v1.2.3 From e11ec65cc8d63c41fc468363b65826a5ae4b8c66 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 28 Jun 2018 12:45:23 -0700 Subject: rcu: Add warning to detect half-interrupts RCU's dyntick-idle code is written to tolerate half-interrupts, that it, either an interrupt that invokes rcu_irq_enter() but never invokes the corresponding rcu_irq_exit() on the one hand, or an interrupt that never invokes rcu_irq_enter() but does invoke the "corresponding" rcu_irq_exit() on the other. 
These things really did happen at one time, as evidenced by this ca-2011 LKML post: http://lkml.kernel.org/r/20111014170019.GE2428@linux.vnet.ibm.com The reason why RCU tolerates half-interrupts is that usermode helpers used exceptions to invoke a system call from within the kernel such that the system call did a normal return (not a return from exception) to the calling context. This caused rcu_irq_enter() to be invoked without a matching rcu_irq_exit(). However, usermode helpers have since been rewritten to make much more housebroken use of workqueues, kernel threads, and do_execve(), and therefore should no longer produce half-interrupts. No one knows of any other source of half-interrupts, but then again, no one seems insane enough to go audit the entire kernel to verify that half-interrupts really are a relic of the past. This commit therefore adds a pair of WARN_ON_ONCE() calls that will trigger in the presence of half interrupts, which the code will continue to handle correctly. If neither of these WARN_ON_ONCE() trigger by mid-2021, then perhaps RCU can stop handling half-interrupts, which would be a considerable simplification. Reported-by: Steven Rostedt Reported-by: Joel Fernandes Reported-by: Andy Lutomirski Signed-off-by: Paul E. McKenney Reviewed-by: Joel Fernandes (Google) --- kernel/rcu/tree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 346624716d6e..0b42249e2e40 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -714,6 +714,7 @@ static void rcu_eqs_enter(bool user) struct rcu_dynticks *rdtp; rdtp = this_cpu_ptr(&rcu_dynticks); + WARN_ON_ONCE(rdtp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && rdtp->dynticks_nesting == 0); @@ -896,6 +897,7 @@ static void rcu_eqs_exit(bool user) trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); WRITE_ONCE(rdtp->dynticks_nesting, 1); + WARN_ON_ONCE(rdtp->dynticks_nmi_nesting); WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); } -- cgit v1.2.3 From d28139c4e96713d52a300fb9036c5be2f45e0741 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 28 Jun 2018 14:45:25 -0700 Subject: rcu: Apply RCU-bh QSes to RCU-sched and RCU-preempt when safe One necessary step towards consolidating the three flavors of RCU is to make sure that the resulting consolidated "one flavor to rule them all" correctly handles networking denial-of-service attacks. One thing that allows RCU-bh to do so is that __do_softirq() invokes rcu_bh_qs() every so often, and so something similar has to happen for consolidated RCU. This must be done carefully. For example, if a preemption-disabled region of code takes an interrupt which does softirq processing before returning, consolidated RCU must ignore the resulting rcu_bh_qs() invocations -- preemption is still disabled, and that means an RCU reader for the consolidated flavor. This commit therefore creates a new rcu_softirq_qs() that is called only from the ksoftirqd task, thus avoiding the interrupted-a-preempted-region problem. This new rcu_softirq_qs() function invokes rcu_sched_qs(), rcu_preempt_qs(), and rcu_preempt_deferred_qs(). The latter call handles any deferred quiescent states. Note that __do_softirq() still invokes rcu_bh_qs(). It will continue to do so until a later stage of cleanup when the RCU-bh flavor is removed. 
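As an illustrative sketch (not from the patch) of the two cases, in the same
call-trace notation used above:

	preempt_disable();		/* consolidated-RCU read-side section */
	  <interrupt>
	    irq_exit()
	      __do_softirq()
	        rcu_bh_qs();		/* must NOT count as a QS here */
	preempt_enable();

versus the ksoftirqd task, which does not run nested inside such a region:

	ksoftirqd/N
	  __do_softirq()
	    rcu_bh_qs();
	    rcu_softirq_qs();		/* reports sched, preempt, and any
					 * deferred quiescent states */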
Signed-off-by: Paul E. McKenney [ paulmck: Fix !SMP issue located by kbuild test robot. ] --- include/linux/rcutiny.h | 5 +++++ include/linux/rcutree.h | 1 + kernel/rcu/tree.c | 7 +++++++ kernel/rcu/tree.h | 1 + kernel/rcu/tree_plugin.h | 5 +++++ kernel/softirq.c | 2 ++ 6 files changed, 21 insertions(+) (limited to 'kernel') diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index f617ab19bb51..bcfbc40a7239 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -90,6 +90,11 @@ static inline void kfree_call_rcu(struct rcu_head *head, call_rcu(head, func); } +static inline void rcu_softirq_qs(void) +{ + rcu_sched_qs(); +} + #define rcu_note_context_switch(preempt) \ do { \ rcu_sched_qs(); \ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 914655848ef6..664b580695d6 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,6 +30,7 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H +void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); int rcu_needs_cpu(u64 basem, u64 *nextevt); void rcu_cpu_stall_reset(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0b42249e2e40..cb35a417d947 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -255,6 +255,13 @@ void rcu_bh_qs(void) } } +void rcu_softirq_qs(void) +{ + rcu_sched_qs(); + rcu_preempt_qs(); + rcu_preempt_deferred_qs(current); +} + /* * Steal a bit from the bottom of ->dynticks for idle entry/exit * control. Initially this is for TLB flushing. diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 025bd2e5592b..e02c882861eb 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -433,6 +433,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); /* Forward declarations for rcutree_plugin.h */ static void rcu_bootup_announce(void); +static void rcu_preempt_qs(void); static void rcu_preempt_note_context_switch(bool preempt); static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 527a52792dce..c686bf63bba5 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -974,6 +974,11 @@ static void __init rcu_bootup_announce(void) rcu_bootup_announce_oddness(); } +/* Because preemptible RCU does not exist, we can ignore its QSes. */ +static void rcu_preempt_qs(void) +{ +} + /* * Because preemptible RCU does not exist, we never have to check for * CPUs being in quiescent states. diff --git a/kernel/softirq.c b/kernel/softirq.c index 6f584861d329..ebd69694144a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -302,6 +302,8 @@ restart: } rcu_bh_qs(); + if (__this_cpu_read(ksoftirqd) == current) + rcu_softirq_qs(); local_irq_disable(); pending = local_softirq_pending(); -- cgit v1.2.3 From ba1c64c27239373be1b3d88cf0a9ac1b10fa871f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 30 Jun 2018 15:23:37 -0700 Subject: rcu: Report expedited grace periods at context-switch time This commit reduces the latency of expedited RCU grace periods by reporting a quiescent state for the CPU at context-switch time. In CONFIG_PREEMPT=y kernels, if the outgoing task is still within an RCU read-side critical section (and thus still blocking some grace period, perhaps including this expedited grace period), then that task will already have been placed on one of the leaf rcu_node structures' ->blkd_tasks list. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c686bf63bba5..0d7107fb3dec 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -332,7 +332,7 @@ static void rcu_preempt_qs(void) static void rcu_preempt_note_context_switch(bool preempt) { struct task_struct *t = current; - struct rcu_data *rdp; + struct rcu_data *rdp = this_cpu_ptr(rcu_state_p->rda); struct rcu_node *rnp; lockdep_assert_irqs_disabled(); @@ -341,7 +341,6 @@ static void rcu_preempt_note_context_switch(bool preempt) !t->rcu_read_unlock_special.b.blocked) { /* Possibly blocking in an RCU read-side critical section. */ - rdp = this_cpu_ptr(rcu_state_p->rda); rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); t->rcu_read_unlock_special.b.blocked = true; @@ -383,6 +382,8 @@ static void rcu_preempt_note_context_switch(bool preempt) * means that we continue to block the current grace period. */ rcu_preempt_qs(); + if (rdp->deferred_qs) + rcu_report_exp_rdp(rcu_state_p, rdp, true); } /* -- cgit v1.2.3 From 65cfe3583b612a22e12fba9a7bbd2d37ca5ad941 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 1 Jul 2018 07:40:52 -0700 Subject: rcu: Define RCU-bh update API in terms of RCU Now that the main RCU API knows about softirq disabling and softirq's quiescent states, the RCU-bh update code can be dispensed with. This commit therefore removes the RCU-bh update-side implementation and defines RCU-bh's update-side API in terms of that of either RCU-preempt or RCU-sched, depending on the setting of the CONFIG_PREEMPT Kconfig option. In kernels built with CONFIG_RCU_NOCB_CPU=y this has the knock-on effect of reducing by one the number of rcuo kthreads per CPU. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 10 +++-- include/linux/rcutiny.h | 10 +++-- include/linux/rcutree.h | 8 +++- kernel/rcu/tiny.c | 115 +++++++++-------------------------------------- kernel/rcu/tree.c | 97 ++++----------------------------------- kernel/rcu/tree_plugin.h | 1 - kernel/softirq.c | 1 - 7 files changed, 48 insertions(+), 194 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 75e5b393cf44..9ebfd436cec7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -55,11 +55,15 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); #define call_rcu call_rcu_sched #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ -void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); +static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); @@ -104,7 +108,6 @@ static inline int rcu_preempt_depth(void) void rcu_init(void); extern int rcu_scheduler_active __read_mostly; void rcu_sched_qs(void); -void rcu_bh_qs(void); void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); @@ -326,8 +329,7 @@ static inline void rcu_preempt_sleep_check(void) { } * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of * multiple flavors of pointers to match the multiple flavors of RCU - * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in - * the future. 
+ * (e.g., __rcu_sched, and __srcu), should this make sense in the future. */ #ifdef __CHECKER__ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index bcfbc40a7239..ac26c27ccde8 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -56,19 +56,23 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -extern void rcu_barrier_bh(void); -extern void rcu_barrier_sched(void); - static inline void synchronize_rcu_expedited(void) { synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ } +extern void rcu_barrier_sched(void); + static inline void rcu_barrier(void) { rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ } +static inline void rcu_barrier_bh(void) +{ + rcu_barrier(); +} + static inline void synchronize_rcu_bh(void) { synchronize_sched(); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 664b580695d6..c789c302a2c9 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,7 +45,11 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } -void synchronize_rcu_bh(void); +static inline void synchronize_rcu_bh(void) +{ + synchronize_rcu(); +} + void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); @@ -69,7 +73,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); */ static inline void synchronize_rcu_bh_expedited(void) { - synchronize_sched_expedited(); + synchronize_rcu_expedited(); } void rcu_barrier(void); diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index befc9321a89c..cadcf63c4889 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -51,64 +51,22 @@ static struct rcu_ctrlblk rcu_sched_ctrlblk = { .curtail = &rcu_sched_ctrlblk.rcucblist, }; -static struct rcu_ctrlblk rcu_bh_ctrlblk = { - .donetail = &rcu_bh_ctrlblk.rcucblist, - .curtail = &rcu_bh_ctrlblk.rcucblist, -}; - -void rcu_barrier_bh(void) -{ - wait_rcu_gp(call_rcu_bh); -} -EXPORT_SYMBOL(rcu_barrier_bh); - void rcu_barrier_sched(void) { wait_rcu_gp(call_rcu_sched); } EXPORT_SYMBOL(rcu_barrier_sched); -/* - * Helper function for rcu_sched_qs() and rcu_bh_qs(). - * Also irqs are disabled to avoid confusion due to interrupt handlers - * invoking call_rcu(). - */ -static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) -{ - if (rcp->donetail != rcp->curtail) { - rcp->donetail = rcp->curtail; - return 1; - } - - return 0; -} - -/* - * Record an rcu quiescent state. And an rcu_bh quiescent state while we - * are at it, given that any rcu quiescent state is also an rcu_bh - * quiescent state. Use "+" instead of "||" to defeat short circuiting. - */ +/* Record an rcu quiescent state. */ void rcu_sched_qs(void) { unsigned long flags; local_irq_save(flags); - if (rcu_qsctr_help(&rcu_sched_ctrlblk) + - rcu_qsctr_help(&rcu_bh_ctrlblk)) - raise_softirq(RCU_SOFTIRQ); - local_irq_restore(flags); -} - -/* - * Record an rcu_bh quiescent state. - */ -void rcu_bh_qs(void) -{ - unsigned long flags; - - local_irq_save(flags); - if (rcu_qsctr_help(&rcu_bh_ctrlblk)) + if (rcu_sched_ctrlblk.donetail != rcu_sched_ctrlblk.curtail) { + rcu_sched_ctrlblk.donetail = rcu_sched_ctrlblk.curtail; raise_softirq(RCU_SOFTIRQ); + } local_irq_restore(flags); } @@ -122,32 +80,27 @@ void rcu_check_callbacks(int user) { if (user) rcu_sched_qs(); - if (user || !in_softirq()) - rcu_bh_qs(); } -/* - * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure - * whose grace period has elapsed. 
- */ -static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) +/* Invoke the RCU callbacks whose grace period has elapsed. */ +static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) { struct rcu_head *next, *list; unsigned long flags; /* Move the ready-to-invoke callbacks to a local list. */ local_irq_save(flags); - if (rcp->donetail == &rcp->rcucblist) { + if (rcu_sched_ctrlblk.donetail == &rcu_sched_ctrlblk.rcucblist) { /* No callbacks ready, so just leave. */ local_irq_restore(flags); return; } - list = rcp->rcucblist; - rcp->rcucblist = *rcp->donetail; - *rcp->donetail = NULL; - if (rcp->curtail == rcp->donetail) - rcp->curtail = &rcp->rcucblist; - rcp->donetail = &rcp->rcucblist; + list = rcu_sched_ctrlblk.rcucblist; + rcu_sched_ctrlblk.rcucblist = *rcu_sched_ctrlblk.donetail; + *rcu_sched_ctrlblk.donetail = NULL; + if (rcu_sched_ctrlblk.curtail == rcu_sched_ctrlblk.donetail) + rcu_sched_ctrlblk.curtail = &rcu_sched_ctrlblk.rcucblist; + rcu_sched_ctrlblk.donetail = &rcu_sched_ctrlblk.rcucblist; local_irq_restore(flags); /* Invoke the callbacks on the local list. */ @@ -162,19 +115,13 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) } } -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) -{ - __rcu_process_callbacks(&rcu_sched_ctrlblk); - __rcu_process_callbacks(&rcu_bh_ctrlblk); -} - /* * Wait for a grace period to elapse. But it is illegal to invoke * synchronize_sched() from within an RCU read-side critical section. * Therefore, any legal call to synchronize_sched() is a quiescent * state, and so on a UP system, synchronize_sched() need do nothing. - * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the - * benefits of doing might_sleep() to reduce latency.) + * (But Lai Jiangshan points out the benefits of doing might_sleep() + * to reduce latency.) * * Cool, huh? (Due to Josh Triplett.) */ @@ -188,11 +135,11 @@ void synchronize_sched(void) EXPORT_SYMBOL_GPL(synchronize_sched); /* - * Helper function for call_rcu() and call_rcu_bh(). + * Post an RCU callback to be invoked after the end of an RCU-sched grace + * period. But since we have but one CPU, that would be after any + * quiescent state. */ -static void __call_rcu(struct rcu_head *head, - rcu_callback_t func, - struct rcu_ctrlblk *rcp) +void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; @@ -201,8 +148,8 @@ static void __call_rcu(struct rcu_head *head, head->next = NULL; local_irq_save(flags); - *rcp->curtail = head; - rcp->curtail = &head->next; + *rcu_sched_ctrlblk.curtail = head; + rcu_sched_ctrlblk.curtail = &head->next; local_irq_restore(flags); if (unlikely(is_idle_task(current))) { @@ -210,28 +157,8 @@ static void __call_rcu(struct rcu_head *head, resched_cpu(0); } } - -/* - * Post an RCU callback to be invoked after the end of an RCU-sched grace - * period. But since we have but one CPU, that would be after any - * quiescent state. - */ -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - __call_rcu(head, func, &rcu_sched_ctrlblk); -} EXPORT_SYMBOL_GPL(call_rcu_sched); -/* - * Post an RCU bottom-half callback to be invoked after any subsequent - * quiescent state. 
- */ -void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - __call_rcu(head, func, &rcu_bh_ctrlblk); -} -EXPORT_SYMBOL_GPL(call_rcu_bh); - void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cb35a417d947..aedf81a0abd8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -108,7 +108,6 @@ struct rcu_state sname##_state = { \ } RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); -RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); static struct rcu_state *const rcu_state_p; LIST_HEAD(rcu_struct_flavors); @@ -244,17 +243,6 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } -void rcu_bh_qs(void) -{ - RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); - if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { - trace_rcu_grace_period(TPS("rcu_bh"), - __this_cpu_read(rcu_bh_data.gp_seq), - TPS("cpuqs")); - __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); - } -} - void rcu_softirq_qs(void) { rcu_sched_qs(); @@ -581,7 +569,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); */ unsigned long rcu_bh_get_gp_seq(void) { - return READ_ONCE(rcu_bh_state.gp_seq); + return READ_ONCE(rcu_state_p->gp_seq); } EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); @@ -621,7 +609,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); */ void rcu_bh_force_quiescent_state(void) { - force_quiescent_state(&rcu_bh_state); + force_quiescent_state(rcu_state_p); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -680,10 +668,8 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, switch (test_type) { case RCU_FLAVOR: - rsp = rcu_state_p; - break; case RCU_BH_FLAVOR: - rsp = &rcu_bh_state; + rsp = rcu_state_p; break; case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; @@ -2673,26 +2659,15 @@ void rcu_check_callbacks(int user) * nested interrupt. In this case, the CPU is in * a quiescent state, so note it. * - * No memory barrier is required here because both - * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local - * variables that other CPUs neither access nor modify, - * at least not while the corresponding CPU is online. + * No memory barrier is required here because + * rcu_sched_qs() references only CPU-local variables + * that other CPUs neither access nor modify, at least + * not while the corresponding CPU is online. */ rcu_sched_qs(); - rcu_bh_qs(); rcu_note_voluntary_context_switch(current); - } else if (!in_softirq()) { - - /* - * Get here if this CPU did not take its interrupt from - * softirq, in other words, if it is not interrupting - * a rcu_bh read-side critical section. This is an _bh - * critical section, so note it. - */ - - rcu_bh_qs(); } rcu_preempt_check_callbacks(); if (rcu_pending()) @@ -3079,34 +3054,6 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu_sched); -/** - * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual callback function to be invoked after the grace period - * - * The callback function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_bh() assumes - * that the read-side critical sections end on completion of a softirq - * handler. This means that read-side critical sections in process - * context must not be interrupted by softirqs. 
This interface is to be - * used when most of the read-side critical sections are in softirq context. - * RCU read-side critical sections are delimited by: - * - * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR - * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. - * - * These may be nested. - * - * See the description of call_rcu() for more detailed information on - * memory ordering guarantees. - */ -void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - __call_rcu(head, func, &rcu_bh_state, -1, 0); -} -EXPORT_SYMBOL_GPL(call_rcu_bh); - /* * Queue an RCU callback for lazy invocation after a grace period. * This will likely be later named something like "call_rcu_lazy()", @@ -3191,33 +3138,6 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); -/** - * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. - * - * Control will return to the caller some time after a full rcu_bh grace - * period has elapsed, in other words after all currently executing rcu_bh - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), - * and may be nested. - * - * See the description of synchronize_sched() for more detailed information - * on memory ordering guarantees. - */ -void synchronize_rcu_bh(void) -{ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section"); - if (rcu_blocking_is_gp()) - return; - if (rcu_gp_is_expedited()) - synchronize_rcu_bh_expedited(); - else - wait_rcu_gp(call_rcu_bh); -} -EXPORT_SYMBOL_GPL(synchronize_rcu_bh); - /** * get_state_synchronize_rcu - Snapshot current RCU state * @@ -3529,7 +3449,7 @@ static void _rcu_barrier(struct rcu_state *rsp) */ void rcu_barrier_bh(void) { - _rcu_barrier(&rcu_bh_state); + _rcu_barrier(rcu_state_p); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -4180,7 +4100,6 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); - rcu_init_one(&rcu_bh_state); rcu_init_one(&rcu_sched_state); if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 0d7107fb3dec..1ff742a3c8d1 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1320,7 +1320,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, static void rcu_kthread_do_work(void) { rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); } diff --git a/kernel/softirq.c b/kernel/softirq.c index ebd69694144a..7a0720a20003 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -301,7 +301,6 @@ restart: pending >>= softirq_bit; } - rcu_bh_qs(); if (__this_cpu_read(ksoftirqd) == current) rcu_softirq_qs(); local_irq_disable(); -- cgit v1.2.3 From 82fcecfa81855924cc69f3078113cf63dd6c2964 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 2 Jul 2018 09:04:27 -0700 Subject: rcu: Update comments and help text for no more RCU-bh updaters This commit updates comments and help text to account for the fact that RCU-bh update-side functions are now simple wrappers for their RCU or RCU-sched counterparts. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 12 ++++-------- include/linux/rcupdate_wait.h | 6 +++--- include/linux/rcutree.h | 14 ++------------ kernel/rcu/Kconfig | 10 +++++----- kernel/rcu/tree.c | 17 +++++++++-------- kernel/rcu/update.c | 2 +- 6 files changed, 24 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9ebfd436cec7..8d5740edd63c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -688,14 +688,10 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent of rcu_read_lock(), but to be used when updates - * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since - * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a - * softirq handler to be a quiescent state, a process in RCU read-side - * critical section must be protected by disabling softirqs. Read-side - * critical sections in interrupt context can use just rcu_read_lock(), - * though this should at least be commented to avoid confusing people - * reading the code. + * This is equivalent of rcu_read_lock(), but also disables softirqs. + * Note that synchronize_rcu() and friends may be used for the update + * side, although synchronize_rcu_bh() is available as a wrapper in the + * short term. Longer term, the _bh update-side API will be eliminated. * * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 57f371344152..bc104699560e 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -36,13 +36,13 @@ do { \ * @...: List of call_rcu() functions for the flavors to wait on. * * This macro waits concurrently for multiple flavors of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU + * For example, synchronize_rcu_mult(call_rcu, call_rcu_sched) would wait + * on concurrent RCU and RCU-sched grace periods. Waiting on a give SRCU * domain requires you to write a wrapper function for that SRCU domain's * call_srcu() function, supplying the corresponding srcu_struct. * * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called + * or RCU-sched, given that anywhere synchronize_rcu_mult() can be called * is automatically a grace period. */ #define synchronize_rcu_mult(...) \ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c789c302a2c9..f7a41323aa54 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -58,18 +58,8 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); /** * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period * - * Wait for an RCU-bh grace period to elapse, but use a "big hammer" - * approach to force the grace period to end quickly. This consumes - * significant time on all CPUs and is unfriendly to real-time workloads, - * so is thus not recommended for any sort of common-case code. In fact, - * if you are using synchronize_rcu_bh_expedited() in a loop, please - * restructure your code to batch your updates, and then use a single - * synchronize_rcu_bh() instead. 
- * - * Note that it is illegal to call this function while holding any lock - * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal - * to call this function from a CPU-hotplug notifier. Failing to observe - * these restriction will result in deadlock. + * This is a transitional API and will soon be removed, with all + * callers converted to synchronize_rcu_expedited(). */ static inline void synchronize_rcu_bh_expedited(void) { diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index 9210379c0353..a0b7f0103ca9 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -229,11 +229,11 @@ config RCU_NOCB_CPU CPUs specified at boot time by the rcu_nocbs parameter. For each such CPU, a kthread ("rcuox/N") will be created to invoke callbacks, where the "N" is the CPU being offloaded, - and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and - "s" for RCU-sched. Nothing prevents this kthread from running - on the specified CPUs, but (1) the kthreads may be preempted - between each callback, and (2) affinity or cgroups can be used - to force the kthreads to run on whatever set of CPUs is desired. + and where the "p" for RCU-preempt and "s" for RCU-sched. + Nothing prevents this kthread from running on the specified + CPUs, but (1) the kthreads may be preempted between each + callback, and (2) affinity or cgroups can be used to force + the kthreads to run on whatever set of CPUs is desired. Say Y here if you want to help to debug reduced OS jitter. Say N here if you are unsure. diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index aedf81a0abd8..158c58d47b07 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -565,7 +565,8 @@ unsigned long rcu_sched_get_gp_seq(void) EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); /* - * Return the number of RCU-bh GPs completed thus far for debug & stats. + * Return the number of RCU GPs completed thus far for debug & stats. + * This is a transitional API and will soon be removed. */ unsigned long rcu_bh_get_gp_seq(void) { @@ -3069,13 +3070,13 @@ void kfree_call_rcu(struct rcu_head *head, EXPORT_SYMBOL_GPL(kfree_call_rcu); /* - * Because a context switch is a grace period for RCU-sched and RCU-bh, - * any blocking grace-period wait automatically implies a grace period - * if there is only one CPU online at any point time during execution - * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to - * occasionally incorrectly indicate that there are multiple CPUs online - * when there was in fact only one the whole time, as this just adds - * some overhead: RCU still operates correctly. + * Because a context switch is a grace period for RCU-sched, any blocking + * grace-period wait automatically implies a grace period if there + * is only one CPU online at any point time during execution of either + * synchronize_sched() or synchronize_rcu_bh(). It is OK to occasionally + * incorrectly indicate that there are multiple CPUs online when there + * was in fact only one the whole time, as this just adds some overhead: + * RCU still operates correctly. */ static int rcu_blocking_is_gp(void) { diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 39cb23d22109..9ea87d0aa386 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held); * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. 
* - * Note that rcu_read_lock() is disallowed if the CPU is either idle or + * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or * offline from an RCU perspective, so check for those as well. */ int rcu_read_lock_bh_held(void) -- cgit v1.2.3 From 2bbfc25b09dff6335acf4103c6c7c4591e62988b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 2 Jul 2018 09:17:57 -0700 Subject: rcu: Drop "wake" parameter from rcu_report_exp_rdp() The rcu_report_exp_rdp() function is always invoked with its "wake" argument set to "true", so this commit drops this parameter. The only potential call site that would use "false" is in the code driving the expedited grace period, and that code uses rcu_report_exp_cpu_mult() instead, which therefore retains its "wake" parameter. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 9 +++------ kernel/rcu/tree_exp.h | 9 ++++----- kernel/rcu/tree_plugin.h | 6 +++--- 3 files changed, 10 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 158c58d47b07..e1927147a4a5 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -165,8 +165,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); -static void rcu_report_exp_rdp(struct rcu_state *rsp, - struct rcu_data *rdp, bool wake); +static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp); static void sync_sched_exp_online_cleanup(int cpu); /* rcuc/rcub kthread realtime priority */ @@ -239,8 +238,7 @@ void rcu_sched_qs(void) if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) return; __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); - rcu_report_exp_rdp(&rcu_sched_state, - this_cpu_ptr(&rcu_sched_data), true); + rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); } void rcu_softirq_qs(void) @@ -3758,8 +3756,7 @@ void rcu_report_dead(unsigned int cpu) /* QS for any half-done expedited RCU-sched GP. */ preempt_disable(); - rcu_report_exp_rdp(&rcu_sched_state, - this_cpu_ptr(rcu_sched_state.rda), true); + rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda)); preempt_enable(); rcu_preempt_deferred_qs(current); for_each_rcu_flavor(rsp) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index f9d5bbd8adce..0f8f225c1b46 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -259,11 +259,10 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, /* * Report expedited quiescent state for specified rcu_data (CPU). */ -static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp, - bool wake) +static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp) { WRITE_ONCE(rdp->deferred_qs, false); - rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake); + rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true); } /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. 
*/ @@ -352,7 +351,7 @@ static void sync_sched_exp_handler(void *data) return; if (rcu_is_cpu_rrupt_from_idle()) { rcu_report_exp_rdp(&rcu_sched_state, - this_cpu_ptr(&rcu_sched_data), true); + this_cpu_ptr(&rcu_sched_data)); return; } __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true); @@ -750,7 +749,7 @@ static void sync_rcu_exp_handler(void *info) if (!t->rcu_read_lock_nesting) { if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || rcu_dynticks_curr_cpu_in_eqs()) { - rcu_report_exp_rdp(rsp, rdp, true); + rcu_report_exp_rdp(rsp, rdp); } else { rdp->deferred_qs = true; resched_cpu(rdp->cpu); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1ff742a3c8d1..9f0d054e6c20 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -285,7 +285,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * still in a quiescent state in any case.) */ if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs) - rcu_report_exp_rdp(rdp->rsp, rdp, true); + rcu_report_exp_rdp(rdp->rsp, rdp); else WARN_ON_ONCE(rdp->deferred_qs); } @@ -383,7 +383,7 @@ static void rcu_preempt_note_context_switch(bool preempt) */ rcu_preempt_qs(); if (rdp->deferred_qs) - rcu_report_exp_rdp(rcu_state_p, rdp, true); + rcu_report_exp_rdp(rcu_state_p, rdp); } /* @@ -508,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * blocked-tasks list below. */ if (rdp->deferred_qs) { - rcu_report_exp_rdp(rcu_state_p, rdp, true); + rcu_report_exp_rdp(rcu_state_p, rdp); if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); return; -- cgit v1.2.3 From 4cf439a200fd621f838270c36c853407a934bcb5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 2 Jul 2018 12:15:25 -0700 Subject: rcu: Fix typo in rcu_get_gp_kthreads_prio() header comment Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e1927147a4a5..61c15de884b0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -181,7 +181,7 @@ module_param(gp_init_delay, int, 0444); static int gp_cleanup_delay; module_param(gp_cleanup_delay, int, 0444); -/* Retreive RCU kthreads priority for rcutorture */ +/* Retrieve RCU kthreads priority for rcutorture */ int rcu_get_gp_kthreads_prio(void) { return kthread_prio; -- cgit v1.2.3 From 45975c7d21a1c0aba97e3d8007e2a7c123145748 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 2 Jul 2018 14:30:37 -0700 Subject: rcu: Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds Now that RCU-preempt knows about preemption disabling, its implementation of synchronize_rcu() works for synchronize_sched(), and likewise for the other RCU-sched update-side API members. This commit therefore confines the RCU-sched update-side code to CONFIG_PREEMPT=n builds, and defines RCU-sched's update-side API members in terms of those of RCU-preempt. This means that any given build of the Linux kernel has only one update-side flavor of RCU, namely RCU-preempt for CONFIG_PREEMPT=y builds and RCU-sched for CONFIG_PREEMPT=n builds. This in turn means that kernels built with CONFIG_RCU_NOCB_CPU=y have only one rcuo kthread per CPU. Signed-off-by: Paul E. 
McKenney Cc: Andi Kleen --- include/linux/rcupdate.h | 14 +-- include/linux/rcutiny.h | 7 ++ include/linux/rcutree.h | 7 +- kernel/rcu/tree.c | 301 ++++++++++++++++------------------------------- kernel/rcu/tree.h | 9 +- kernel/rcu/tree_exp.h | 153 ++++++++++++------------ kernel/rcu/tree_plugin.h | 297 +++++++++++++++++----------------------------- 7 files changed, 308 insertions(+), 480 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 8d5740edd63c..94474bb6b5c4 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -49,11 +49,11 @@ /* Exported common interfaces */ -#ifdef CONFIG_PREEMPT_RCU -void call_rcu(struct rcu_head *head, rcu_callback_t func); -#else /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TINY_RCU #define call_rcu call_rcu_sched -#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#else +void call_rcu(struct rcu_head *head, rcu_callback_t func); +#endif void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); @@ -92,11 +92,6 @@ static inline void __rcu_read_unlock(void) preempt_enable(); } -static inline void synchronize_rcu(void) -{ - synchronize_sched(); -} - static inline int rcu_preempt_depth(void) { return 0; @@ -107,7 +102,6 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active __read_mostly; -void rcu_sched_qs(void); void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index ac26c27ccde8..df2c0895c5e7 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -36,6 +36,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) /* Never flag non-existent other CPUs! 
*/ static inline bool rcu_eqs_special_set(int cpu) { return false; } +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + static inline unsigned long get_state_synchronize_rcu(void) { return 0; @@ -94,6 +99,8 @@ static inline void kfree_call_rcu(struct rcu_head *head, call_rcu(head, func); } +void rcu_sched_qs(void); + static inline void rcu_softirq_qs(void) { rcu_sched_qs(); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index f7a41323aa54..0c44720f0e84 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,14 +45,19 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } +void synchronize_rcu(void); static inline void synchronize_rcu_bh(void) { synchronize_rcu(); } -void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); +static inline void synchronize_sched_expedited(void) +{ + synchronize_rcu_expedited(); +} + void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); /** diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 61c15de884b0..5f79315f094e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -92,24 +92,29 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \ DEFINE_RCU_TPS(sname) \ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \ -struct rcu_state sname##_state = { \ - .level = { &sname##_state.node[0] }, \ - .rda = &sname##_data, \ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); \ +struct rcu_state rcu_state = { \ + .level = { &rcu_state.node[0] }, \ + .rda = &rcu_data, \ .call = cr, \ .gp_state = RCU_GP_IDLE, \ .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \ - .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ + .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), \ .name = RCU_STATE_NAME(sname), \ .abbr = sabbr, \ - .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \ - .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \ - .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \ + .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), \ + .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), \ + .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), \ } -RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); +#ifdef CONFIG_PREEMPT_RCU +RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); +#else +RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu); +#endif -static struct rcu_state *const rcu_state_p; +static struct rcu_state *const rcu_state_p = &rcu_state; +static struct rcu_data __percpu *const rcu_data_p = &rcu_data; LIST_HEAD(rcu_struct_flavors); /* Dump rcu_node combining tree at boot to verify correct setup. */ @@ -220,31 +225,9 @@ static int rcu_gp_in_progress(struct rcu_state *rsp) return rcu_seq_state(rcu_seq_current(&rsp->gp_seq)); } -/* - * Note a quiescent state. Because we do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period, this just sets a flag. - * The caller must have disabled preemption. 
- */ -void rcu_sched_qs(void) -{ - RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!"); - if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) - return; - trace_rcu_grace_period(TPS("rcu_sched"), - __this_cpu_read(rcu_sched_data.gp_seq), - TPS("cpuqs")); - __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false); - if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) - return; - __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); - rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); -} - void rcu_softirq_qs(void) { - rcu_sched_qs(); - rcu_preempt_qs(); + rcu_qs(); rcu_preempt_deferred_qs(current); } @@ -418,31 +401,18 @@ static void rcu_momentary_dyntick_idle(void) rcu_preempt_deferred_qs(current); } -/* - * Note a context switch. This is a quiescent state for RCU-sched, - * and requires special handling for preemptible RCU. - * The caller must have disabled interrupts. +/** + * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle + * + * If the current CPU is idle or running at a first-level (not nested) + * interrupt from idle, return true. The caller must have at least + * disabled preemption. */ -void rcu_note_context_switch(bool preempt) +static int rcu_is_cpu_rrupt_from_idle(void) { - barrier(); /* Avoid RCU read-side critical sections leaking down. */ - trace_rcu_utilization(TPS("Start context switch")); - rcu_sched_qs(); - rcu_preempt_note_context_switch(preempt); - /* Load rcu_urgent_qs before other flags. */ - if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) - goto out; - this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); - if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) - rcu_momentary_dyntick_idle(); - this_cpu_inc(rcu_dynticks.rcu_qs_ctr); - if (!preempt) - rcu_tasks_qs(current); -out: - trace_rcu_utilization(TPS("End context switch")); - barrier(); /* Avoid RCU read-side critical sections leaking up. */ + return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 && + __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1; } -EXPORT_SYMBOL_GPL(rcu_note_context_switch); /* * Register a quiescent state for all RCU flavors. If there is an @@ -476,8 +446,8 @@ void rcu_all_qs(void) rcu_momentary_dyntick_idle(); local_irq_restore(flags); } - if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) - rcu_sched_qs(); + if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp))) + rcu_qs(); this_cpu_inc(rcu_dynticks.rcu_qs_ctr); barrier(); /* Avoid RCU read-side critical sections leaking up. 
*/ preempt_enable(); @@ -558,7 +528,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_seq); */ unsigned long rcu_sched_get_gp_seq(void) { - return READ_ONCE(rcu_sched_state.gp_seq); + return rcu_get_gp_seq(); } EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); @@ -590,7 +560,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); */ unsigned long rcu_exp_batches_completed_sched(void) { - return rcu_sched_state.expedited_sequence; + return rcu_state.expedited_sequence; } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); @@ -617,7 +587,7 @@ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); */ void rcu_sched_force_quiescent_state(void) { - force_quiescent_state(&rcu_sched_state); + rcu_force_quiescent_state(); } EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); @@ -668,10 +638,8 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, switch (test_type) { case RCU_FLAVOR: case RCU_BH_FLAVOR: - rsp = rcu_state_p; - break; case RCU_SCHED_FLAVOR: - rsp = &rcu_sched_state; + rsp = rcu_state_p; break; default: break; @@ -1107,19 +1075,6 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ -/** - * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle - * - * If the current CPU is idle or running at a first-level (not nested) - * interrupt from idle, return true. The caller must have at least - * disabled preemption. - */ -static int rcu_is_cpu_rrupt_from_idle(void) -{ - return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 && - __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1; -} - /* * We are reporting a quiescent state on behalf of some other CPU, so * it is our responsibility to check for and handle potential overflow @@ -2364,7 +2319,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp, struct rcu_node *rnp_p; raw_lockdep_assert_held_rcu_node(rnp); - if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) || + if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) || WARN_ON_ONCE(rsp != rcu_state_p) || WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || rnp->qsmask != 0) { @@ -2650,25 +2605,7 @@ void rcu_check_callbacks(int user) { trace_rcu_utilization(TPS("Start scheduler-tick")); increment_cpu_stall_ticks(); - if (user || rcu_is_cpu_rrupt_from_idle()) { - - /* - * Get here if this CPU took its interrupt from user - * mode or from the idle loop, and if this is not a - * nested interrupt. In this case, the CPU is in - * a quiescent state, so note it. - * - * No memory barrier is required here because - * rcu_sched_qs() references only CPU-local variables - * that other CPUs neither access nor modify, at least - * not while the corresponding CPU is online. - */ - - rcu_sched_qs(); - rcu_note_voluntary_context_switch(current); - - } - rcu_preempt_check_callbacks(); + rcu_flavor_check_callbacks(user); if (rcu_pending()) invoke_rcu_core(); @@ -2694,7 +2631,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) mask = 0; raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->qsmask == 0) { - if (rcu_state_p == &rcu_sched_state || + if (!IS_ENABLED(CONFIG_PREEMPT) || rsp != rcu_state_p || rcu_preempt_blocked_readers_cgp(rnp)) { /* @@ -3028,28 +2965,56 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, } /** - * call_rcu_sched() - Queue an RCU for invocation after sched grace period. + * call_rcu() - Queue an RCU callback for invocation after a grace period. * @head: structure to be used for queueing the RCU updates. 
* @func: actual callback function to be invoked after the grace period * * The callback function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_sched() assumes - * that the read-side critical sections end on enabling of preemption - * or on voluntary preemption. - * RCU read-side critical sections are delimited by: - * - * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR - * - anything that disables preemption. - * - * These may be nested. + * period elapses, in other words after all pre-existing RCU read-side + * critical sections have completed. However, the callback function + * might well execute concurrently with RCU read-side critical sections + * that started after call_rcu() was invoked. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and + * may be nested. In addition, regions of code across which interrupts, + * preemption, or softirqs have been disabled also serve as RCU read-side + * critical sections. This includes hardware interrupt handlers, softirq + * handlers, and NMI handlers. + * + * Note that all CPUs must agree that the grace period extended beyond + * all pre-existing RCU read-side critical section. On systems with more + * than one CPU, this means that when "func()" is invoked, each CPU is + * guaranteed to have executed a full memory barrier since the end of its + * last RCU read-side critical section whose beginning preceded the call + * to call_rcu(). It also means that each CPU executing an RCU read-side + * critical section that continues beyond the start of "func()" must have + * executed a memory barrier after the call_rcu() but before the beginning + * of that RCU read-side critical section. Note that these guarantees + * include CPUs that are offline, idle, or executing in user mode, as + * well as CPUs that are executing in the kernel. + * + * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the + * resulting RCU callback function "func()", then both CPU A and CPU B are + * guaranteed to execute a full memory barrier during the time interval + * between the call to call_rcu() and the invocation of "func()" -- even + * if CPU A and CPU B are the same CPU (but again only if the system has + * more than one CPU). + */ +void call_rcu(struct rcu_head *head, rcu_callback_t func) +{ + __call_rcu(head, func, rcu_state_p, -1, 0); +} +EXPORT_SYMBOL_GPL(call_rcu); + +/** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period * - * See the description of call_rcu() for more detailed information on - * memory ordering guarantees. + * This is transitional. */ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) { - __call_rcu(head, func, &rcu_sched_state, -1, 0); + call_rcu(head, func); } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -3067,73 +3032,14 @@ void kfree_call_rcu(struct rcu_head *head, } EXPORT_SYMBOL_GPL(kfree_call_rcu); -/* - * Because a context switch is a grace period for RCU-sched, any blocking - * grace-period wait automatically implies a grace period if there - * is only one CPU online at any point time during execution of either - * synchronize_sched() or synchronize_rcu_bh(). 
It is OK to occasionally - * incorrectly indicate that there are multiple CPUs online when there - * was in fact only one the whole time, as this just adds some overhead: - * RCU still operates correctly. - */ -static int rcu_blocking_is_gp(void) -{ - int ret; - - might_sleep(); /* Check for RCU read-side critical section. */ - preempt_disable(); - ret = num_online_cpus() <= 1; - preempt_enable(); - return ret; -} - /** * synchronize_sched - wait until an rcu-sched grace period has elapsed. * - * Control will return to the caller some time after a full rcu-sched - * grace period has elapsed, in other words after all currently executing - * rcu-sched read-side critical sections have completed. These read-side - * critical sections are delimited by rcu_read_lock_sched() and - * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), - * local_irq_disable(), and so on may be used in place of - * rcu_read_lock_sched(). - * - * This means that all preempt_disable code sequences, including NMI and - * non-threaded hardware-interrupt handlers, in progress on entry will - * have completed before this primitive returns. However, this does not - * guarantee that softirq handlers will have completed, since in some - * kernels, these handlers can run in process context, and can block. - * - * Note that this guarantee implies further memory-ordering guarantees. - * On systems with more than one CPU, when synchronize_sched() returns, - * each CPU is guaranteed to have executed a full memory barrier since the - * end of its last RCU-sched read-side critical section whose beginning - * preceded the call to synchronize_sched(). In addition, each CPU having - * an RCU read-side critical section that extends beyond the return from - * synchronize_sched() is guaranteed to have executed a full memory barrier - * after the beginning of synchronize_sched() and before the beginning of - * that RCU read-side critical section. Note that these guarantees include - * CPUs that are offline, idle, or executing in user mode, as well as CPUs - * that are executing in the kernel. - * - * Furthermore, if CPU A invoked synchronize_sched(), which returned - * to its caller on CPU B, then both CPU A and CPU B are guaranteed - * to have executed a full memory barrier during the execution of - * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but - * again only if the system has more than one CPU). + * This is transitional. */ void synchronize_sched(void) { - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_sched() in RCU-sched read-side critical section"); - if (rcu_blocking_is_gp()) - return; - if (rcu_gp_is_expedited()) - synchronize_sched_expedited(); - else - wait_rcu_gp(call_rcu_sched); + synchronize_rcu(); } EXPORT_SYMBOL_GPL(synchronize_sched); @@ -3181,41 +3087,23 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu); /** * get_state_synchronize_sched - Snapshot current RCU-sched state * - * Returns a cookie that is used by a later call to cond_synchronize_sched() - * to determine whether or not a full grace period has elapsed in the - * meantime. + * This is transitional, and only used by rcutorture. */ unsigned long get_state_synchronize_sched(void) { - /* - * Any prior manipulation of RCU-protected data must happen - * before the load from ->gp_seq. 
- */ - smp_mb(); /* ^^^ */ - return rcu_seq_snap(&rcu_sched_state.gp_seq); + return get_state_synchronize_rcu(); } EXPORT_SYMBOL_GPL(get_state_synchronize_sched); /** * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period - * * @oldstate: return value from earlier call to get_state_synchronize_sched() * - * If a full RCU-sched grace period has elapsed since the earlier call to - * get_state_synchronize_sched(), just return. Otherwise, invoke - * synchronize_sched() to wait for a full grace period. - * - * Yes, this function does not take counter wrap into account. But - * counter wrap is harmless. If the counter wraps, we have waited for - * more than 2 billion grace periods (and way more on a 64-bit system!), - * so waiting for one additional grace period should be just fine. + * This is transitional and only used by rcutorture. */ void cond_synchronize_sched(unsigned long oldstate) { - if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate)) - synchronize_sched(); - else - smp_mb(); /* Ensure GP ends before subsequent accesses. */ + cond_synchronize_rcu(oldstate); } EXPORT_SYMBOL_GPL(cond_synchronize_sched); @@ -3452,12 +3340,28 @@ void rcu_barrier_bh(void) } EXPORT_SYMBOL_GPL(rcu_barrier_bh); +/** + * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. + * + * Note that this primitive does not necessarily wait for an RCU grace period + * to complete. For example, if there are no RCU callbacks queued anywhere + * in the system, then rcu_barrier() is within its rights to return + * immediately, without waiting for anything, much less an RCU grace period. + */ +void rcu_barrier(void) +{ + _rcu_barrier(rcu_state_p); +} +EXPORT_SYMBOL_GPL(rcu_barrier); + /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. + * + * This is transitional. */ void rcu_barrier_sched(void) { - _rcu_barrier(&rcu_sched_state); + rcu_barrier(); } EXPORT_SYMBOL_GPL(rcu_barrier_sched); @@ -3756,7 +3660,7 @@ void rcu_report_dead(unsigned int cpu) /* QS for any half-done expedited RCU-sched GP. */ preempt_disable(); - rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda)); + rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(rcu_state.rda)); preempt_enable(); rcu_preempt_deferred_qs(current); for_each_rcu_flavor(rsp) @@ -4098,10 +4002,9 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); - rcu_init_one(&rcu_sched_state); + rcu_init_one(&rcu_state); if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_sched_state); - __rcu_init_preempt(); + rcu_dump_rcu_node_tree(&rcu_state); open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e02c882861eb..38658ca87dcb 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -225,9 +225,6 @@ struct rcu_data { /* 5) _rcu_barrier(), OOM callbacks, and expediting. */ struct rcu_head barrier_head; -#ifdef CONFIG_RCU_FAST_NO_HZ - struct rcu_head oom_head; -#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ int exp_dynticks_snap; /* Double-check need for IPI. */ /* 6) Callback offloading. 
*/ @@ -433,8 +430,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); /* Forward declarations for rcutree_plugin.h */ static void rcu_bootup_announce(void); -static void rcu_preempt_qs(void); -static void rcu_preempt_note_context_switch(bool preempt); +static void rcu_qs(void); static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static bool rcu_preempt_has_tasks(struct rcu_node *rnp); @@ -444,9 +440,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp); static int rcu_print_task_exp_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp); -static void rcu_preempt_check_callbacks(void); +static void rcu_flavor_check_callbacks(int user); void call_rcu(struct rcu_head *head, rcu_callback_t func); -static void __init __rcu_init_preempt(void); static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0f8f225c1b46..5619edfd414e 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -265,7 +265,7 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp) rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true); } -/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */ +/* Common code for work-done checking. */ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s) { if (rcu_exp_gp_seq_done(rsp, s)) { @@ -337,45 +337,6 @@ fastpath: return false; } -/* Invoked on each online non-idle CPU for expedited quiescent state. */ -static void sync_sched_exp_handler(void *data) -{ - struct rcu_data *rdp; - struct rcu_node *rnp; - struct rcu_state *rsp = data; - - rdp = this_cpu_ptr(rsp->rda); - rnp = rdp->mynode; - if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || - __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) - return; - if (rcu_is_cpu_rrupt_from_idle()) { - rcu_report_exp_rdp(&rcu_sched_state, - this_cpu_ptr(&rcu_sched_data)); - return; - } - __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true); - /* Store .exp before .rcu_urgent_qs. */ - smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true); - resched_cpu(smp_processor_id()); -} - -/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */ -static void sync_sched_exp_online_cleanup(int cpu) -{ - struct rcu_data *rdp; - int ret; - struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_sched_state; - - rdp = per_cpu_ptr(rsp->rda, cpu); - rnp = rdp->mynode; - if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) - return; - ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0); - WARN_ON_ONCE(ret); -} - /* * Select the CPUs within the specified rcu_node that the upcoming * expedited grace period needs to wait for. @@ -691,39 +652,6 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, mutex_unlock(&rsp->exp_mutex); } -/** - * synchronize_sched_expedited - Brute-force RCU-sched grace period - * - * Wait for an RCU-sched grace period to elapse, but use a "big hammer" - * approach to force the grace period to end quickly. This consumes - * significant time on all CPUs and is unfriendly to real-time workloads, - * so is thus not recommended for any sort of common-case code. In fact, - * if you are using synchronize_sched_expedited() in a loop, please - * restructure your code to batch your updates, and then use a single - * synchronize_sched() instead. 
- * - * This implementation can be thought of as an application of sequence - * locking to expedited grace periods, but using the sequence counter to - * determine when someone else has already done the work instead of for - * retrying readers. - */ -void synchronize_sched_expedited(void) -{ - struct rcu_state *rsp = &rcu_sched_state; - - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || - lock_is_held(&rcu_lock_map) || - lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_sched_expedited() in RCU read-side critical section"); - - /* If only one CPU, this is automatically a grace period. */ - if (rcu_blocking_is_gp()) - return; - - _synchronize_rcu_expedited(rsp, sync_sched_exp_handler); -} -EXPORT_SYMBOL_GPL(synchronize_sched_expedited); - #ifdef CONFIG_PREEMPT_RCU /* @@ -801,6 +729,11 @@ static void sync_rcu_exp_handler(void *info) resched_cpu(rdp->cpu); } +/* PREEMPT=y, so no RCU-sched to clean up after. */ +static void sync_sched_exp_online_cleanup(int cpu) +{ +} + /** * synchronize_rcu_expedited - Brute-force RCU grace period * @@ -818,6 +751,8 @@ static void sync_rcu_exp_handler(void *info) * you are using synchronize_rcu_expedited() in a loop, please restructure * your code to batch your updates, and then Use a single synchronize_rcu() * instead. + * + * This has the same semantics as (but is more brutal than) synchronize_rcu(). */ void synchronize_rcu_expedited(void) { @@ -836,13 +771,79 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); #else /* #ifdef CONFIG_PREEMPT_RCU */ +/* Invoked on each online non-idle CPU for expedited quiescent state. */ +static void sync_sched_exp_handler(void *data) +{ + struct rcu_data *rdp; + struct rcu_node *rnp; + struct rcu_state *rsp = data; + + rdp = this_cpu_ptr(rsp->rda); + rnp = rdp->mynode; + if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || + __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) + return; + if (rcu_is_cpu_rrupt_from_idle()) { + rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); + return; + } + __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true); + /* Store .exp before .rcu_urgent_qs. */ + smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true); + resched_cpu(smp_processor_id()); +} + +/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */ +static void sync_sched_exp_online_cleanup(int cpu) +{ + struct rcu_data *rdp; + int ret; + struct rcu_node *rnp; + struct rcu_state *rsp = &rcu_state; + + rdp = per_cpu_ptr(rsp->rda, cpu); + rnp = rdp->mynode; + if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) + return; + ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0); + WARN_ON_ONCE(ret); +} + /* - * Wait for an rcu-preempt grace period, but make it happen quickly. - * But because preemptible RCU does not exist, map to rcu-sched. + * Because a context switch is a grace period for RCU-sched, any blocking + * grace-period wait automatically implies a grace period if there + * is only one CPU online at any point time during execution of either + * synchronize_sched() or synchronize_rcu_bh(). It is OK to occasionally + * incorrectly indicate that there are multiple CPUs online when there + * was in fact only one the whole time, as this just adds some overhead: + * RCU still operates correctly. */ +static int rcu_blocking_is_gp(void) +{ + int ret; + + might_sleep(); /* Check for RCU read-side critical section. */ + preempt_disable(); + ret = num_online_cpus() <= 1; + preempt_enable(); + return ret; +} + +/* PREEMPT=n implementation of synchronize_rcu_expedited(). 
*/ void synchronize_rcu_expedited(void) { - synchronize_sched_expedited(); + struct rcu_state *rsp = &rcu_state; + + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || + lock_is_held(&rcu_lock_map) || + lock_is_held(&rcu_sched_lock_map), + "Illegal synchronize_sched_expedited() in RCU read-side critical section"); + + /* If only one CPU, this is automatically a grace period. */ + if (rcu_blocking_is_gp()) + return; + + _synchronize_rcu_expedited(rsp, sync_sched_exp_handler); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 9f0d054e6c20..2c81f8dd63b4 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -123,10 +123,6 @@ static void __init rcu_bootup_announce_oddness(void) #ifdef CONFIG_PREEMPT_RCU -RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); -static struct rcu_state *const rcu_state_p = &rcu_preempt_state; -static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data; - static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, bool wake); static void rcu_read_unlock_special(struct task_struct *t); @@ -303,15 +299,15 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * * Callers to this function must disable preemption. */ -static void rcu_preempt_qs(void) +static void rcu_qs(void) { - RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n"); + RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n"); if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) { trace_rcu_grace_period(TPS("rcu_preempt"), __this_cpu_read(rcu_data_p->gp_seq), TPS("cpuqs")); __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false); - barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */ + barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */ current->rcu_read_unlock_special.b.need_qs = false; } } @@ -329,12 +325,14 @@ static void rcu_preempt_qs(void) * * Caller must disable interrupts. */ -static void rcu_preempt_note_context_switch(bool preempt) +void rcu_note_context_switch(bool preempt) { struct task_struct *t = current; struct rcu_data *rdp = this_cpu_ptr(rcu_state_p->rda); struct rcu_node *rnp; + barrier(); /* Avoid RCU read-side critical sections leaking down. */ + trace_rcu_utilization(TPS("Start context switch")); lockdep_assert_irqs_disabled(); WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); if (t->rcu_read_lock_nesting > 0 && @@ -381,10 +379,13 @@ static void rcu_preempt_note_context_switch(bool preempt) * grace period, then the fact that the task has been enqueued * means that we continue to block the current grace period. */ - rcu_preempt_qs(); + rcu_qs(); if (rdp->deferred_qs) rcu_report_exp_rdp(rcu_state_p, rdp); + trace_rcu_utilization(TPS("End context switch")); + barrier(); /* Avoid RCU read-side critical sections leaking up. 
*/ } +EXPORT_SYMBOL_GPL(rcu_note_context_switch); /* * Check for preempted RCU readers blocking the current grace period @@ -493,7 +494,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) return; } if (special.b.need_qs) { - rcu_preempt_qs(); + rcu_qs(); t->rcu_read_unlock_special.b.need_qs = false; if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) { local_irq_restore(flags); @@ -596,7 +597,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) */ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) { - return (this_cpu_ptr(&rcu_preempt_data)->deferred_qs || + return (this_cpu_ptr(&rcu_data)->deferred_qs || READ_ONCE(t->rcu_read_unlock_special.s)) && t->rcu_read_lock_nesting <= 0; } @@ -781,11 +782,14 @@ rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) * * Caller must disable hard irqs. */ -static void rcu_preempt_check_callbacks(void) +static void rcu_flavor_check_callbacks(int user) { - struct rcu_state *rsp = &rcu_preempt_state; + struct rcu_state *rsp = &rcu_state; struct task_struct *t = current; + if (user || rcu_is_cpu_rrupt_from_idle()) { + rcu_note_voluntary_context_switch(current); + } if (t->rcu_read_lock_nesting > 0 || (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) { /* No QS, force context switch if deferred. */ @@ -795,7 +799,7 @@ static void rcu_preempt_check_callbacks(void) rcu_preempt_deferred_qs(t); /* Report deferred QS. */ return; } else if (!t->rcu_read_lock_nesting) { - rcu_preempt_qs(); /* Report immediate QS. */ + rcu_qs(); /* Report immediate QS. */ return; } @@ -808,44 +812,6 @@ static void rcu_preempt_check_callbacks(void) t->rcu_read_unlock_special.b.need_qs = true; } -/** - * call_rcu() - Queue an RCU callback for invocation after a grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual callback function to be invoked after the grace period - * - * The callback function will be invoked some time after a full grace - * period elapses, in other words after all pre-existing RCU read-side - * critical sections have completed. However, the callback function - * might well execute concurrently with RCU read-side critical sections - * that started after call_rcu() was invoked. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. - * - * Note that all CPUs must agree that the grace period extended beyond - * all pre-existing RCU read-side critical section. On systems with more - * than one CPU, this means that when "func()" is invoked, each CPU is - * guaranteed to have executed a full memory barrier since the end of its - * last RCU read-side critical section whose beginning preceded the call - * to call_rcu(). It also means that each CPU executing an RCU read-side - * critical section that continues beyond the start of "func()" must have - * executed a memory barrier after the call_rcu() but before the beginning - * of that RCU read-side critical section. Note that these guarantees - * include CPUs that are offline, idle, or executing in user mode, as - * well as CPUs that are executing in the kernel. 
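Illustrative aside (not part of the patch): the ordering guarantees spelled out in the call_rcu() comment above back the usual deferred-free pattern; struct foo, gp, gp_lock and new_fp are placeholders:

        struct foo {
                int data;
                struct rcu_head rcu;
        };

        static void foo_reclaim(struct rcu_head *rp)
        {
                kfree(container_of(rp, struct foo, rcu));
        }

        /* Updater, called with gp_lock held: */
        old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
        rcu_assign_pointer(gp, new_fp);
        call_rcu(&old->rcu, foo_reclaim);       /* freed after pre-existing readers finish */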
- * - * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the - * resulting RCU callback function "func()", then both CPU A and CPU B are - * guaranteed to execute a full memory barrier during the time interval - * between the call to call_rcu() and the invocation of "func()" -- even - * if CPU A and CPU B are the same CPU (but again only if the system has - * more than one CPU). - */ -void call_rcu(struct rcu_head *head, rcu_callback_t func) -{ - __call_rcu(head, func, rcu_state_p, -1, 0); -} -EXPORT_SYMBOL_GPL(call_rcu); - /** * synchronize_rcu - wait until a grace period has elapsed. * @@ -856,14 +822,28 @@ EXPORT_SYMBOL_GPL(call_rcu); * concurrently with new RCU read-side critical sections that began while * synchronize_rcu() was waiting. RCU read-side critical sections are * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. + * In addition, regions of code across which interrupts, preemption, or + * softirqs have been disabled also serve as RCU read-side critical + * sections. This includes hardware interrupt handlers, softirq handlers, + * and NMI handlers. + * + * Note that this guarantee implies further memory-ordering guarantees. + * On systems with more than one CPU, when synchronize_rcu() returns, + * each CPU is guaranteed to have executed a full memory barrier since the + * end of its last RCU-sched read-side critical section whose beginning + * preceded the call to synchronize_rcu(). In addition, each CPU having + * an RCU read-side critical section that extends beyond the return from + * synchronize_rcu() is guaranteed to have executed a full memory barrier + * after the beginning of synchronize_rcu() and before the beginning of + * that RCU read-side critical section. Note that these guarantees include + * CPUs that are offline, idle, or executing in user mode, as well as CPUs + * that are executing in the kernel. * - * See the description of synchronize_sched() for more detailed - * information on memory-ordering guarantees. However, please note - * that -only- the memory-ordering guarantees apply. For example, - * synchronize_rcu() is -not- guaranteed to wait on things like code - * protected by preempt_disable(), instead, synchronize_rcu() is -only- - * guaranteed to wait on RCU read-side critical sections, that is, sections - * of code protected by rcu_read_lock(). + * Furthermore, if CPU A invoked synchronize_rcu(), which returned + * to its caller on CPU B, then both CPU A and CPU B are guaranteed + * to have executed a full memory barrier during the execution of + * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but + * again only if the system has more than one CPU). */ void synchronize_rcu(void) { @@ -880,28 +860,6 @@ void synchronize_rcu(void) } EXPORT_SYMBOL_GPL(synchronize_rcu); -/** - * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. - * - * Note that this primitive does not necessarily wait for an RCU grace period - * to complete. For example, if there are no RCU callbacks queued anywhere - * in the system, then rcu_barrier() is within its rights to return - * immediately, without waiting for anything, much less an RCU grace period. - */ -void rcu_barrier(void) -{ - _rcu_barrier(rcu_state_p); -} -EXPORT_SYMBOL_GPL(rcu_barrier); - -/* - * Initialize preemptible RCU's state structures. - */ -static void __init __rcu_init_preempt(void) -{ - rcu_init_one(rcu_state_p); -} - /* * Check for a task exiting while in a preemptible-RCU read-side * critical section, clean up if so. 
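Illustrative aside (not part of the patch): the widened guarantee in the updated synchronize_rcu() comment above means that both reader styles below are waited for by a single synchronize_rcu(); gp and do_something() are placeholders:

        rcu_read_lock();                        /* classic RCU reader */
        p = rcu_dereference(gp);
        do_something(p);
        rcu_read_unlock();

        preempt_disable();                      /* preemption-disabled region now also counts */
        q = rcu_dereference_sched(gp);
        do_something(q);
        preempt_enable();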
No need to issue warnings, @@ -964,8 +922,6 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) #else /* #ifdef CONFIG_PREEMPT_RCU */ -static struct rcu_state *const rcu_state_p = &rcu_sched_state; - /* * Tell them what RCU they are running. */ @@ -975,18 +931,48 @@ static void __init rcu_bootup_announce(void) rcu_bootup_announce_oddness(); } -/* Because preemptible RCU does not exist, we can ignore its QSes. */ -static void rcu_preempt_qs(void) +/* + * Note a quiescent state for PREEMPT=n. Because we do not need to know + * how many quiescent states passed, just if there was at least one since + * the start of the grace period, this just sets a flag. The caller must + * have disabled preemption. + */ +static void rcu_qs(void) { + RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!"); + if (!__this_cpu_read(rcu_data.cpu_no_qs.s)) + return; + trace_rcu_grace_period(TPS("rcu_sched"), + __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); + __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); + if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) + return; + __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); + rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); } /* - * Because preemptible RCU does not exist, we never have to check for - * CPUs being in quiescent states. + * Note a PREEMPT=n context switch. The caller must have disabled interrupts. */ -static void rcu_preempt_note_context_switch(bool preempt) +void rcu_note_context_switch(bool preempt) { + barrier(); /* Avoid RCU read-side critical sections leaking down. */ + trace_rcu_utilization(TPS("Start context switch")); + rcu_qs(); + /* Load rcu_urgent_qs before other flags. */ + if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) + goto out; + this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); + if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) + rcu_momentary_dyntick_idle(); + this_cpu_inc(rcu_dynticks.rcu_qs_ctr); + if (!preempt) + rcu_tasks_qs(current); +out: + trace_rcu_utilization(TPS("End context switch")); + barrier(); /* Avoid RCU read-side critical sections leaking up. */ } +EXPORT_SYMBOL_GPL(rcu_note_context_switch); /* * Because preemptible RCU does not exist, there are never any preempted @@ -1054,29 +1040,48 @@ rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) } /* - * Because preemptible RCU does not exist, it never has any callbacks - * to check. + * Check to see if this CPU is in a non-context-switch quiescent state + * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). + * Also schedule RCU core processing. + * + * This function must be called from hardirq context. It is normally + * invoked from the scheduling-clock interrupt. */ -static void rcu_preempt_check_callbacks(void) +static void rcu_flavor_check_callbacks(int user) { -} + if (user || rcu_is_cpu_rrupt_from_idle()) { -/* - * Because preemptible RCU does not exist, rcu_barrier() is just - * another name for rcu_barrier_sched(). - */ -void rcu_barrier(void) -{ - rcu_barrier_sched(); + /* + * Get here if this CPU took its interrupt from user + * mode or from the idle loop, and if this is not a + * nested interrupt. In this case, the CPU is in + * a quiescent state, so note it. + * + * No memory barrier is required here because rcu_qs() + * references only CPU-local variables that other CPUs + * neither access nor modify, at least not while the + * corresponding CPU is online. 
+ */ + + rcu_qs(); + } } -EXPORT_SYMBOL_GPL(rcu_barrier); -/* - * Because preemptible RCU does not exist, it need not be initialized. - */ -static void __init __rcu_init_preempt(void) +/* PREEMPT=n implementation of synchronize_rcu(). */ +void synchronize_rcu(void) { + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || + lock_is_held(&rcu_lock_map) || + lock_is_held(&rcu_sched_lock_map), + "Illegal synchronize_rcu() in RCU-sched read-side critical section"); + if (rcu_blocking_is_gp()) + return; + if (rcu_gp_is_expedited()) + synchronize_rcu_expedited(); + else + wait_rcu_gp(call_rcu); } +EXPORT_SYMBOL_GPL(synchronize_rcu); /* * Because preemptible RCU does not exist, tasks cannot possibly exit @@ -1319,8 +1324,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, static void rcu_kthread_do_work(void) { - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); - rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); + rcu_do_batch(&rcu_state, this_cpu_ptr(&rcu_data)); } static void rcu_cpu_kthread_setup(unsigned int cpu) @@ -1727,87 +1731,6 @@ static void rcu_idle_count_callbacks_posted(void) __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); } -/* - * Data for flushing lazy RCU callbacks at OOM time. - */ -static atomic_t oom_callback_count; -static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq); - -/* - * RCU OOM callback -- decrement the outstanding count and deliver the - * wake-up if we are the last one. - */ -static void rcu_oom_callback(struct rcu_head *rhp) -{ - if (atomic_dec_and_test(&oom_callback_count)) - wake_up(&oom_callback_wq); -} - -/* - * Post an rcu_oom_notify callback on the current CPU if it has at - * least one lazy callback. This will unnecessarily post callbacks - * to CPUs that already have a non-lazy callback at the end of their - * callback list, but this is an infrequent operation, so accept some - * extra overhead to keep things simple. - */ -static void rcu_oom_notify_cpu(void *unused) -{ - struct rcu_state *rsp; - struct rcu_data *rdp; - - for_each_rcu_flavor(rsp) { - rdp = raw_cpu_ptr(rsp->rda); - if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) { - atomic_inc(&oom_callback_count); - rsp->call(&rdp->oom_head, rcu_oom_callback); - } - } -} - -/* - * If low on memory, ensure that each CPU has a non-lazy callback. - * This will wake up CPUs that have only lazy callbacks, in turn - * ensuring that they free up the corresponding memory in a timely manner. - * Because an uncertain amount of memory will be freed in some uncertain - * timeframe, we do not claim to have freed anything. - */ -static int rcu_oom_notify(struct notifier_block *self, - unsigned long notused, void *nfreed) -{ - int cpu; - - /* Wait for callbacks from earlier instance to complete. */ - wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); - smp_mb(); /* Ensure callback reuse happens after callback invocation. */ - - /* - * Prevent premature wakeup: ensure that all increments happen - * before there is a chance of the counter reaching zero. - */ - atomic_set(&oom_callback_count, 1); - - for_each_online_cpu(cpu) { - smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); - cond_resched_tasks_rcu_qs(); - } - - /* Unconditionally decrement: no need to wake ourselves up. 
*/ - atomic_dec(&oom_callback_count); - - return NOTIFY_OK; -} - -static struct notifier_block rcu_oom_nb = { - .notifier_call = rcu_oom_notify -}; - -static int __init rcu_register_oom_notifier(void) -{ - register_oom_notifier(&rcu_oom_nb); - return 0; -} -early_initcall(rcu_register_oom_notifier); - #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ #ifdef CONFIG_RCU_FAST_NO_HZ -- cgit v1.2.3 From 709fdce7545c978e69f52eb19082ea3af44332f5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 10:44:44 -0700 Subject: rcu: Express Tiny RCU updates in terms of RCU rather than RCU-sched This commit renames Tiny RCU functions so that the lowest level of functionality is RCU (e.g., synchronize_rcu()) rather than RCU-sched (e.g., synchronize_sched()). This provides greater naming compatibility with Tree RCU, which will in turn permit more LoC removal once the RCU-sched and RCU-bh update-side API is removed. Signed-off-by: Paul E. McKenney [ paulmck: Fix Tiny call_rcu()'s EXPORT_SYMBOL() in response to a bug report from kbuild test robot. ] --- include/linux/rcupdate.h | 12 +++++------- include/linux/rcutiny.h | 34 +++++++++++++++++++--------------- include/linux/rcutree.h | 1 - kernel/rcu/tiny.c | 48 ++++++++++++++++++++++++------------------------ 4 files changed, 48 insertions(+), 47 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 94474bb6b5c4..1207c6c9bd8b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -49,15 +49,14 @@ /* Exported common interfaces */ -#ifdef CONFIG_TINY_RCU -#define call_rcu call_rcu_sched -#else -void call_rcu(struct rcu_head *head, rcu_callback_t func); +#ifndef CONFIG_TINY_RCU +void synchronize_sched(void); +void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); #endif -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); -void synchronize_sched(void); +void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); +void synchronize_rcu(void); static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) { @@ -68,7 +67,6 @@ static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) void __rcu_read_lock(void); void __rcu_read_unlock(void); -void synchronize_rcu(void); /* * Defined as a macro as it is a very low level header included from diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index df2c0895c5e7..e66fb8bc2127 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -36,9 +36,9 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } -static inline void synchronize_rcu(void) +static inline void synchronize_sched(void) { - synchronize_sched(); + synchronize_rcu(); } static inline unsigned long get_state_synchronize_rcu(void) @@ -61,16 +61,11 @@ static inline void cond_synchronize_sched(unsigned long oldstate) might_sleep(); } -static inline void synchronize_rcu_expedited(void) -{ - synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ -} +extern void rcu_barrier(void); -extern void rcu_barrier_sched(void); - -static inline void rcu_barrier(void) +static inline void rcu_barrier_sched(void) { - rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ + rcu_barrier(); /* Only one CPU, so only one list of callbacks! 
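Illustrative aside (not part of the patch): the rcu_barrier() that rcu_barrier_sched() now maps to is what teardown paths rely on before freeing callback-carrying memory; unregister_foo() and foo_cachep are placeholders:

        static void __exit foo_exit(void)
        {
                unregister_foo();       /* stop queueing new call_rcu() callbacks */
                rcu_barrier();          /* wait for already-queued callbacks to be invoked */
                kmem_cache_destroy(foo_cachep);
        }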
*/ } static inline void rcu_barrier_bh(void) @@ -88,27 +83,36 @@ static inline void synchronize_rcu_bh_expedited(void) synchronize_sched(); } +static inline void synchronize_rcu_expedited(void) +{ + synchronize_sched(); +} + static inline void synchronize_sched_expedited(void) { synchronize_sched(); } -static inline void kfree_call_rcu(struct rcu_head *head, - rcu_callback_t func) +static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } -void rcu_sched_qs(void); +void rcu_qs(void); static inline void rcu_softirq_qs(void) { - rcu_sched_qs(); + rcu_qs(); } #define rcu_note_context_switch(preempt) \ do { \ - rcu_sched_qs(); \ + rcu_qs(); \ rcu_tasks_qs(current); \ } while (0) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 0c44720f0e84..6d30a0809300 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,7 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } -void synchronize_rcu(void); static inline void synchronize_rcu_bh(void) { synchronize_rcu(); diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index cadcf63c4889..30826fb6e438 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -46,25 +46,25 @@ struct rcu_ctrlblk { }; /* Definition for rcupdate control block. */ -static struct rcu_ctrlblk rcu_sched_ctrlblk = { - .donetail = &rcu_sched_ctrlblk.rcucblist, - .curtail = &rcu_sched_ctrlblk.rcucblist, +static struct rcu_ctrlblk rcu_ctrlblk = { + .donetail = &rcu_ctrlblk.rcucblist, + .curtail = &rcu_ctrlblk.rcucblist, }; -void rcu_barrier_sched(void) +void rcu_barrier(void) { - wait_rcu_gp(call_rcu_sched); + wait_rcu_gp(call_rcu); } -EXPORT_SYMBOL(rcu_barrier_sched); +EXPORT_SYMBOL(rcu_barrier); /* Record an rcu quiescent state. */ -void rcu_sched_qs(void) +void rcu_qs(void) { unsigned long flags; local_irq_save(flags); - if (rcu_sched_ctrlblk.donetail != rcu_sched_ctrlblk.curtail) { - rcu_sched_ctrlblk.donetail = rcu_sched_ctrlblk.curtail; + if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) { + rcu_ctrlblk.donetail = rcu_ctrlblk.curtail; raise_softirq(RCU_SOFTIRQ); } local_irq_restore(flags); @@ -79,7 +79,7 @@ void rcu_sched_qs(void) void rcu_check_callbacks(int user) { if (user) - rcu_sched_qs(); + rcu_qs(); } /* Invoke the RCU callbacks whose grace period has elapsed. */ @@ -90,17 +90,17 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused /* Move the ready-to-invoke callbacks to a local list. */ local_irq_save(flags); - if (rcu_sched_ctrlblk.donetail == &rcu_sched_ctrlblk.rcucblist) { + if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) { /* No callbacks ready, so just leave. */ local_irq_restore(flags); return; } - list = rcu_sched_ctrlblk.rcucblist; - rcu_sched_ctrlblk.rcucblist = *rcu_sched_ctrlblk.donetail; - *rcu_sched_ctrlblk.donetail = NULL; - if (rcu_sched_ctrlblk.curtail == rcu_sched_ctrlblk.donetail) - rcu_sched_ctrlblk.curtail = &rcu_sched_ctrlblk.rcucblist; - rcu_sched_ctrlblk.donetail = &rcu_sched_ctrlblk.rcucblist; + list = rcu_ctrlblk.rcucblist; + rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail; + *rcu_ctrlblk.donetail = NULL; + if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail) + rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist; + rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist; local_irq_restore(flags); /* Invoke the callbacks on the local list. 
*/ @@ -125,21 +125,21 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused * * Cool, huh? (Due to Josh Triplett.) */ -void synchronize_sched(void) +void synchronize_rcu(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_sched() in RCU read-side critical section"); } -EXPORT_SYMBOL_GPL(synchronize_sched); +EXPORT_SYMBOL_GPL(synchronize_rcu); /* * Post an RCU callback to be invoked after the end of an RCU-sched grace * period. But since we have but one CPU, that would be after any * quiescent state. */ -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) +void call_rcu(struct rcu_head *head, rcu_callback_t func) { unsigned long flags; @@ -148,16 +148,16 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) head->next = NULL; local_irq_save(flags); - *rcu_sched_ctrlblk.curtail = head; - rcu_sched_ctrlblk.curtail = &head->next; + *rcu_ctrlblk.curtail = head; + rcu_ctrlblk.curtail = &head->next; local_irq_restore(flags); if (unlikely(is_idle_task(current))) { - /* force scheduling for rcu_sched_qs() */ + /* force scheduling for rcu_qs() */ resched_cpu(0); } } -EXPORT_SYMBOL_GPL(call_rcu_sched); +EXPORT_SYMBOL_GPL(call_rcu); void __init rcu_init(void) { -- cgit v1.2.3 From 358be2d3685cb0cca49c914e89824467ee0b589c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 14:15:31 -0700 Subject: rcu: Remove RCU_STATE_INITIALIZER() Now that a given build of the Linux kernel has only one set of rcu_state, rcu_node, and rcu_data structures, there is no point in creating a macro to declare and compile-time initialize them. This commit therefore just does normal declaration and compile-time initialization of these structures. While in the area, this commit also removes #ifndefs of the no-longer-ever-defined preprocessor macro RCU_TREE_NONCORE. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 54 ++++++++++++++---------------------------------------- kernel/rcu/tree.h | 29 +++++++++++++++++++++++------ 2 files changed, 37 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5f79315f094e..1d36cbcce1b4 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -72,46 +72,20 @@ /* Data structures. */ -/* - * In order to export the rcu_state name to the tracing tools, it - * needs to be added in the __tracepoint_string section. - * This requires defining a separate variable tp__varname - * that points to the string being used, and this will allow - * the tracing userspace tools to be able to decipher the string - * address to the matching string. 
- */ -#ifdef CONFIG_TRACING -# define DEFINE_RCU_TPS(sname) \ -static char sname##_varname[] = #sname; \ -static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; -# define RCU_STATE_NAME(sname) sname##_varname -#else -# define DEFINE_RCU_TPS(sname) -# define RCU_STATE_NAME(sname) __stringify(sname) -#endif - -#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \ -DEFINE_RCU_TPS(sname) \ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); \ -struct rcu_state rcu_state = { \ - .level = { &rcu_state.node[0] }, \ - .rda = &rcu_data, \ - .call = cr, \ - .gp_state = RCU_GP_IDLE, \ - .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \ - .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), \ - .name = RCU_STATE_NAME(sname), \ - .abbr = sabbr, \ - .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), \ - .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), \ - .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), \ -} - -#ifdef CONFIG_PREEMPT_RCU -RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); -#else -RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu); -#endif +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); +struct rcu_state rcu_state = { + .level = { &rcu_state.node[0] }, + .rda = &rcu_data, + .call = call_rcu, + .gp_state = RCU_GP_IDLE, + .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, + .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), + .name = RCU_NAME, + .abbr = RCU_ABBR, + .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), + .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), + .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), +}; static struct rcu_state *const rcu_state_p = &rcu_state; static struct rcu_data __percpu *const rcu_data_p = &rcu_data; diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 38658ca87dcb..3f36562d3118 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -386,7 +386,6 @@ struct rcu_state { #define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */ #define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */ -#ifndef RCU_TREE_NONCORE static const char * const gp_state_names[] = { "RCU_GP_IDLE", "RCU_GP_WAIT_GPS", @@ -398,7 +397,29 @@ static const char * const gp_state_names[] = { "RCU_GP_CLEANUP", "RCU_GP_CLEANED", }; -#endif /* #ifndef RCU_TREE_NONCORE */ + +/* + * In order to export the rcu_state name to the tracing tools, it + * needs to be added in the __tracepoint_string section. + * This requires defining a separate variable tp__varname + * that points to the string being used, and this will allow + * the tracing userspace tools to be able to decipher the string + * address to the matching string. 
+ */ +#ifdef CONFIG_PREEMPT_RCU +#define RCU_ABBR 'p' +#define RCU_NAME_RAW "rcu_preempt" +#else /* #ifdef CONFIG_PREEMPT_RCU */ +#define RCU_ABBR 's' +#define RCU_NAME_RAW "rcu_sched" +#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#ifndef CONFIG_TRACING +#define RCU_NAME RCU_NAME_RAW +#else /* #ifdef CONFIG_TRACING */ +static char rcu_name[] = RCU_NAME_RAW; +static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; +#define RCU_NAME rcu_name +#endif /* #else #ifdef CONFIG_TRACING */ extern struct list_head rcu_struct_flavors; @@ -426,8 +447,6 @@ DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); #endif /* #ifdef CONFIG_RCU_BOOST */ -#ifndef RCU_TREE_NONCORE - /* Forward declarations for rcutree_plugin.h */ static void rcu_bootup_announce(void); static void rcu_qs(void); @@ -495,5 +514,3 @@ void srcu_offline_cpu(unsigned int cpu); void srcu_online_cpu(unsigned int cpu) { } void srcu_offline_cpu(unsigned int cpu) { } #endif /* #else #ifdef CONFIG_SRCU */ - -#endif /* #ifndef RCU_TREE_NONCORE */ -- cgit v1.2.3 From ec5dd444b678b1305d9af34ebb4cca17e0ef88e6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 15:02:28 -0700 Subject: rcu: Eliminate rcu_state structure's ->call field The rcu_state structure's ->call field references the corresponding RCU flavor's call_rcu() function. However, now that there is only ever one rcu_state structure in a given build of the Linux kernel, and that flavor uses plain old call_rcu(), there is not a lot of point in continuing to have the ->call field. This commit therefore removes it. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 - kernel/rcu/tree.h | 1 - kernel/rcu/tree_exp.h | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1d36cbcce1b4..ea0dfd13fd27 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -76,7 +76,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, .rda = &rcu_data, - .call = call_rcu, .gp_state = RCU_GP_IDLE, .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 3f36562d3118..c50060567146 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -313,7 +313,6 @@ struct rcu_state { /* Hierarchy levels (+1 to */ /* shut bogus gcc warning) */ struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ - call_rcu_func_t call; /* call_rcu() flavor. */ int ncpus; /* # CPUs seen so far. */ /* The following fields are guarded by the root rcu_node's lock. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 5619edfd414e..224f05f0c0c9 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -619,7 +619,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, /* If expedited grace periods are prohibited, fall back to normal. */ if (rcu_gp_is_normal()) { - wait_rcu_gp(rsp->call); + wait_rcu_gp(call_rcu); return; } -- cgit v1.2.3 From da1df50d16171f4c65da18093d5b5652423f5b99 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 15:37:16 -0700 Subject: rcu: Remove rcu_state structure's ->rda field The rcu_state structure's ->rda field was used to find the per-CPU rcu_data structures corresponding to that rcu_state structure. 
But now there is only one rcu_state structure (creatively named "rcu_state") and one set of per-CPU rcu_data structures (creatively named "rcu_data"). Therefore, uses of the ->rda field can always be replaced by "rcu_data, and this commit makes that change and removes the ->rda field. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 67 ++++++++++++++++++++++++------------------------ kernel/rcu/tree.h | 1 - kernel/rcu/tree_exp.h | 19 +++++++------- kernel/rcu/tree_plugin.h | 24 ++++++++--------- 4 files changed, 54 insertions(+), 57 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ea0dfd13fd27..e6b0bb0d00b7 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -75,7 +75,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, - .rda = &rcu_data, .gp_state = RCU_GP_IDLE, .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), @@ -586,7 +585,7 @@ void show_rcu_gp_kthreads(void) if (!rcu_is_leaf_node(rnp)) continue; for_each_leaf_node_possible_cpu(rnp, cpu) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->gpwrap || ULONG_CMP_GE(rsp->gp_seq, rdp->gp_seq_needed)) @@ -660,7 +659,7 @@ static void rcu_eqs_enter(bool user) trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); do_nocb_deferred_wakeup(rdp); } rcu_prepare_for_idle(); @@ -1034,7 +1033,7 @@ bool rcu_lockdep_current_cpu_online(void) return true; preempt_disable(); for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); rnp = rdp->mynode; if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) { preempt_enable(); @@ -1352,7 +1351,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) print_cpu_stall_info_end(); for_each_possible_cpu(cpu) - totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda, + totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, cpu)->cblist); pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", smp_processor_id(), (long)(jiffies - rsp->gp_start), @@ -1392,7 +1391,7 @@ static void print_cpu_stall(struct rcu_state *rsp) { int cpu; unsigned long flags; - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rcu_get_root(rsp); long totqlen = 0; @@ -1413,7 +1412,7 @@ static void print_cpu_stall(struct rcu_state *rsp) raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); print_cpu_stall_info_end(); for_each_possible_cpu(cpu) - totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda, + totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, cpu)->cblist); pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", jiffies - rsp->gp_start, @@ -1624,7 +1623,7 @@ unlock_out: static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { bool needmore; - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); if (!needmore) @@ -1936,7 +1935,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) rcu_for_each_node_breadth_first(rsp, rnp) { rcu_gp_slow(rsp, gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); 
rcu_preempt_check_blocked_tasks(rsp, rnp); rnp->qsmask = rnp->qsmaskinit; WRITE_ONCE(rnp->gp_seq, rsp->gp_seq); @@ -2050,7 +2049,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) dump_blkd_tasks(rsp, rnp, 10); WARN_ON_ONCE(rnp->qsmask); WRITE_ONCE(rnp->gp_seq, new_gp_seq); - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); if (rnp == rdp->mynode) needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; /* smp_mb() provided by prior unlock-lock pair. */ @@ -2070,7 +2069,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end")); rsp->gp_state = RCU_GP_IDLE; /* Check for GP requests since above loop. */ - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, TPS("CleanupMore")); @@ -2405,7 +2404,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) { RCU_TRACE(bool blkd;) - RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);) + RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);) RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -2469,7 +2468,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) */ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) { - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -2622,7 +2621,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) for_each_leaf_node_possible_cpu(rnp, cpu) { unsigned long bit = leaf_node_cpu_bit(rnp, cpu); if ((rnp->qsmask & bit) != 0) { - if (f(per_cpu_ptr(rsp->rda, cpu))) + if (f(per_cpu_ptr(&rcu_data, cpu))) mask |= bit; } } @@ -2648,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp) struct rcu_node *rnp_old = NULL; /* Funnel through hierarchy to reduce memory contention. */ - rnp = __this_cpu_read(rsp->rda->mynode); + rnp = __this_cpu_read(rcu_data.mynode); for (; rnp != NULL; rnp = rnp->parent) { ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || !raw_spin_trylock(&rnp->fqslock); @@ -2740,7 +2739,7 @@ static void __rcu_process_callbacks(struct rcu_state *rsp) { unsigned long flags; - struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); + struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; WARN_ON_ONCE(!rdp->beenonline); @@ -2894,14 +2893,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, head->func = func; head->next = NULL; local_irq_save(flags); - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); /* Add the callback to our list. */ if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) { int offline; if (cpu != -1) - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); if (likely(rdp->mynode)) { /* Post-boot, so this should be for a no-CBs CPU. 
*/ offline = !__call_rcu_nocb(rdp, head, lazy, flags); @@ -3135,7 +3134,7 @@ static int rcu_pending(void) struct rcu_state *rsp; for_each_rcu_flavor(rsp) - if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda))) + if (__rcu_pending(rsp, this_cpu_ptr(&rcu_data))) return 1; return 0; } @@ -3153,7 +3152,7 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy) struct rcu_state *rsp; for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); if (rcu_segcblist_empty(&rdp->cblist)) continue; hc = true; @@ -3202,7 +3201,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp) static void rcu_barrier_func(void *type) { struct rcu_state *rsp = type; - struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); + struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence); rdp->barrier_head.func = rcu_barrier_callback; @@ -3262,7 +3261,7 @@ static void _rcu_barrier(struct rcu_state *rsp) for_each_possible_cpu(cpu) { if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu)) continue; - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_is_nocb_cpu(cpu)) { if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu, @@ -3372,7 +3371,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); /* Set up local state, ensuring consistent view of global state. */ rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); @@ -3398,7 +3397,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. 
*/ @@ -3454,7 +3453,7 @@ int rcutree_prepare_cpu(unsigned int cpu) */ static void rcutree_affinity_setting(unsigned int cpu, int outgoing) { - struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); } @@ -3471,7 +3470,7 @@ int rcutree_online_cpu(unsigned int cpu) struct rcu_state *rsp; for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; raw_spin_lock_irqsave_rcu_node(rnp, flags); rnp->ffmask |= rdp->grpmask; @@ -3498,7 +3497,7 @@ int rcutree_offline_cpu(unsigned int cpu) struct rcu_state *rsp; for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; raw_spin_lock_irqsave_rcu_node(rnp, flags); rnp->ffmask &= ~rdp->grpmask; @@ -3532,7 +3531,7 @@ int rcutree_dead_cpu(unsigned int cpu) for_each_rcu_flavor(rsp) { rcu_cleanup_dead_cpu(cpu, rsp); - do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); + do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); } return 0; } @@ -3566,7 +3565,7 @@ void rcu_cpu_starting(unsigned int cpu) per_cpu(rcu_cpu_started, cpu) = 1; for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; mask = rdp->grpmask; raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -3600,7 +3599,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) { unsigned long flags; unsigned long mask; - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ @@ -3633,7 +3632,7 @@ void rcu_report_dead(unsigned int cpu) /* QS for any half-done expedited RCU-sched GP. */ preempt_disable(); - rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(rcu_state.rda)); + rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); preempt_enable(); rcu_preempt_deferred_qs(current); for_each_rcu_flavor(rsp) @@ -3647,7 +3646,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) { unsigned long flags; struct rcu_data *my_rdp; - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); bool needwake; @@ -3655,7 +3654,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) return; /* No callbacks to migrate. */ local_irq_save(flags); - my_rdp = this_cpu_ptr(rsp->rda); + my_rdp = this_cpu_ptr(&rcu_data); if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) { local_irq_restore(flags); return; @@ -3857,7 +3856,7 @@ static void __init rcu_init_one(struct rcu_state *rsp) for_each_possible_cpu(i) { while (i > rnp->grphi) rnp++; - per_cpu_ptr(rsp->rda, i)->mynode = rnp; + per_cpu_ptr(&rcu_data, i)->mynode = rnp; rcu_boot_init_percpu_data(i, rsp); } list_add(&rsp->flavors, &rcu_struct_flavors); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index c50060567146..d60304f1ef56 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -312,7 +312,6 @@ struct rcu_state { struct rcu_node *level[RCU_NUM_LVLS + 1]; /* Hierarchy levels (+1 to */ /* shut bogus gcc warning) */ - struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ int ncpus; /* # CPUs seen so far. */ /* The following fields are guarded by the root rcu_node's lock. 
*/ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 224f05f0c0c9..3a8a582d9958 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -286,7 +286,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s) */ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s) { - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); struct rcu_node *rnp = rdp->mynode; struct rcu_node *rnp_root = rcu_get_root(rsp); @@ -361,7 +361,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) mask_ofl_test = 0; for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { unsigned long mask = leaf_node_cpu_bit(rnp, cpu); - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu); int snap; @@ -390,7 +390,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) /* IPI the remaining CPUs for expedited quiescent state. */ for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { unsigned long mask = leaf_node_cpu_bit(rnp, cpu); - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); if (!(mask_ofl_ipi & mask)) continue; @@ -509,7 +509,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) if (!(rnp->expmask & mask)) continue; ndetected++; - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); pr_cont(" %d-%c%c%c", cpu, "O."[!!cpu_online(cpu)], "o."[!!(rdp->grpmask & rnp->expmaskinit)], @@ -642,7 +642,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, } /* Wait for expedited grace period to complete. */ - rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); + rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); rnp = rcu_get_root(rsp); wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], sync_exp_work_done(rsp, s)); @@ -665,7 +665,7 @@ static void sync_rcu_exp_handler(void *info) { unsigned long flags; struct rcu_state *rsp = info; - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; struct task_struct *t = current; @@ -772,13 +772,12 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); #else /* #ifdef CONFIG_PREEMPT_RCU */ /* Invoked on each online non-idle CPU for expedited quiescent state. 
*/ -static void sync_sched_exp_handler(void *data) +static void sync_sched_exp_handler(void *unused) { struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp = data; - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); rnp = rdp->mynode; if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) @@ -801,7 +800,7 @@ static void sync_sched_exp_online_cleanup(int cpu) struct rcu_node *rnp; struct rcu_state *rsp = &rcu_state; - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) return; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 2c81f8dd63b4..b7a99a6e64b6 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -328,7 +328,7 @@ static void rcu_qs(void) void rcu_note_context_switch(bool preempt) { struct task_struct *t = current; - struct rcu_data *rdp = this_cpu_ptr(rcu_state_p->rda); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp; barrier(); /* Avoid RCU read-side critical sections leaking down. */ @@ -488,7 +488,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * t->rcu_read_unlock_special cannot change. */ special = t->rcu_read_unlock_special; - rdp = this_cpu_ptr(rcu_state_p->rda); + rdp = this_cpu_ptr(&rcu_data); if (!special.s && !rdp->deferred_qs) { local_irq_restore(flags); return; @@ -911,7 +911,7 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) } pr_cont("\n"); for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n", cpu, ".o"[onl], @@ -1437,7 +1437,7 @@ static void __init rcu_spawn_boost_kthreads(void) static void rcu_prepare_kthreads(int cpu) { - struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. 
*/ @@ -1574,7 +1574,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) rdtp->last_advance_all = jiffies; for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); rnp = rdp->mynode; /* @@ -1692,7 +1692,7 @@ static void rcu_prepare_for_idle(void) return; rdtp->last_accelerate = jiffies; for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(rsp->rda); + rdp = this_cpu_ptr(&rcu_data); if (!rcu_segcblist_pend_cbs(&rdp->cblist)) continue; rnp = rdp->mynode; @@ -1778,7 +1778,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) { unsigned long delta; char fast_no_hz[72]; - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_dynticks *rdtp = rdp->dynticks; char *ticks_title; unsigned long ticks_value; @@ -1833,7 +1833,7 @@ static void increment_cpu_stall_ticks(void) struct rcu_state *rsp; for_each_rcu_flavor(rsp) - raw_cpu_inc(rsp->rda->ticks_this_gp); + raw_cpu_inc(rcu_data.ticks_this_gp); } #ifdef CONFIG_RCU_NOCB_CPU @@ -1965,7 +1965,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype, */ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) { - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); unsigned long ret; #ifdef CONFIG_PROVE_RCU struct rcu_head *rhp; @@ -2426,7 +2426,7 @@ void __init rcu_init_nohz(void) for_each_rcu_flavor(rsp) { for_each_cpu(cpu, rcu_nocb_mask) - init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu)); + init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); rcu_organize_nocb_kthreads(rsp); } } @@ -2452,7 +2452,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) struct rcu_data *rdp; struct rcu_data *rdp_last; struct rcu_data *rdp_old_leader; - struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu); + struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu); struct task_struct *t; /* @@ -2545,7 +2545,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp) * we will spawn the needed set of rcu_nocb_kthread() kthreads. */ for_each_cpu(cpu, rcu_nocb_mask) { - rdp = per_cpu_ptr(rsp->rda, cpu); + rdp = per_cpu_ptr(&rcu_data, cpu); if (rdp->cpu >= nl) { /* New leader, set up for followers & next leader. */ nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; -- cgit v1.2.3 From 16fc9c600b3caf97f42cdd1e35309b7529a55cfb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 15:54:39 -0700 Subject: rcu: Remove rcu_state_p pointer to default rcu_state structure The rcu_state_p pointer references the default rcu_state structure, that is, the one that call_rcu() uses, as opposed to call_rcu_bh() and sometimes call_rcu_sched(). But there is now only one rcu_state structure, so that one structure is by definition the default, which means that the rcu_state_p pointer no longer serves any useful purpose. This commit therefore removes it. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 27 ++++++++++++--------------- kernel/rcu/tree_exp.h | 2 +- kernel/rcu/tree_plugin.h | 16 ++++++++-------- 3 files changed, 21 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e6b0bb0d00b7..e3cdec55ef3c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -85,7 +85,6 @@ struct rcu_state rcu_state = { .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), }; -static struct rcu_state *const rcu_state_p = &rcu_state; static struct rcu_data __percpu *const rcu_data_p = &rcu_data; LIST_HEAD(rcu_struct_flavors); @@ -491,7 +490,7 @@ static int rcu_pending(void); */ unsigned long rcu_get_gp_seq(void) { - return READ_ONCE(rcu_state_p->gp_seq); + return READ_ONCE(rcu_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_get_gp_seq); @@ -510,7 +509,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); */ unsigned long rcu_bh_get_gp_seq(void) { - return READ_ONCE(rcu_state_p->gp_seq); + return READ_ONCE(rcu_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); @@ -522,7 +521,7 @@ EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); */ unsigned long rcu_exp_batches_completed(void) { - return rcu_state_p->expedited_sequence; + return rcu_state.expedited_sequence; } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); @@ -541,7 +540,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); */ void rcu_force_quiescent_state(void) { - force_quiescent_state(rcu_state_p); + force_quiescent_state(&rcu_state); } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); @@ -550,7 +549,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); */ void rcu_bh_force_quiescent_state(void) { - force_quiescent_state(rcu_state_p); + force_quiescent_state(&rcu_state); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -611,7 +610,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, case RCU_FLAVOR: case RCU_BH_FLAVOR: case RCU_SCHED_FLAVOR: - rsp = rcu_state_p; + rsp = &rcu_state; break; default: break; @@ -2292,7 +2291,6 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp, raw_lockdep_assert_held_rcu_node(rnp); if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) || - WARN_ON_ONCE(rsp != rcu_state_p) || WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || rnp->qsmask != 0) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -2604,7 +2602,6 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->qsmask == 0) { if (!IS_ENABLED(CONFIG_PREEMPT) || - rsp != rcu_state_p || rcu_preempt_blocked_readers_cgp(rnp)) { /* * No point in scanning bits because they @@ -2973,7 +2970,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, */ void call_rcu(struct rcu_head *head, rcu_callback_t func) { - __call_rcu(head, func, rcu_state_p, -1, 0); + __call_rcu(head, func, &rcu_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu); @@ -3000,7 +2997,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched); void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - __call_rcu(head, func, rcu_state_p, -1, 1); + __call_rcu(head, func, &rcu_state, -1, 1); } EXPORT_SYMBOL_GPL(kfree_call_rcu); @@ -3029,7 +3026,7 @@ unsigned long get_state_synchronize_rcu(void) * before the load from ->gp_seq. 
*/ smp_mb(); /* ^^^ */ - return rcu_seq_snap(&rcu_state_p->gp_seq); + return rcu_seq_snap(&rcu_state.gp_seq); } EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); @@ -3049,7 +3046,7 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); */ void cond_synchronize_rcu(unsigned long oldstate) { - if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate)) + if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) synchronize_rcu(); else smp_mb(); /* Ensure GP ends before subsequent accesses. */ @@ -3308,7 +3305,7 @@ static void _rcu_barrier(struct rcu_state *rsp) */ void rcu_barrier_bh(void) { - _rcu_barrier(rcu_state_p); + _rcu_barrier(&rcu_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -3322,7 +3319,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh); */ void rcu_barrier(void) { - _rcu_barrier(rcu_state_p); + _rcu_barrier(&rcu_state); } EXPORT_SYMBOL_GPL(rcu_barrier); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 3a8a582d9958..298a6904bbcd 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -756,7 +756,7 @@ static void sync_sched_exp_online_cleanup(int cpu) */ void synchronize_rcu_expedited(void) { - struct rcu_state *rsp = rcu_state_p; + struct rcu_state *rsp = &rcu_state; RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index b7a99a6e64b6..329d5802d899 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -381,7 +381,7 @@ void rcu_note_context_switch(bool preempt) */ rcu_qs(); if (rdp->deferred_qs) - rcu_report_exp_rdp(rcu_state_p, rdp); + rcu_report_exp_rdp(&rcu_state, rdp); trace_rcu_utilization(TPS("End context switch")); barrier(); /* Avoid RCU read-side critical sections leaking up. */ } @@ -509,7 +509,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * blocked-tasks list below. */ if (rdp->deferred_qs) { - rcu_report_exp_rdp(rcu_state_p, rdp); + rcu_report_exp_rdp(&rcu_state, rdp); if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); return; @@ -566,7 +566,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) rnp->grplo, rnp->grphi, !!rnp->gp_tasks); - rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags); + rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags); } else { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -580,7 +580,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * then we need to report up the rcu_node hierarchy. */ if (!empty_exp && empty_exp_now) - rcu_report_exp_rnp(rcu_state_p, rnp, true); + rcu_report_exp_rnp(&rcu_state, rnp, true); } else { local_irq_restore(flags); } @@ -1300,7 +1300,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct sched_param sp; struct task_struct *t; - if (rcu_state_p != rsp) + if (&rcu_state != rsp) return 0; if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) @@ -1431,8 +1431,8 @@ static void __init rcu_spawn_boost_kthreads(void) for_each_possible_cpu(cpu) per_cpu(rcu_cpu_has_work, cpu) = 0; BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rcu_for_each_leaf_node(rcu_state_p, rnp) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); + rcu_for_each_leaf_node(&rcu_state, rnp) + (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp); } static void rcu_prepare_kthreads(int cpu) @@ -1442,7 +1442,7 @@ static void rcu_prepare_kthreads(int cpu) /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. 
*/ if (rcu_scheduler_fully_active) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); + (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp); } #else /* #ifdef CONFIG_RCU_BOOST */ -- cgit v1.2.3 From 2280ee5a7d3efca0dbb2c241029b6c63bec50a6b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 15:54:39 -0700 Subject: rcu: Remove rcu_data_p pointer to default rcu_data structure The rcu_data_p pointer references the default set of per-CPU rcu_data structures, that is, those that call_rcu() uses, as opposed to call_rcu_bh() and sometimes call_rcu_sched(). But there is now only one set of per-CPU rcu_data structures, so that one set is by definition the default, which means that the rcu_data_p pointer no longer serves any useful purpose. This commit therefore removes it. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 - kernel/rcu/tree_plugin.h | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e3cdec55ef3c..b650b0c9897e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -85,7 +85,6 @@ struct rcu_state rcu_state = { .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), }; -static struct rcu_data __percpu *const rcu_data_p = &rcu_data; LIST_HEAD(rcu_struct_flavors); /* Dump rcu_node combining tree at boot to verify correct setup. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 329d5802d899..18175ca19f34 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -302,11 +302,11 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) static void rcu_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n"); - if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) { + if (__this_cpu_read(rcu_data.cpu_no_qs.s)) { trace_rcu_grace_period(TPS("rcu_preempt"), - __this_cpu_read(rcu_data_p->gp_seq), + __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); - __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false); + __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */ current->rcu_read_unlock_special.b.need_qs = false; } @@ -805,8 +805,8 @@ static void rcu_flavor_check_callbacks(int user) /* If GP is oldish, ask for help from rcu_read_unlock_special(). */ if (t->rcu_read_lock_nesting > 0 && - __this_cpu_read(rcu_data_p->core_needs_qs) && - __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) && + __this_cpu_read(rcu_data.core_needs_qs) && + __this_cpu_read(rcu_data.cpu_no_qs.b.norm) && !t->rcu_read_unlock_special.b.need_qs && time_after(jiffies, rsp->gp_start + HZ)) t->rcu_read_unlock_special.b.need_qs = true; -- cgit v1.2.3 From b50912d0b5e03f11004fec1e2b50244de9e2fa41 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_report_qs_rnp() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_report_qs_rnp(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b650b0c9897e..919033d2c083 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -132,9 +132,8 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active); */ static int rcu_scheduler_fully_active __read_mostly; -static void -rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, - struct rcu_node *rnp, unsigned long gps, unsigned long flags); +static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, + unsigned long gps, unsigned long flags); static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); @@ -1947,7 +1946,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) mask = rnp->qsmask & ~rnp->qsmaskinitnext; rnp->rcu_gp_init_mask = mask; if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) - rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); else raw_spin_unlock_irq_rcu_node(rnp); cond_resched_tasks_rcu_qs(); @@ -2214,13 +2213,13 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) * disabled. This allows propagating quiescent state due to resumed tasks * during grace-period initialization. */ -static void -rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, - struct rcu_node *rnp, unsigned long gps, unsigned long flags) +static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, + unsigned long gps, unsigned long flags) __releases(rnp->lock) { unsigned long oldmask = 0; struct rcu_node *rnp_c; + struct rcu_state __maybe_unused *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -2312,7 +2311,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp, mask = rnp->grpmask; raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ - rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags); + rcu_report_qs_rnp(mask, rnp_p, gps, flags); } /* @@ -2355,7 +2354,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) */ needwake = rcu_accelerate_cbs(rsp, rnp, rdp); - rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); /* ^^^ Released rnp->lock */ if (needwake) rcu_gp_kthread_wake(rsp); @@ -2623,7 +2622,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) } if (mask != 0) { /* Idle/offline CPUs, report (releases rnp->lock). */ - rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); } else { /* Nothing to do here, so just drop the lock. */ raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -3577,7 +3576,7 @@ void rcu_cpu_starting(unsigned int cpu) rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags); if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ /* Report QS -after- changing ->qsmaskinitnext! */ - rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); } else { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -3606,7 +3605,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags); if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ /* Report quiescent state -before- changing ->qsmaskinitnext! 
*/ - rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags); + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); raw_spin_lock_irqsave_rcu_node(rnp, flags); } rnp->qsmaskinitnext &= ~mask; -- cgit v1.2.3 From aff4e9ede52badf550745c3d30ed5fcf86ed4351 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_report_qs_rsp() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_report_qs_rsp(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 919033d2c083..2665a45ccb43 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -138,7 +138,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); static void invoke_rcu_core(void); -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); +static void invoke_rcu_callbacks(struct rcu_data *rdp); static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp); static void sync_sched_exp_online_cleanup(int cpu); @@ -2189,9 +2189,11 @@ static int __noreturn rcu_gp_kthread(void *arg) * just-completed grace period. Note that the caller must hold rnp->lock, * which is released before return. */ -static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) +static void rcu_report_qs_rsp(unsigned long flags) __releases(rcu_get_root(rsp)->lock) { + struct rcu_state *rsp = &rcu_state; + raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp)); WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); @@ -2268,7 +2270,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, * state for this grace period. Invoke rcu_report_qs_rsp() * to clean up and start the next grace period if one is needed. */ - rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ + rcu_report_qs_rsp(flags); /* releases rnp->lock. */ } /* @@ -2302,7 +2304,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp, * Only one rcu_node structure in the tree, so don't * try to report up to its nonexistent parent! */ - rcu_report_qs_rsp(rsp, flags); + rcu_report_qs_rsp(flags); return; } @@ -2761,7 +2763,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* If there are callbacks ready, invoke them. */ if (rcu_segcblist_ready_cbs(&rdp->cblist)) - invoke_rcu_callbacks(rsp, rdp); + invoke_rcu_callbacks(rdp); /* Do any needed deferred wakeups of rcuo kthreads. */ do_nocb_deferred_wakeup(rdp); @@ -2789,8 +2791,10 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused * are running on the current CPU with softirqs disabled, the * rcu_cpu_kthread_task cannot disappear out from under us. */ -static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) +static void invoke_rcu_callbacks(struct rcu_data *rdp) { + struct rcu_state *rsp = &rcu_state; + if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; if (likely(!rsp->boost)) { -- cgit v1.2.3 From 139ad4da5ab5d5600b46d930dbf4419577039d9c Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_report_unblock_qs_rnp() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_report_unblock_qs_rnp(), which is particularly appropriate in this case given that this parameter is no longer used. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 3 +-- kernel/rcu/tree_plugin.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2665a45ccb43..58aca700d67b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2281,8 +2281,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, * disabled. */ static void __maybe_unused -rcu_report_unblock_qs_rnp(struct rcu_state *rsp, - struct rcu_node *rnp, unsigned long flags) +rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) __releases(rnp->lock) { unsigned long gps; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 18175ca19f34..566828ecaecb 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -566,7 +566,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) rnp->grplo, rnp->grphi, !!rnp->gp_tasks); - rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags); + rcu_report_unblock_qs_rnp(rnp, flags); } else { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -- cgit v1.2.3 From 33085c469aeaef3e1f8a203128cf886490419205 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_report_qs_rdp() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_report_qs_rdp(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 58aca700d67b..cdf53f8b31cd 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2320,12 +2320,13 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) * structure. This must be called from the specified CPU. */ static void -rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) +rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) { unsigned long flags; unsigned long mask; bool needwake; struct rcu_node *rnp; + struct rcu_state *rsp = &rcu_state; rnp = rdp->mynode; raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -2392,7 +2393,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) * Tell RCU we are done (but rcu_report_qs_rdp() will be the * judge of that). */ - rcu_report_qs_rdp(rdp->cpu, rsp, rdp); + rcu_report_qs_rdp(rdp->cpu, rdp); } /* -- cgit v1.2.3 From de8e87305a1ae878f7c518fd9cadcc9159cda493 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_gp_in_progress() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_gp_in_progress(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 30 +++++++++++++++--------------- kernel/rcu/tree_plugin.h | 2 +- 2 files changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cdf53f8b31cd..1a2956d9e999 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -189,9 +189,9 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) * permit this function to be invoked without holding the root rcu_node * structure's ->lock, but of course results can be subject to change. */ -static int rcu_gp_in_progress(struct rcu_state *rsp) +static int rcu_gp_in_progress(void) { - return rcu_seq_state(rcu_seq_current(&rsp->gp_seq)); + return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); } void rcu_softirq_qs(void) @@ -1297,7 +1297,7 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp) return; j = READ_ONCE(rsp->jiffies_kick_kthreads); if (time_after(jiffies, j) && rsp->gp_kthread && - (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) { + (rcu_gp_in_progress() || READ_ONCE(rsp->gp_flags))) { WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name); rcu_ftrace_dump(DUMP_ALL); wake_up_process(rsp->gp_kthread); @@ -1449,7 +1449,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) struct rcu_node *rnp; if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || - !rcu_gp_in_progress(rsp)) + !rcu_gp_in_progress()) return; rcu_stall_kick_kthreads(rsp); j = jiffies; @@ -1484,14 +1484,14 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) return; /* No stall or GP completed since entering function. */ rnp = rdp->mynode; jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; - if (rcu_gp_in_progress(rsp) && + if (rcu_gp_in_progress() && (READ_ONCE(rnp->qsmask) & rdp->grpmask) && cmpxchg(&rsp->jiffies_stall, js, jn) == js) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); - } else if (rcu_gp_in_progress(rsp) && + } else if (rcu_gp_in_progress() && ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && cmpxchg(&rsp->jiffies_stall, js, jn) == js) { @@ -1589,7 +1589,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, } /* If GP already in progress, just leave, otherwise start one. */ - if (rcu_gp_in_progress(rsp)) { + if (rcu_gp_in_progress()) { trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); goto unlock_out; } @@ -1846,7 +1846,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) } WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */ - if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { + if (WARN_ON_ONCE(rcu_gp_in_progress())) { /* * Grace period already in progress, don't start another. * Not supposed to be able to happen. 
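[Aside, not part of the patch above.] The conversion shown for rcu_gp_in_progress() is the simplest instance of the pattern this whole series applies: because only one rcu_state structure remains, the rsp argument is dropped and the callee reads the global directly. A rough standalone sketch of that before/after shape, in plain C with invented names (not kernel code; no per-CPU or locking details):

#include <stdio.h>
#include <stdbool.h>

struct demo_state {
        unsigned long gp_seq;
};

/* Single global instance, mirroring the now-unique rcu_state. */
static struct demo_state demo_state = { .gp_seq = 0 };

/* Before: every helper took a pointer to the (only) state structure. */
static bool gp_in_progress_old(struct demo_state *sp)
{
        return sp->gp_seq & 1;  /* low bit set while a grace period runs, in this toy model */
}

/* After: the parameter is gone and the global is referenced directly. */
static bool gp_in_progress_new(void)
{
        return demo_state.gp_seq & 1;
}

int main(void)
{
        demo_state.gp_seq = 5;  /* odd => "in progress" in this toy model */
        printf("old: %d  new: %d\n",
               (int)gp_in_progress_old(&demo_state), (int)gp_in_progress_new());
        return 0;
}

Both variants behave identically here; the kernel change is purely about removing redundant plumbing now that the argument can only ever be &rcu_state.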
@@ -2195,7 +2195,7 @@ static void rcu_report_qs_rsp(unsigned long flags) struct rcu_state *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp)); - WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + WARN_ON_ONCE(!rcu_gp_in_progress()); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); rcu_gp_kthread_wake(rsp); @@ -2682,7 +2682,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_node *rnp_root = rcu_get_root(rsp); static atomic_t warned = ATOMIC_INIT(0); - if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) || + if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed)) return; j = jiffies; /* Expensive access, and in common case don't get here. */ @@ -2693,7 +2693,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, raw_spin_lock_irqsave_rcu_node(rnp, flags); j = jiffies; - if (rcu_gp_in_progress(rsp) || + if (rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) || time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) || @@ -2706,7 +2706,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, if (rnp_root != rnp) raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ j = jiffies; - if (rcu_gp_in_progress(rsp) || + if (rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || time_before(j, rsp->gp_req_activity + gpssdelay) || time_before(j, rsp->gp_activity + gpssdelay) || @@ -2751,7 +2751,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) rcu_check_quiescent_state(rsp, rdp); /* No grace period and unregistered callbacks? */ - if (!rcu_gp_in_progress(rsp) && + if (!rcu_gp_in_progress() && rcu_segcblist_is_enabled(&rdp->cblist)) { local_irq_save(flags); if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) @@ -2841,7 +2841,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, note_gp_changes(rsp, rdp); /* Start a new grace period if one not already started. */ - if (!rcu_gp_in_progress(rsp)) { + if (!rcu_gp_in_progress()) { rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp); } else { /* Give the grace period a kick. */ @@ -3105,7 +3105,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) return 1; /* Has RCU gone idle with this CPU needing another grace period? */ - if (!rcu_gp_in_progress(rsp) && + if (!rcu_gp_in_progress() && rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) return 1; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 566828ecaecb..99f517035a6e 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2655,7 +2655,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp) { #ifdef CONFIG_NO_HZ_FULL if (tick_nohz_full_cpu(smp_processor_id()) && - (!rcu_gp_in_progress(rsp) || + (!rcu_gp_in_progress() || ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ))) return true; #endif /* #ifdef CONFIG_NO_HZ_FULL */ -- cgit v1.2.3 From 336a4f6c451e488b5388a2593fa20f7192706c7b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_get_root() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. 
This commit therefore removes the rsp parameter from rcu_get_root(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 38 +++++++++++++++++++------------------- kernel/rcu/tree_exp.h | 6 +++--- kernel/rcu/tree_plugin.h | 2 +- 3 files changed, 23 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1a2956d9e999..8d0e18faab3b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -623,9 +623,9 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); /* * Return the root node of the specified rcu_state structure. */ -static struct rcu_node *rcu_get_root(struct rcu_state *rsp) +static struct rcu_node *rcu_get_root(void) { - return &rsp->node[0]; + return &rcu_state.node[0]; } /* @@ -1318,7 +1318,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) unsigned long gpa; unsigned long j; int ndetected = 0; - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1367,7 +1367,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", rsp->name, j - gpa, j, gpa, jiffies_till_next_fqs, - rcu_get_root(rsp)->qsmask); + rcu_get_root()->qsmask); /* In this case, the current CPU might be at fault. */ sched_show_task(current); } @@ -1389,7 +1389,7 @@ static void print_cpu_stall(struct rcu_state *rsp) int cpu; unsigned long flags; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1835,7 +1835,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) unsigned long oldmask; unsigned long mask; struct rcu_data *rdp; - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); WRITE_ONCE(rsp->gp_activity, jiffies); raw_spin_lock_irq_rcu_node(rnp); @@ -1962,7 +1962,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) */ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) { - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); /* Someone like call_rcu() requested a force-quiescent-state scan. */ *gfp = READ_ONCE(rsp->gp_flags); @@ -1981,7 +1981,7 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) */ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) { - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); WRITE_ONCE(rsp->gp_activity, jiffies); rsp->n_force_qs++; @@ -2010,7 +2010,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) bool needgp = false; unsigned long new_gp_seq; struct rcu_data *rdp; - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); struct swait_queue_head *sq; WRITE_ONCE(rsp->gp_activity, jiffies); @@ -2058,7 +2058,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) WRITE_ONCE(rsp->gp_activity, jiffies); rcu_gp_slow(rsp, gp_cleanup_delay); } - rnp = rcu_get_root(rsp); + rnp = rcu_get_root(); raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */ /* Declare grace period done. 
*/ @@ -2094,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg) unsigned long j; int ret; struct rcu_state *rsp = arg; - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); rcu_bind_gp_kthread(); for (;;) { @@ -2190,14 +2190,14 @@ static int __noreturn rcu_gp_kthread(void *arg) * which is released before return. */ static void rcu_report_qs_rsp(unsigned long flags) - __releases(rcu_get_root(rsp)->lock) + __releases(rcu_get_root()->lock) { struct rcu_state *rsp = &rcu_state; - raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp)); + raw_lockdep_assert_held_rcu_node(rcu_get_root()); WARN_ON_ONCE(!rcu_gp_in_progress()); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); - raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags); + raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); rcu_gp_kthread_wake(rsp); } @@ -2654,7 +2654,7 @@ static void force_quiescent_state(struct rcu_state *rsp) return; rnp_old = rnp; } - /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ + /* rnp_old == rcu_get_root(), rnp == NULL. */ /* Reached the root of the rcu_node tree, acquire lock. */ raw_spin_lock_irqsave_rcu_node(rnp_old, flags); @@ -2679,7 +2679,7 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ; unsigned long flags; unsigned long j; - struct rcu_node *rnp_root = rcu_get_root(rsp); + struct rcu_node *rnp_root = rcu_get_root(); static atomic_t warned = ATOMIC_INIT(0); if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || @@ -3397,7 +3397,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); /* Set up local state, ensuring consistent view of global state. */ raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -3646,7 +3646,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) unsigned long flags; struct rcu_data *my_rdp; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); + struct rcu_node *rnp_root = rcu_get_root(); bool needwake; if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist)) @@ -3744,7 +3744,7 @@ static int __init rcu_spawn_gp_kthread(void) for_each_rcu_flavor(rsp) { t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); BUG_ON(IS_ERR(t)); - rnp = rcu_get_root(rsp); + rnp = rcu_get_root(); raw_spin_lock_irqsave_rcu_node(rnp, flags); rsp->gp_kthread = t; if (kthread_prio) { diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 298a6904bbcd..0bcbb03c9702 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -288,7 +288,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); struct rcu_node *rnp = rdp->mynode; - struct rcu_node *rnp_root = rcu_get_root(rsp); + struct rcu_node *rnp_root = rcu_get_root(); /* Low-contention fastpath. 
*/ if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && @@ -479,7 +479,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) unsigned long mask; int ndetected; struct rcu_node *rnp; - struct rcu_node *rnp_root = rcu_get_root(rsp); + struct rcu_node *rnp_root = rcu_get_root(); int ret; trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait")); @@ -643,7 +643,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, /* Wait for expedited grace period to complete. */ rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); - rnp = rcu_get_root(rsp); + rnp = rcu_get_root(); wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], sync_exp_work_done(rsp, s)); smp_mb(); /* Workqueue actions happen before return. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 99f517035a6e..545e4ac9422a 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -685,7 +685,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) */ static void rcu_print_detail_task_stall(struct rcu_state *rsp) { - struct rcu_node *rnp = rcu_get_root(rsp); + struct rcu_node *rnp = rcu_get_root(); rcu_print_detail_task_stall_rnp(rnp); rcu_for_each_leaf_node(rsp, rnp) -- cgit v1.2.3 From ad3832e974eba3b6d253d60a28eac2f2da7ea7ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from record_gp_stall_check_time() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from record_gp_stall_check_time(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8d0e18faab3b..bcfdb92d5d10 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1214,17 +1214,17 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) return 0; } -static void record_gp_stall_check_time(struct rcu_state *rsp) +static void record_gp_stall_check_time(void) { unsigned long j = jiffies; unsigned long j1; - rsp->gp_start = j; + rcu_state.gp_start = j; j1 = rcu_jiffies_till_stall_check(); /* Record ->gp_start before ->jiffies_stall. */ - smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */ - rsp->jiffies_resched = j + j1 / 2; - rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs); + smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */ + rcu_state.jiffies_resched = j + j1 / 2; + rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs); } /* @@ -1856,7 +1856,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) } /* Advance to a new grace period and initialize state. */ - record_gp_stall_check_time(rsp); + record_gp_stall_check_time(); /* Record GP times before starting GP, hence rcu_seq_start(). */ rcu_seq_start(&rsp->gp_seq); trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start")); -- cgit v1.2.3 From 8fd119b6522fea9ba5e68a3aa653f1490778fb25 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_check_gp_kthread_starvation() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_check_gp_kthread_starvation(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index bcfdb92d5d10..09f05083f01d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1240,10 +1240,11 @@ static const char *gp_state_getname(short gs) /* * Complain about starvation of grace-period kthread. */ -static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) +static void rcu_check_gp_kthread_starvation(void) { unsigned long gpa; unsigned long j; + struct rcu_state *rsp = &rcu_state; j = jiffies; gpa = READ_ONCE(rsp->gp_activity); @@ -1377,7 +1378,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) WRITE_ONCE(rsp->jiffies_stall, jiffies + 3 * rcu_jiffies_till_stall_check() + 3); - rcu_check_gp_kthread_starvation(rsp); + rcu_check_gp_kthread_starvation(); panic_on_rcu_stall(); @@ -1415,7 +1416,7 @@ static void print_cpu_stall(struct rcu_state *rsp) jiffies - rsp->gp_start, (long)rcu_seq_current(&rsp->gp_seq), totqlen); - rcu_check_gp_kthread_starvation(rsp); + rcu_check_gp_kthread_starvation(); rcu_dump_cpu_stacks(rsp); -- cgit v1.2.3 From 33dbdbf02538e8f088f83a89de68436da590ce76 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_dump_cpu_stacks() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_dump_cpu_stacks(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 09f05083f01d..3e252b80e0bf 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1270,13 +1270,13 @@ static void rcu_check_gp_kthread_starvation(void) * that don't support NMI-based stack dumps. The NMI-triggered stack * traces are more accurate because they are printed by the target CPU. */ -static void rcu_dump_cpu_stacks(struct rcu_state *rsp) +static void rcu_dump_cpu_stacks(void) { int cpu; unsigned long flags; struct rcu_node *rnp; - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(&rcu_state, rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); for_each_leaf_node_possible_cpu(rnp, cpu) if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) @@ -1355,7 +1355,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) smp_processor_id(), (long)(jiffies - rsp->gp_start), (long)rcu_seq_current(&rsp->gp_seq), totqlen); if (ndetected) { - rcu_dump_cpu_stacks(rsp); + rcu_dump_cpu_stacks(); /* Complain about tasks blocking the grace period. */ rcu_print_detail_task_stall(rsp); @@ -1418,7 +1418,7 @@ static void print_cpu_stall(struct rcu_state *rsp) rcu_check_gp_kthread_starvation(); - rcu_dump_cpu_stacks(rsp); + rcu_dump_cpu_stacks(); raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Rewrite if needed in case of slow consoles. */ -- cgit v1.2.3 From e1741c69d427596c67639b25f1309836e001c224 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_stall_kick_kthreads() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_stall_kick_kthreads(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3e252b80e0bf..20466fe22e82 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1290,9 +1290,10 @@ static void rcu_dump_cpu_stacks(void) * If too much time has passed in the current grace period, and if * so configured, go kick the relevant kthreads. */ -static void rcu_stall_kick_kthreads(struct rcu_state *rsp) +static void rcu_stall_kick_kthreads(void) { unsigned long j; + struct rcu_state *rsp = &rcu_state; if (!rcu_kick_kthreads) return; @@ -1323,7 +1324,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) long totqlen = 0; /* Kick and suppress, if so configured. */ - rcu_stall_kick_kthreads(rsp); + rcu_stall_kick_kthreads(); if (rcu_cpu_stall_suppress) return; @@ -1394,7 +1395,7 @@ static void print_cpu_stall(struct rcu_state *rsp) long totqlen = 0; /* Kick and suppress, if so configured. */ - rcu_stall_kick_kthreads(rsp); + rcu_stall_kick_kthreads(); if (rcu_cpu_stall_suppress) return; @@ -1452,7 +1453,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || !rcu_gp_in_progress()) return; - rcu_stall_kick_kthreads(rsp); + rcu_stall_kick_kthreads(); j = jiffies; /* -- cgit v1.2.3 From a91e7e58b1016cd3ce043ab3dd5cde7a1b098215 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from print_other_cpu_stall() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from print_other_cpu_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 20466fe22e82..13f507789588 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1313,7 +1313,7 @@ static void panic_on_rcu_stall(void) panic("RCU Stall\n"); } -static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) +static void print_other_cpu_stall(unsigned long gp_seq) { int cpu; unsigned long flags; @@ -1321,6 +1321,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) unsigned long j; int ndetected = 0; struct rcu_node *rnp = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1498,7 +1499,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) cmpxchg(&rsp->jiffies_stall, js, jn) == js) { /* They had a few time units to dump stack, so complain. */ - print_other_cpu_stall(rsp, gs2); + print_other_cpu_stall(gs2); } } -- cgit v1.2.3 From 4e8b8e08f931c9378dec9f304f8a170bcf5e70bb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from print_cpu_stall() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from print_cpu_stall(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 13f507789588..f139b8202d5d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1387,12 +1387,13 @@ static void print_other_cpu_stall(unsigned long gp_seq) force_quiescent_state(rsp); /* Kick them all. */ } -static void print_cpu_stall(struct rcu_state *rsp) +static void print_cpu_stall(void) { int cpu; unsigned long flags; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1492,7 +1493,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) cmpxchg(&rsp->jiffies_stall, js, jn) == js) { /* We haven't checked in, so go dump stack. */ - print_cpu_stall(rsp); + print_cpu_stall(); } else if (rcu_gp_in_progress() && ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && -- cgit v1.2.3 From ea12ff2b7d97607bb69b50ccc30d3819b44ffb2b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from check_cpu_stall() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from check_cpu_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f139b8202d5d..a222afb6d74d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1442,7 +1442,7 @@ static void print_cpu_stall(void) resched_cpu(smp_processor_id()); } -static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) +static void check_cpu_stall(struct rcu_data *rdp) { unsigned long gs1; unsigned long gs2; @@ -1451,6 +1451,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) unsigned long jn; unsigned long js; struct rcu_node *rnp; + struct rcu_state *rsp = &rcu_state; if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || !rcu_gp_in_progress()) @@ -3094,7 +3095,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) struct rcu_node *rnp = rdp->mynode; /* Check for CPU stalls, if enabled. */ - check_cpu_stall(rsp, rdp); + check_cpu_stall(rdp); /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ if (rcu_nohz_full_cpu(rsp)) -- cgit v1.2.3 From 3481f2eab09563456bbc7cb358ad5d151a509064 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_future_gp_cleanup() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_future_gp_cleanup(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a222afb6d74d..87fc0727a9b8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1622,7 +1622,7 @@ unlock_out: * Clean up any old requests for the just-ended grace period. Also return * whether any additional grace periods have been requested. 
*/ -static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) +static bool rcu_future_gp_cleanup(struct rcu_node *rnp) { bool needmore; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); @@ -2055,7 +2055,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) if (rnp == rdp->mynode) needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; /* smp_mb() provided by prior unlock-lock pair. */ - needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp; + needgp = rcu_future_gp_cleanup(rnp) || needgp; sq = rcu_nocb_gp_get(rnp); raw_spin_unlock_irq_rcu_node(rnp); rcu_nocb_gp_cleanup(sq); -- cgit v1.2.3 From 532c00c97f16a2a8576d453ae13ddc38162faed4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_gp_kthread_wake() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_gp_kthread_wake(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 22 +++++++++++----------- kernel/rcu/tree_plugin.h | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 87fc0727a9b8..06f83fce416b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1642,13 +1642,13 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) * raced to awaken, and we lost), and finally don't try to awaken * a kthread that has not yet been created. */ -static void rcu_gp_kthread_wake(struct rcu_state *rsp) +static void rcu_gp_kthread_wake(void) { - if (current == rsp->gp_kthread || - !READ_ONCE(rsp->gp_flags) || - !rsp->gp_kthread) + if (current == rcu_state.gp_kthread || + !READ_ONCE(rcu_state.gp_flags) || + !rcu_state.gp_kthread) return; - swake_up_one(&rsp->gp_wq); + swake_up_one(&rcu_state.gp_wq); } /* @@ -1722,7 +1722,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp, needwake = rcu_accelerate_cbs(rsp, rnp, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/ if (needwake) - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } /* @@ -1820,7 +1820,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) needwake = __note_gp_changes(rsp, rnp, rdp); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (needwake) - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } static void rcu_gp_slow(struct rcu_state *rsp, int delay) @@ -2203,7 +2203,7 @@ static void rcu_report_qs_rsp(unsigned long flags) WARN_ON_ONCE(!rcu_gp_in_progress()); WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } /* @@ -2364,7 +2364,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); /* ^^^ Released rnp->lock */ if (needwake) - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } } @@ -2670,7 +2670,7 @@ static void force_quiescent_state(struct rcu_state *rsp) } WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } /* @@ -3672,7 +3672,7 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) !rcu_segcblist_n_cbs(&my_rdp->cblist)); raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags); if (needwake) - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || !rcu_segcblist_empty(&rdp->cblist), "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 545e4ac9422a..50ca000ad9f2 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1700,7 +1700,7 @@ static void rcu_prepare_for_idle(void) needwake = rcu_accelerate_cbs(rsp, rnp, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ if (needwake) - rcu_gp_kthread_wake(rsp); + rcu_gp_kthread_wake(); } } @@ -2147,7 +2147,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) needwake = rcu_start_this_gp(rnp, rdp, c); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (needwake) - rcu_gp_kthread_wake(rdp->rsp); + rcu_gp_kthread_wake(); } /* -- cgit v1.2.3 From 02f501423d0dde7a4b0dd138e0de6175bcf1926c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_accelerate_cbs() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_accelerate_cbs(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 15 +++++++-------- kernel/rcu/tree_plugin.h | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 06f83fce416b..984dbbf47265 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1663,11 +1663,11 @@ static void rcu_gp_kthread_wake(void) * * The caller must hold rnp->lock with interrupts disabled. */ -static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, - struct rcu_data *rdp) +static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) { unsigned long gp_seq_req; bool ret = false; + struct rcu_state *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -1719,7 +1719,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp, return; } raw_spin_lock_rcu_node(rnp); /* irqs already disabled. 
*/ - needwake = rcu_accelerate_cbs(rsp, rnp, rdp); + needwake = rcu_accelerate_cbs(rnp, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ if (needwake) rcu_gp_kthread_wake(); @@ -1751,7 +1751,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); /* Classify any remaining callbacks. */ - return rcu_accelerate_cbs(rsp, rnp, rdp); + return rcu_accelerate_cbs(rnp, rdp); } /* @@ -1777,7 +1777,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend")); } else { - ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */ + ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */ } /* Now handle the beginnings of any new-to-this-CPU grace periods. */ @@ -2078,7 +2078,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) needgp = true; } /* Advance CBs to reduce false positives below. */ - if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) { + if (!rcu_accelerate_cbs(rnp, rdp) && needgp) { WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); rsp->gp_req_activity = jiffies; trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), @@ -2331,7 +2331,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) unsigned long mask; bool needwake; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; rnp = rdp->mynode; raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -2359,7 +2358,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. */ - needwake = rcu_accelerate_cbs(rsp, rnp, rdp); + needwake = rcu_accelerate_cbs(rnp, rdp); rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); /* ^^^ Released rnp->lock */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 50ca000ad9f2..0c59c3987c60 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1697,7 +1697,7 @@ static void rcu_prepare_for_idle(void) continue; rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ - needwake = rcu_accelerate_cbs(rsp, rnp, rdp); + needwake = rcu_accelerate_cbs(rnp, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ if (needwake) rcu_gp_kthread_wake(); -- cgit v1.2.3 From c6e09b97b9338de2b829a4005dc437e689bf903e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_accelerate_cbs_unlocked() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_accelerate_cbs_unlocked(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 984dbbf47265..e66d9e446b1d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1704,15 +1704,14 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) * that a new grace-period request be made, invokes rcu_accelerate_cbs() * while holding the leaf rcu_node structure's ->lock. 
*/ -static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp, - struct rcu_node *rnp, +static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, struct rcu_data *rdp) { unsigned long c; bool needwake; lockdep_assert_irqs_disabled(); - c = rcu_seq_snap(&rsp->gp_seq); + c = rcu_seq_snap(&rcu_state.gp_seq); if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { /* Old request still live, so mark recent callbacks. */ (void)rcu_segcblist_accelerate(&rdp->cblist, c); @@ -2759,7 +2758,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) rcu_segcblist_is_enabled(&rdp->cblist)) { local_irq_save(flags); if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) - rcu_accelerate_cbs_unlocked(rsp, rnp, rdp); + rcu_accelerate_cbs_unlocked(rnp, rdp); local_irq_restore(flags); } @@ -2846,7 +2845,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, /* Start a new grace period if one not already started. */ if (!rcu_gp_in_progress()) { - rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp); + rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); } else { /* Give the grace period a kick. */ rdp->blimit = LONG_MAX; -- cgit v1.2.3 From 834f56bf54e866e8db9d09b02fb1f3c0184ec927 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_advance_cbs() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_advance_cbs(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e66d9e446b1d..6964d04c0823 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1734,8 +1734,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, * * The caller must hold rnp->lock with interrupts disabled. */ -static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, - struct rcu_data *rdp) +static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) { raw_lockdep_assert_held_rcu_node(rnp); @@ -1773,7 +1772,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, /* Handle the ends of any preceding grace periods first. */ if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || unlikely(READ_ONCE(rdp->gpwrap))) { - ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */ + ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */ trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend")); } else { ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */ @@ -3663,8 +3662,8 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) } raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ /* Leverage recent GPs and set GP for new callbacks. */ - needwake = rcu_advance_cbs(rsp, rnp_root, rdp) || - rcu_advance_cbs(rsp, rnp_root, my_rdp); + needwake = rcu_advance_cbs(rnp_root, rdp) || + rcu_advance_cbs(rnp_root, my_rdp); rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); -- cgit v1.2.3 From c7e48f7ba3820145d08015108ea763bd03c888e9 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from __note_gp_changes() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from __note_gp_changes(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 6964d04c0823..3e1ec264a653 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1758,11 +1758,11 @@ static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) * structure corresponding to the current CPU, and must have irqs disabled. * Returns true if the grace-period kthread needs to be awakened. */ -static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, - struct rcu_data *rdp) +static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) { bool ret; bool need_gp; + struct rcu_state __maybe_unused *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -1815,7 +1815,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) local_irq_restore(flags); return; } - needwake = __note_gp_changes(rsp, rnp, rdp); + needwake = __note_gp_changes(rnp, rdp); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (needwake) rcu_gp_kthread_wake(); @@ -1940,7 +1940,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) rnp->qsmask = rnp->qsmaskinit; WRITE_ONCE(rnp->gp_seq, rsp->gp_seq); if (rnp == rdp->mynode) - (void)__note_gp_changes(rsp, rnp, rdp); + (void)__note_gp_changes(rnp, rdp); rcu_preempt_boost_start_gp(rnp); trace_rcu_grace_period_init(rsp->name, rnp->gp_seq, rnp->level, rnp->grplo, @@ -2051,7 +2051,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) WRITE_ONCE(rnp->gp_seq, new_gp_seq); rdp = this_cpu_ptr(&rcu_data); if (rnp == rdp->mynode) - needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; + needgp = __note_gp_changes(rnp, rdp) || needgp; /* smp_mb() provided by prior unlock-lock pair. */ needgp = rcu_future_gp_cleanup(rnp) || needgp; sq = rcu_nocb_gp_get(rnp); -- cgit v1.2.3 From 15cabdffbbf629f2588612f092bdb37dfa16cc79 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from note_gp_changes() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from note_gp_changes(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 6 +++--- kernel/rcu/tree_plugin.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3e1ec264a653..9189f7c70df5 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1801,7 +1801,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) return ret; } -static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) +static void note_gp_changes(struct rcu_data *rdp) { unsigned long flags; bool needwake; @@ -2375,7 +2375,7 @@ static void rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) { /* Check for grace-period ends and beginnings. */ - note_gp_changes(rsp, rdp); + note_gp_changes(rdp); /* * Does this CPU still need to do its part for current grace period? 
@@ -2840,7 +2840,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, rdp->qlen_last_fqs_check + qhimark)) { /* Are we ignoring a completed grace period? */ - note_gp_changes(rsp, rdp); + note_gp_changes(rdp); /* Start a new grace period if one not already started. */ if (!rcu_gp_in_progress()) { diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 0c59c3987c60..82f10a6bf266 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1586,7 +1586,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) rcu_seq_current(&rnp->gp_seq)) || unlikely(READ_ONCE(rdp->gpwrap))) && rcu_segcblist_pend_cbs(&rdp->cblist)) - note_gp_changes(rsp, rdp); + note_gp_changes(rdp); if (rcu_segcblist_ready_cbs(&rdp->cblist)) cbs_ready = true; -- cgit v1.2.3 From 22212332c1f37da35e0d841b1e06421a4956e1ea Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_gp_slow() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_gp_slow(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9189f7c70df5..29121629c004 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1821,10 +1821,10 @@ static void note_gp_changes(struct rcu_data *rdp) rcu_gp_kthread_wake(); } -static void rcu_gp_slow(struct rcu_state *rsp, int delay) +static void rcu_gp_slow(int delay) { if (delay > 0 && - !(rcu_seq_ctr(rsp->gp_seq) % + !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) schedule_timeout_uninterruptible(delay); } @@ -1917,7 +1917,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) raw_spin_unlock_irq_rcu_node(rnp); spin_unlock(&rsp->ofl_lock); } - rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */ + rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ /* * Set the quiescent-state-needed bits in all the rcu_node @@ -1933,7 +1933,7 @@ static bool rcu_gp_init(struct rcu_state *rsp) */ rsp->gp_state = RCU_GP_INIT; rcu_for_each_node_breadth_first(rsp, rnp) { - rcu_gp_slow(rsp, gp_init_delay); + rcu_gp_slow(gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp = this_cpu_ptr(&rcu_data); rcu_preempt_check_blocked_tasks(rsp, rnp); @@ -2059,7 +2059,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) rcu_nocb_gp_cleanup(sq); cond_resched_tasks_rcu_qs(); WRITE_ONCE(rsp->gp_activity, jiffies); - rcu_gp_slow(rsp, gp_cleanup_delay); + rcu_gp_slow(gp_cleanup_delay); } rnp = rcu_get_root(); raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */ -- cgit v1.2.3 From 0854a05c9fa554930174f0fa7453c18f99108a4a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_gp_kthread() and friends There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_gp_init(), rcu_gp_fqs_check_wake(), rcu_gp_fqs(), rcu_gp_cleanup(), and rcu_gp_kthread(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 29121629c004..af4aeaaee046 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1832,13 +1832,14 @@ static void rcu_gp_slow(int delay) /* * Initialize a new grace period. Return false if no grace period required. */ -static bool rcu_gp_init(struct rcu_state *rsp) +static bool rcu_gp_init(void) { unsigned long flags; unsigned long oldmask; unsigned long mask; struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; WRITE_ONCE(rsp->gp_activity, jiffies); raw_spin_lock_irq_rcu_node(rnp); @@ -1963,12 +1964,12 @@ static bool rcu_gp_init(struct rcu_state *rsp) * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state * time. */ -static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) +static bool rcu_gp_fqs_check_wake(int *gfp) { struct rcu_node *rnp = rcu_get_root(); /* Someone like call_rcu() requested a force-quiescent-state scan. */ - *gfp = READ_ONCE(rsp->gp_flags); + *gfp = READ_ONCE(rcu_state.gp_flags); if (*gfp & RCU_GP_FLAG_FQS) return true; @@ -1982,9 +1983,10 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp) /* * Do one round of quiescent-state forcing. */ -static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) +static void rcu_gp_fqs(bool first_time) { struct rcu_node *rnp = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; WRITE_ONCE(rsp->gp_activity, jiffies); rsp->n_force_qs++; @@ -2007,13 +2009,14 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time) /* * Clean up after the old grace period. */ -static void rcu_gp_cleanup(struct rcu_state *rsp) +static void rcu_gp_cleanup(void) { unsigned long gp_duration; bool needgp = false; unsigned long new_gp_seq; struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; struct swait_queue_head *sq; WRITE_ONCE(rsp->gp_activity, jiffies); @@ -2090,13 +2093,13 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) /* * Body of kthread that handles grace periods. */ -static int __noreturn rcu_gp_kthread(void *arg) +static int __noreturn rcu_gp_kthread(void *unused) { bool first_gp_fqs; int gf; unsigned long j; int ret; - struct rcu_state *rsp = arg; + struct rcu_state *rsp = &rcu_state; struct rcu_node *rnp = rcu_get_root(); rcu_bind_gp_kthread(); @@ -2112,7 +2115,7 @@ static int __noreturn rcu_gp_kthread(void *arg) RCU_GP_FLAG_INIT); rsp->gp_state = RCU_GP_DONE_GPS; /* Locking provides needed memory barrier. */ - if (rcu_gp_init(rsp)) + if (rcu_gp_init()) break; cond_resched_tasks_rcu_qs(); WRITE_ONCE(rsp->gp_activity, jiffies); @@ -2137,7 +2140,7 @@ static int __noreturn rcu_gp_kthread(void *arg) TPS("fqswait")); rsp->gp_state = RCU_GP_WAIT_FQS; ret = swait_event_idle_timeout_exclusive(rsp->gp_wq, - rcu_gp_fqs_check_wake(rsp, &gf), j); + rcu_gp_fqs_check_wake(&gf), j); rsp->gp_state = RCU_GP_DOING_FQS; /* Locking provides needed memory barriers. */ /* If grace period done, leave loop. */ @@ -2150,7 +2153,7 @@ static int __noreturn rcu_gp_kthread(void *arg) trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("fqsstart")); - rcu_gp_fqs(rsp, first_gp_fqs); + rcu_gp_fqs(first_gp_fqs); first_gp_fqs = false; trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), @@ -2178,7 +2181,7 @@ static int __noreturn rcu_gp_kthread(void *arg) /* Handle grace-period end. 
*/ rsp->gp_state = RCU_GP_CLEANUP; - rcu_gp_cleanup(rsp); + rcu_gp_cleanup(); rsp->gp_state = RCU_GP_CLEANED; } } @@ -3744,7 +3747,7 @@ static int __init rcu_spawn_gp_kthread(void) rcu_scheduler_fully_active = 1; for_each_rcu_flavor(rsp) { - t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name); + t = kthread_create(rcu_gp_kthread, NULL, "%s", rsp->name); BUG_ON(IS_ERR(t)); rnp = rcu_get_root(); raw_spin_lock_irqsave_rcu_node(rnp, flags); -- cgit v1.2.3 From 8087d3e3c453a7caad389dbd78a32bf19a536928 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_check_quiescent_state() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_check_quiescent_state(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index af4aeaaee046..51d076495548 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2375,7 +2375,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) * quiescent state for this grace period, and record that fact if so. */ static void -rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) +rcu_check_quiescent_state(struct rcu_data *rdp) { /* Check for grace-period ends and beginnings. */ note_gp_changes(rdp); @@ -2753,7 +2753,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) resched_cpu(rdp->cpu); /* Provoke future context switch. */ /* Update RCU state based on any recent quiescent states. */ - rcu_check_quiescent_state(rsp, rdp); + rcu_check_quiescent_state(rdp); /* No grace period and unregistered callbacks? */ if (!rcu_gp_in_progress() && -- cgit v1.2.3 From 780cd590836fe24bc2a81b8cd7c2f9cbe495421e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from CPU hotplug functions There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_cleanup_dying_cpu() and rcu_cleanup_dead_cpu(). And, as long as we are in the neighborhood, inlines them into rcutree_dying_cpu() and rcutree_dead_cpu(), respectively. This also eliminates a pair of for_each_rcu_flavor() loops. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 43 +++++++++++-------------------------------- 1 file changed, 11 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 51d076495548..f06a4bf58b25 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2402,20 +2402,22 @@ rcu_check_quiescent_state(struct rcu_data *rdp) } /* - * Trace the fact that this CPU is going offline. + * Near the end of the offline process. Trace the fact that this CPU + * is going offline. */ -static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) +int rcutree_dying_cpu(unsigned int cpu) { RCU_TRACE(bool blkd;) RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);) RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) - return; + return 0; RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) - trace_rcu_grace_period(rsp->name, rnp->gp_seq, + trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, blkd ? 
TPS("cpuofl") : TPS("cpuofl-bgp")); + return 0; } /* @@ -2469,16 +2471,19 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) * There can only be one CPU hotplug operation at a time, so no need for * explicit locking. */ -static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) +int rcutree_dead_cpu(unsigned int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) - return; + return 0; /* Adjust any no-longer-needed kthreads. */ rcu_boost_kthread_setaffinity(rnp, -1); + /* Do any needed no-CB deferred wakeups from this CPU. */ + do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); + return 0; } /* @@ -3514,32 +3519,6 @@ int rcutree_offline_cpu(unsigned int cpu) return 0; } -/* - * Near the end of the offline process. We do only tracing here. - */ -int rcutree_dying_cpu(unsigned int cpu) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - rcu_cleanup_dying_cpu(rsp); - return 0; -} - -/* - * The outgoing CPU is gone and we are running elsewhere. - */ -int rcutree_dead_cpu(unsigned int cpu) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) { - rcu_cleanup_dead_cpu(cpu, rsp); - do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); - } - return 0; -} - static DEFINE_PER_CPU(int, rcu_cpu_started); /* -- cgit v1.2.3 From 5bb5d09cc4f868497dfec2f8101f580f2c571816 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_do_batch() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_do_batch(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- kernel/rcu/tree_plugin.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f06a4bf58b25..174261a3c193 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2490,12 +2490,13 @@ int rcutree_dead_cpu(unsigned int cpu) * Invoke any RCU callbacks that have made it to the end of their grace * period. Thottle as specified by rdp->blimit. */ -static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) +static void rcu_do_batch(struct rcu_data *rdp) { unsigned long flags; struct rcu_head *rhp; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); long bl, count; + struct rcu_state *rsp = &rcu_state; /* If no callbacks are ready, just return. */ if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { @@ -2808,7 +2809,7 @@ static void invoke_rcu_callbacks(struct rcu_data *rdp) if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); + rcu_do_batch(rdp); return; } invoke_rcu_callbacks_kthread(); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 82f10a6bf266..c678c76a754e 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1324,7 +1324,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, static void rcu_kthread_do_work(void) { - rcu_do_batch(&rcu_state, this_cpu_ptr(&rcu_data)); + rcu_do_batch(this_cpu_ptr(&rcu_data)); } static void rcu_cpu_kthread_setup(unsigned int cpu) -- cgit v1.2.3 From e9ecb780fe7d881ebd290663d5cfb9dd7b5e58f4 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from force-quiescent-state functions There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from force_qs_rnp() and force_quiescent_state(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 174261a3c193..2644ed685024 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -479,8 +479,8 @@ module_param(rcu_kick_kthreads, bool, 0644); static ulong jiffies_till_sched_qs = HZ / 10; module_param(jiffies_till_sched_qs, ulong, 0444); -static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)); -static void force_quiescent_state(struct rcu_state *rsp); +static void force_qs_rnp(int (*f)(struct rcu_data *rsp)); +static void force_quiescent_state(void); static int rcu_pending(void); /* @@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); */ void rcu_force_quiescent_state(void) { - force_quiescent_state(&rcu_state); + force_quiescent_state(); } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); @@ -547,7 +547,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); */ void rcu_bh_force_quiescent_state(void) { - force_quiescent_state(&rcu_state); + force_quiescent_state(); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -1384,7 +1384,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) panic_on_rcu_stall(); - force_quiescent_state(rsp); /* Kick them all. */ + force_quiescent_state(); /* Kick them all. */ } static void print_cpu_stall(void) @@ -1992,10 +1992,10 @@ static void rcu_gp_fqs(bool first_time) rsp->n_force_qs++; if (first_time) { /* Collect dyntick-idle snapshots. */ - force_qs_rnp(rsp, dyntick_save_progress_counter); + force_qs_rnp(dyntick_save_progress_counter); } else { /* Handle dyntick-idle and offline CPUs. */ - force_qs_rnp(rsp, rcu_implicit_dynticks_qs); + force_qs_rnp(rcu_implicit_dynticks_qs); } /* Clear flag to prevent immediate re-entry. */ if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { @@ -2600,12 +2600,13 @@ void rcu_check_callbacks(int user) * * The caller must have suppressed start of new grace periods. */ -static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) +static void force_qs_rnp(int (*f)(struct rcu_data *rsp)) { int cpu; unsigned long flags; unsigned long mask; struct rcu_node *rnp; + struct rcu_state *rsp = &rcu_state; rcu_for_each_leaf_node(rsp, rnp) { cond_resched_tasks_rcu_qs(); @@ -2647,12 +2648,13 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp)) * Force quiescent states on reluctant CPUs, and also detect which * CPUs are in dyntick-idle mode. */ -static void force_quiescent_state(struct rcu_state *rsp) +static void force_quiescent_state(void) { unsigned long flags; bool ret; struct rcu_node *rnp; struct rcu_node *rnp_old = NULL; + struct rcu_state *rsp = &rcu_state; /* Funnel through hierarchy to reduce memory contention. 
*/ rnp = __this_cpu_read(rcu_data.mynode); @@ -2859,7 +2861,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, rdp->blimit = LONG_MAX; if (rsp->n_force_qs == rdp->n_force_qs_snap && rcu_segcblist_first_pend_cb(&rdp->cblist) != head) - force_quiescent_state(rsp); + force_quiescent_state(); rdp->n_force_qs_snap = rsp->n_force_qs; rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); } -- cgit v1.2.3 From b96f9dc4fb642b2fa604bc0b64464356ef2b54f5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_check_gp_start_stall() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_check_gp_start_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2644ed685024..f0a9f809de4c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2686,13 +2686,13 @@ static void force_quiescent_state(void) * RCU to come out of its idle mode. */ static void -rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp, - struct rcu_data *rdp) +rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) { const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ; unsigned long flags; unsigned long j; struct rcu_node *rnp_root = rcu_get_root(); + struct rcu_state *rsp = &rcu_state; static atomic_t warned = ATOMIC_INIT(0); if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || @@ -2772,7 +2772,7 @@ __rcu_process_callbacks(struct rcu_state *rsp) local_irq_restore(flags); } - rcu_check_gp_start_stall(rsp, rnp, rdp); + rcu_check_gp_start_stall(rnp, rdp); /* If there are callbacks ready, invoke them. */ if (rcu_segcblist_ready_cbs(&rdp->cblist)) -- cgit v1.2.3 From b049fdf8e3b986c2695642fa2d2ceeec55245fb1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from __rcu_process_callbacks() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from __rcu_process_callbacks(), and also inlines it into rcu_process_callbacks(), removing the for_each_rcu_flavor() while in the neighborhood. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f0a9f809de4c..6c860045eaf4 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2741,17 +2741,19 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) } /* - * This does the RCU core processing work for the specified rcu_state - * and rcu_data structures. This may be called only from the CPU to - * whom the rdp belongs. + * This does the RCU core processing work for the specified rcu_data + * structures. This may be called only from the CPU to whom the rdp + * belongs. 
*/ -static void -__rcu_process_callbacks(struct rcu_state *rsp) +static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) { unsigned long flags; struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; + if (cpu_is_offline(smp_processor_id())) + return; + trace_rcu_utilization(TPS("Start RCU core")); WARN_ON_ONCE(!rdp->beenonline); /* Report any deferred quiescent states if preemption enabled. */ @@ -2780,20 +2782,6 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* Do any needed deferred wakeups of rcuo kthreads. */ do_nocb_deferred_wakeup(rdp); -} - -/* - * Do RCU core processing for the current CPU. - */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) -{ - struct rcu_state *rsp; - - if (cpu_is_offline(smp_processor_id())) - return; - trace_rcu_utilization(TPS("Start RCU core")); - for_each_rcu_flavor(rsp) - __rcu_process_callbacks(rsp); trace_rcu_utilization(TPS("End RCU core")); } -- cgit v1.2.3 From 5c7d89676bc51966ea7882703d15795587e7108c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from __call_rcu() and friend There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from __call_rcu_core() and __call_rcu(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 6c860045eaf4..9f5e67e303c0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2814,8 +2814,8 @@ static void invoke_rcu_core(void) /* * Handle any core-RCU processing required by a call_rcu() invocation. */ -static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, - struct rcu_head *head, unsigned long flags) +static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, + unsigned long flags) { /* * If called from an extended quiescent state, invoke the RCU @@ -2847,10 +2847,10 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, } else { /* Give the grace period a kick. */ rdp->blimit = LONG_MAX; - if (rsp->n_force_qs == rdp->n_force_qs_snap && + if (rcu_state.n_force_qs == rdp->n_force_qs_snap && rcu_segcblist_first_pend_cb(&rdp->cblist) != head) force_quiescent_state(); - rdp->n_force_qs_snap = rsp->n_force_qs; + rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); } } @@ -2870,11 +2870,11 @@ static void rcu_leak_callback(struct rcu_head *rhp) * is expected to specify a CPU. */ static void -__call_rcu(struct rcu_head *head, rcu_callback_t func, - struct rcu_state *rsp, int cpu, bool lazy) +__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) { unsigned long flags; struct rcu_data *rdp; + struct rcu_state __maybe_unused *rsp = &rcu_state; /* Misaligned rcu_head! */ WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); @@ -2932,7 +2932,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, rcu_segcblist_n_cbs(&rdp->cblist)); /* Go handle any RCU core processing required. 
*/ - __call_rcu_core(rsp, rdp, head, flags); + __call_rcu_core(rdp, head, flags); local_irq_restore(flags); } @@ -2973,7 +2973,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, */ void call_rcu(struct rcu_head *head, rcu_callback_t func) { - __call_rcu(head, func, &rcu_state, -1, 0); + __call_rcu(head, func, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu); @@ -3000,7 +3000,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched); void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - __call_rcu(head, func, &rcu_state, -1, 1); + __call_rcu(head, func, -1, 1); } EXPORT_SYMBOL_GPL(kfree_call_rcu); @@ -3272,7 +3272,7 @@ static void _rcu_barrier(struct rcu_state *rsp) smp_mb__before_atomic(); atomic_inc(&rsp->barrier_cpu_count); __call_rcu(&rdp->barrier_head, - rcu_barrier_callback, rsp, cpu, 0); + rcu_barrier_callback, cpu, 0); } } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu, -- cgit v1.2.3 From 98ece508b545bdaa5575ab46c68f17981516f689 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from __rcu_pending() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from __rcu_pending(), and also inlines it into rcu_pending(), removing the for_each_rcu_flavor() while in the neighborhood.. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f5e67e303c0..7ce691348b51 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2997,8 +2997,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched); * callbacks in the list of pending callbacks. Until then, this * function may only be called from __kfree_rcu(). */ -void kfree_call_rcu(struct rcu_head *head, - rcu_callback_t func) +void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { __call_rcu(head, func, -1, 1); } @@ -3080,21 +3079,23 @@ void cond_synchronize_sched(unsigned long oldstate) EXPORT_SYMBOL_GPL(cond_synchronize_sched); /* - * Check to see if there is any immediate RCU-related work to be done - * by the current CPU, for the specified type of RCU, returning 1 if so. - * The checks are in order of increasing expense: checks that can be - * carried out against CPU-local state are performed first. However, - * we must check for CPU stalls first, else we might not get a chance. + * Check to see if there is any immediate RCU-related work to be done by + * the current CPU, for the specified type of RCU, returning 1 if so and + * zero otherwise. The checks are in order of increasing expense: checks + * that can be carried out against CPU-local state are performed first. + * However, we must check for CPU stalls first, else we might not get + * a chance. */ -static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) +static int rcu_pending(void) { + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; /* Check for CPU stalls, if enabled. */ check_cpu_stall(rdp); /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ - if (rcu_nohz_full_cpu(rsp)) + if (rcu_nohz_full_cpu(&rcu_state)) return 0; /* Is the RCU core waiting for a quiescent state from this CPU? 
*/ @@ -3124,21 +3125,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) return 0; } -/* - * Check to see if there is any immediate RCU-related work to be done - * by the current CPU, returning 1 if so. This function is part of the - * RCU implementation; it is -not- an exported member of the RCU API. - */ -static int rcu_pending(void) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - if (__rcu_pending(rsp, this_cpu_ptr(&rcu_data))) - return 1; - return 0; -} - /* * Return true if the specified CPU has any callback. If all_lazy is * non-NULL, store an indication of whether all callbacks are lazy. -- cgit v1.2.3 From 8344b871b1d575ba630ca57448ea4cbc84daba0f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from _rcu_barrier() and friends There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from _rcu_barrier_trace() and _rcu_barrier(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7ce691348b51..d3428d4a68dc 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3156,11 +3156,10 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy) * Helper function for _rcu_barrier() tracing. If tracing is disabled, * the compiler is expected to optimize this away. */ -static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s, - int cpu, unsigned long done) +static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done) { - trace_rcu_barrier(rsp->name, s, cpu, - atomic_read(&rsp->barrier_cpu_count), done); + trace_rcu_barrier(rcu_state.name, s, cpu, + atomic_read(&rcu_state.barrier_cpu_count), done); } /* @@ -3173,11 +3172,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp) struct rcu_state *rsp = rdp->rsp; if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { - _rcu_barrier_trace(rsp, TPS("LastCB"), -1, - rsp->barrier_sequence); + _rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence); complete(&rsp->barrier_completion); } else { - _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("CB"), -1, rsp->barrier_sequence); } } @@ -3189,15 +3187,14 @@ static void rcu_barrier_func(void *type) struct rcu_state *rsp = type; struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); - _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("IRQ"), -1, rsp->barrier_sequence); rdp->barrier_head.func = rcu_barrier_callback; debug_rcu_head_queue(&rdp->barrier_head); if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { atomic_inc(&rsp->barrier_cpu_count); } else { debug_rcu_head_unqueue(&rdp->barrier_head); - _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1, - rsp->barrier_sequence); + _rcu_barrier_trace(TPS("IRQNQ"), -1, rsp->barrier_sequence); } } @@ -3205,21 +3202,21 @@ static void rcu_barrier_func(void *type) * Orchestrate the specified type of RCU barrier, waiting for all * RCU callbacks of the specified type to complete. 
*/ -static void _rcu_barrier(struct rcu_state *rsp) +static void _rcu_barrier(void) { int cpu; struct rcu_data *rdp; + struct rcu_state *rsp = &rcu_state; unsigned long s = rcu_seq_snap(&rsp->barrier_sequence); - _rcu_barrier_trace(rsp, TPS("Begin"), -1, s); + _rcu_barrier_trace(TPS("Begin"), -1, s); /* Take mutex to serialize concurrent rcu_barrier() requests. */ mutex_lock(&rsp->barrier_mutex); /* Did someone else do our work for us? */ if (rcu_seq_done(&rsp->barrier_sequence, s)) { - _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1, - rsp->barrier_sequence); + _rcu_barrier_trace(TPS("EarlyExit"), -1, rsp->barrier_sequence); smp_mb(); /* caller's subsequent code after above check. */ mutex_unlock(&rsp->barrier_mutex); return; @@ -3227,7 +3224,7 @@ static void _rcu_barrier(struct rcu_state *rsp) /* Mark the start of the barrier operation. */ rcu_seq_start(&rsp->barrier_sequence); - _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("Inc1"), -1, rsp->barrier_sequence); /* * Initialize the count to one rather than to zero in order to @@ -3250,10 +3247,10 @@ static void _rcu_barrier(struct rcu_state *rsp) rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_is_nocb_cpu(cpu)) { if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { - _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu, + _rcu_barrier_trace(TPS("OfflineNoCB"), cpu, rsp->barrier_sequence); } else { - _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu, + _rcu_barrier_trace(TPS("OnlineNoCB"), cpu, rsp->barrier_sequence); smp_mb__before_atomic(); atomic_inc(&rsp->barrier_cpu_count); @@ -3261,11 +3258,11 @@ static void _rcu_barrier(struct rcu_state *rsp) rcu_barrier_callback, cpu, 0); } } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { - _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu, + _rcu_barrier_trace(TPS("OnlineQ"), cpu, rsp->barrier_sequence); smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); } else { - _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu, + _rcu_barrier_trace(TPS("OnlineNQ"), cpu, rsp->barrier_sequence); } } @@ -3282,7 +3279,7 @@ static void _rcu_barrier(struct rcu_state *rsp) wait_for_completion(&rsp->barrier_completion); /* Mark the end of the barrier operation. */ - _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("Inc2"), -1, rsp->barrier_sequence); rcu_seq_end(&rsp->barrier_sequence); /* Other rcu_barrier() invocations can now safely proceed. */ @@ -3294,7 +3291,7 @@ static void _rcu_barrier(struct rcu_state *rsp) */ void rcu_barrier_bh(void) { - _rcu_barrier(&rcu_state); + _rcu_barrier(); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -3308,7 +3305,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh); */ void rcu_barrier(void) { - _rcu_barrier(&rcu_state); + _rcu_barrier(); } EXPORT_SYMBOL_GPL(rcu_barrier); -- cgit v1.2.3 From 53b46303da84d611cd281f74a6538d47709b06b5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_boot_init_percpu_data() and friends There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_boot_init_percpu_data(), rcu_init_percpu_data(), rcu_cleanup_dying_idle_cpu(), and rcu_migrate_callbacks(). While in the neighborhood, line the last three into rcutree_prepare_cpu(), rcu_report_dead() and rcutree_migrate_callbacks(), respectively. This also gets rid of the for_each_rcu_flavor() calls that were in those tree functions. 
Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 102 +++++++++++++++++++----------------------------------- 1 file changed, 35 insertions(+), 67 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index d3428d4a68dc..2a49a04a1d98 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3352,7 +3352,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) * Do boot-time initialization of a CPU's per-CPU RCU data. */ static void __init -rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) +rcu_boot_init_percpu_data(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); @@ -3361,23 +3361,25 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) rdp->dynticks = &per_cpu(rcu_dynticks, cpu); WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1); WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks))); - rdp->rcu_ofl_gp_seq = rsp->gp_seq; + rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; - rdp->rcu_onl_gp_seq = rsp->gp_seq; + rdp->rcu_onl_gp_seq = rcu_state.gp_seq; rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; rdp->cpu = cpu; - rdp->rsp = rsp; + rdp->rsp = &rcu_state; rcu_boot_init_nocb_percpu_data(rdp); } /* - * Initialize a CPU's per-CPU RCU data. Note that only one online or + * Invoked early in the CPU-online process, when pretty much all services + * are available. The incoming CPU is not present. + * + * Initializes a CPU's per-CPU RCU data. Note that only one online or * offline event can be happening at a given time. Note also that we can * accept some slop in the rsp->gp_seq access due to the fact that this * CPU cannot possibly have any RCU callbacks in flight yet. */ -static void -rcu_init_percpu_data(int cpu, struct rcu_state *rsp) +int rcutree_prepare_cpu(unsigned int cpu) { unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); @@ -3386,7 +3388,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) /* Set up local state, ensuring consistent view of global state. */ raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp->qlen_last_fqs_check = 0; - rdp->n_force_qs_snap = rsp->n_force_qs; + rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp->blimit = blimit; if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ !init_nocb_callback_list(rdp)) @@ -3410,21 +3412,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) rdp->core_needs_qs = false; rdp->rcu_iw_pending = false; rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; - trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl")); + trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); -} - -/* - * Invoked early in the CPU-online process, when pretty much all - * services are available. The incoming CPU is not present. - */ -int rcutree_prepare_cpu(unsigned int cpu) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - rcu_init_percpu_data(cpu, rsp); - rcu_prepare_kthreads(cpu); rcu_spawn_all_nocb_kthreads(cpu); @@ -3548,23 +3537,32 @@ void rcu_cpu_starting(unsigned int cpu) #ifdef CONFIG_HOTPLUG_CPU /* - * The CPU is exiting the idle loop into the arch_cpu_idle_dead() - * function. We now remove it from the rcu_node tree's ->qsmaskinitnext - * bit masks. + * The outgoing function has no further need of RCU, so remove it from + * the rcu_node tree's ->qsmaskinitnext bit masks. + * + * Note that this function is special in that it is invoked directly + * from the outgoing CPU rather than from the cpuhp_step mechanism. 
+ * This is because this function must be invoked at a precise location. */ -static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) +void rcu_report_dead(unsigned int cpu) { unsigned long flags; unsigned long mask; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ + /* QS for any half-done expedited RCU-sched GP. */ + preempt_disable(); + rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); + preempt_enable(); + rcu_preempt_deferred_qs(current); + /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ mask = rdp->grpmask; - spin_lock(&rsp->ofl_lock); + spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ - rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq); - rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags); + rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); + rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ /* Report quiescent state -before- changing ->qsmaskinitnext! */ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); @@ -3572,34 +3570,17 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) } rnp->qsmaskinitnext &= ~mask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - spin_unlock(&rsp->ofl_lock); -} - -/* - * The outgoing function has no further need of RCU, so remove it from - * the list of CPUs that RCU must track. - * - * Note that this function is special in that it is invoked directly - * from the outgoing CPU rather than from the cpuhp_step mechanism. - * This is because this function must be invoked at a precise location. - */ -void rcu_report_dead(unsigned int cpu) -{ - struct rcu_state *rsp; - - /* QS for any half-done expedited RCU-sched GP. */ - preempt_disable(); - rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); - preempt_enable(); - rcu_preempt_deferred_qs(current); - for_each_rcu_flavor(rsp) - rcu_cleanup_dying_idle_cpu(cpu, rsp); + spin_unlock(&rcu_state.ofl_lock); per_cpu(rcu_cpu_started, cpu) = 0; } -/* Migrate the dead CPU's callbacks to the current CPU. */ -static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) +/* + * The outgoing CPU has just passed through the dying-idle state, and we + * are being invoked from the CPU that was IPIed to continue the offline + * operation. Migrate the outgoing CPU's callbacks to the current CPU. + */ +void rcutree_migrate_callbacks(int cpu) { unsigned long flags; struct rcu_data *my_rdp; @@ -3632,19 +3613,6 @@ static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp) cpu, rcu_segcblist_n_cbs(&rdp->cblist), rcu_segcblist_first_cb(&rdp->cblist)); } - -/* - * The outgoing CPU has just passed through the dying-idle state, - * and we are being invoked from the CPU that was IPIed to continue the - * offline operation. We need to migrate the outgoing CPU's callbacks. - */ -void rcutree_migrate_callbacks(int cpu) -{ - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - rcu_migrate_callbacks(cpu, rsp); -} #endif /* @@ -3814,7 +3782,7 @@ static void __init rcu_init_one(struct rcu_state *rsp) while (i > rnp->grphi) rnp++; per_cpu_ptr(&rcu_data, i)->mynode = rnp; - rcu_boot_init_percpu_data(i, rsp); + rcu_boot_init_percpu_data(i); } list_add(&rsp->flavors, &rcu_struct_flavors); } -- cgit v1.2.3 From b8bb1f63cf9ac43fc3015449843fe1f81c1b31a6 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_init_one() and friends There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_init_one() and rcu_dump_rcu_node_tree(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2a49a04a1d98..0b274530e8a8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3708,7 +3708,7 @@ void rcu_scheduler_starting(void) /* * Helper function for rcu_init() that initializes one rcu_state structure. */ -static void __init rcu_init_one(struct rcu_state *rsp) +static void __init rcu_init_one(void) { static const char * const buf[] = RCU_NODE_NAME_INIT; static const char * const fqs[] = RCU_FQS_NAME_INIT; @@ -3720,6 +3720,7 @@ static void __init rcu_init_one(struct rcu_state *rsp) int i; int j; struct rcu_node *rnp; + struct rcu_state *rsp = &rcu_state; BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ @@ -3870,14 +3871,14 @@ static void __init rcu_init_geometry(void) * Dump out the structure of the rcu_node combining tree associated * with the rcu_state structure referenced by rsp. */ -static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp) +static void __init rcu_dump_rcu_node_tree(void) { int level = 0; struct rcu_node *rnp; pr_info("rcu_node tree layout dump\n"); pr_info(" "); - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(&rcu_state, rnp) { if (rnp->level != level) { pr_cont("\n"); pr_info(" "); @@ -3899,9 +3900,9 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); - rcu_init_one(&rcu_state); + rcu_init_one(); if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_state); + rcu_dump_rcu_node_tree(); open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* -- cgit v1.2.3 From a2887cd85f38cf2fdbf42bad97e5c412d99ff5ca Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_print_detail_task_stall() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_print_detail_task_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0b274530e8a8..130ce5eebdfa 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1360,7 +1360,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) rcu_dump_cpu_stacks(); /* Complain about tasks blocking the grace period. 
*/ - rcu_print_detail_task_stall(rsp); + rcu_print_detail_task_stall(); } else { if (rcu_seq_current(&rsp->gp_seq) != gp_seq) { pr_err("INFO: Stall ended before state dump start\n"); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index d60304f1ef56..00d268cb4d04 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -452,7 +452,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static bool rcu_preempt_has_tasks(struct rcu_node *rnp); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ -static void rcu_print_detail_task_stall(struct rcu_state *rsp); +static void rcu_print_detail_task_stall(void); static int rcu_print_task_stall(struct rcu_node *rnp); static int rcu_print_task_exp_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c678c76a754e..1d8148b0d4e5 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -683,12 +683,12 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) * Dump detailed information for all tasks blocking the current RCU * grace period. */ -static void rcu_print_detail_task_stall(struct rcu_state *rsp) +static void rcu_print_detail_task_stall(void) { struct rcu_node *rnp = rcu_get_root(); rcu_print_detail_task_stall_rnp(rnp); - rcu_for_each_leaf_node(rsp, rnp) + rcu_for_each_leaf_node(&rcu_state, rnp) rcu_print_detail_task_stall_rnp(rnp); } @@ -1005,7 +1005,7 @@ static void rcu_preempt_deferred_qs(struct task_struct *t) { } * Because preemptible RCU does not exist, we never have to check for * tasks blocked within RCU read-side critical sections. */ -static void rcu_print_detail_task_stall(struct rcu_state *rsp) +static void rcu_print_detail_task_stall(void) { } -- cgit v1.2.3 From 81ab59a3ad8656620d7106e855085bc12dc13a4c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from dump_blkd_tasks() and friend There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from dump_blkd_tasks() and rcu_preempt_blocked_readers_cgp(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 4 ++-- kernel/rcu/tree.h | 6 ++---- kernel/rcu/tree_plugin.h | 12 +++++------- 3 files changed, 9 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 130ce5eebdfa..0d69f198390b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1937,7 +1937,7 @@ static bool rcu_gp_init(void) rcu_gp_slow(gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp = this_cpu_ptr(&rcu_data); - rcu_preempt_check_blocked_tasks(rsp, rnp); + rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; WRITE_ONCE(rnp->gp_seq, rsp->gp_seq); if (rnp == rdp->mynode) @@ -2049,7 +2049,7 @@ static void rcu_gp_cleanup(void) rcu_for_each_node_breadth_first(rsp, rnp) { raw_spin_lock_irq_rcu_node(rnp); if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) - dump_blkd_tasks(rsp, rnp, 10); + dump_blkd_tasks(rnp, 10); WARN_ON_ONCE(rnp->qsmask); WRITE_ONCE(rnp->gp_seq, new_gp_seq); rdp = this_cpu_ptr(&rcu_data); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 00d268cb4d04..ccdee6bd3919 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -455,12 +455,10 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp); static void rcu_print_detail_task_stall(void); static int rcu_print_task_stall(struct rcu_node *rnp); static int rcu_print_task_exp_stall(struct rcu_node *rnp); -static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, - struct rcu_node *rnp); +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); static void rcu_flavor_check_callbacks(int user); void call_rcu(struct rcu_head *head, rcu_callback_t func); -static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, - int ncheck); +static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static void invoke_rcu_callbacks_kthread(void); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1d8148b0d4e5..9a3d30121815 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -756,14 +756,13 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp) * Also, if there are blocked tasks on the list, they automatically * block the newly created grace period, so set up ->gp_tasks accordingly. */ -static void -rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { struct task_struct *t; RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n"); if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) - dump_blkd_tasks(rsp, rnp, 10); + dump_blkd_tasks(rnp, 10); if (rcu_preempt_has_tasks(rnp) && (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { rnp->gp_tasks = rnp->blkd_tasks.next; @@ -884,7 +883,7 @@ void exit_rcu(void) * specified number of elements. */ static void -dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) +dump_blkd_tasks(struct rcu_node *rnp, int ncheck) { int cpu; int i; @@ -1033,8 +1032,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp) * so there is no need to check for blocked tasks. So check only for * bogus qsmask values. 
*/ -static void -rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp) +static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) { WARN_ON_ONCE(rnp->qsmask); } @@ -1095,7 +1093,7 @@ void exit_rcu(void) * Dump the guaranteed-empty blocked-tasks state. Trust but verify. */ static void -dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) +dump_blkd_tasks(struct rcu_node *rnp, int ncheck) { WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); } -- cgit v1.2.3 From 6dbfdc1409cf07accf7c97475c3b58d46daa319b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from rcu_spawn_one_boost_kthread() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from rcu_spawn_one_boost_kthread(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 4 ---- kernel/rcu/tree_plugin.h | 13 ++++++------- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index ccdee6bd3919..dc1c337f6da9 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -463,10 +463,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); -#ifdef CONFIG_RCU_BOOST -static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp); -#endif /* #ifdef CONFIG_RCU_BOOST */ static void __init rcu_spawn_boost_kthreads(void); static void rcu_prepare_kthreads(int cpu); static void rcu_cleanup_after_idle(void); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 9a3d30121815..9a6dea5fab86 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1290,21 +1290,20 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) * already exist. We only create this kthread for preemptible RCU. * Returns zero if all is well, a negated errno otherwise. */ -static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp) +static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp) { - int rnp_index = rnp - &rsp->node[0]; + int rnp_index = rnp - rcu_get_root(); unsigned long flags; struct sched_param sp; struct task_struct *t; - if (&rcu_state != rsp) + if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) return 0; if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) return 0; - rsp->boost = 1; + rcu_state.boost = 1; if (rnp->boost_kthread_task != NULL) return 0; t = kthread_create(rcu_boost_kthread, (void *)rnp, @@ -1430,7 +1429,7 @@ static void __init rcu_spawn_boost_kthreads(void) per_cpu(rcu_cpu_has_work, cpu) = 0; BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rcu_for_each_leaf_node(&rcu_state, rnp) - (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp); + (void)rcu_spawn_one_boost_kthread(rnp); } static void rcu_prepare_kthreads(int cpu) @@ -1440,7 +1439,7 @@ static void rcu_prepare_kthreads(int cpu) /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ if (rcu_scheduler_fully_active) - (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp); + (void)rcu_spawn_one_boost_kthread(rnp); } #else /* #ifdef CONFIG_RCU_BOOST */ -- cgit v1.2.3 From b21ebed951010acccbe9a55337d16cf4da4cce0a Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from print_cpu_stall_info() There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from print_cpu_stall_info(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++-- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0d69f198390b..1042863dab52 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1342,7 +1342,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) if (rnp->qsmask != 0) { for_each_leaf_node_possible_cpu(rnp, cpu) if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { - print_cpu_stall_info(rsp, cpu); + print_cpu_stall_info(cpu); ndetected++; } } @@ -1409,7 +1409,7 @@ static void print_cpu_stall(void) pr_err("INFO: %s self-detected stall on CPU", rsp->name); print_cpu_stall_info_begin(); raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); - print_cpu_stall_info(rsp, smp_processor_id()); + print_cpu_stall_info(smp_processor_id()); raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); print_cpu_stall_info_end(); for_each_possible_cpu(cpu) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index dc1c337f6da9..2bf57de9f78a 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -472,7 +472,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp); static bool rcu_preempt_need_deferred_qs(struct task_struct *t); static void rcu_preempt_deferred_qs(struct task_struct *t); static void print_cpu_stall_info_begin(void); -static void print_cpu_stall_info(struct rcu_state *rsp, int cpu); +static void print_cpu_stall_info(int cpu); static void print_cpu_stall_info_end(void); static void zero_cpu_stall_ticks(struct rcu_data *rdp); static void increment_cpu_stall_ticks(void); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 9a6dea5fab86..08ff162e02b3 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1771,7 +1771,7 @@ static void print_cpu_stall_info_begin(void) * * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. */ -static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) +static void print_cpu_stall_info(int cpu) { unsigned long delta; char fast_no_hz[72]; @@ -1786,7 +1786,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) */ touch_nmi_watchdog(); - ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq); + ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq); if (ticks_value) { ticks_title = "GPs behind"; } else { @@ -1807,7 +1807,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) rcu_dynticks_snap(rdtp) & 0xfff, rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), - READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, + READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, fast_no_hz); } -- cgit v1.2.3 From 4580b0541beac895a9ba9a4b6f60aec94355bfdd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from no-CBs CPU functions There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. 
This commit therefore removes the rsp parameter from rcu_nocb_cpu_needs_barrier(), rcu_spawn_one_nocb_kthread(), rcu_organize_nocb_kthreads(), rcu_nocb_cpu_needs_barrier(), and rcu_nohz_full_cpu(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++-- kernel/rcu/tree.h | 6 +++--- kernel/rcu/tree_plugin.h | 18 +++++++++--------- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1042863dab52..1fbe6c60adc6 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3095,7 +3095,7 @@ static int rcu_pending(void) check_cpu_stall(rdp); /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ - if (rcu_nohz_full_cpu(&rcu_state)) + if (rcu_nohz_full_cpu()) return 0; /* Is the RCU core waiting for a quiescent state from this CPU? */ @@ -3246,7 +3246,7 @@ static void _rcu_barrier(void) continue; rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_is_nocb_cpu(cpu)) { - if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) { + if (!rcu_nocb_cpu_needs_barrier(cpu)) { _rcu_barrier_trace(TPS("OfflineNoCB"), cpu, rsp->barrier_sequence); } else { diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 2bf57de9f78a..7c6033d71e9d 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -476,7 +476,7 @@ static void print_cpu_stall_info(int cpu); static void print_cpu_stall_info_end(void); static void zero_cpu_stall_ticks(struct rcu_data *rdp); static void increment_cpu_stall_ticks(void); -static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); +static bool rcu_nocb_cpu_needs_barrier(int cpu); static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); static void rcu_init_one_nocb(struct rcu_node *rnp); @@ -491,11 +491,11 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); static void rcu_spawn_all_nocb_kthreads(int cpu); static void __init rcu_spawn_nocb_kthreads(void); #ifdef CONFIG_RCU_NOCB_CPU -static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp); +static void __init rcu_organize_nocb_kthreads(void); #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ static bool init_nocb_callback_list(struct rcu_data *rdp); static void rcu_bind_gp_kthread(void); -static bool rcu_nohz_full_cpu(struct rcu_state *rsp); +static bool rcu_nohz_full_cpu(void); static void rcu_dynticks_task_enter(void); static void rcu_dynticks_task_exit(void); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 08ff162e02b3..69705ec13527 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1960,7 +1960,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype, * Does the specified CPU need an RCU callback for the specified flavor * of rcu_barrier()? */ -static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) +static bool rcu_nocb_cpu_needs_barrier(int cpu) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); unsigned long ret; @@ -2424,7 +2424,7 @@ void __init rcu_init_nohz(void) for_each_rcu_flavor(rsp) { for_each_cpu(cpu, rcu_nocb_mask) init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); - rcu_organize_nocb_kthreads(rsp); + rcu_organize_nocb_kthreads(); } } @@ -2444,7 +2444,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) * brought online out of order, this can require re-organizing the * leader-follower relationships. 
*/ -static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) +static void rcu_spawn_one_nocb_kthread(int cpu) { struct rcu_data *rdp; struct rcu_data *rdp_last; @@ -2481,7 +2481,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) /* Spawn the kthread for this CPU and RCU flavor. */ t = kthread_run(rcu_nocb_kthread, rdp_spawn, - "rcuo%c/%d", rsp->abbr, cpu); + "rcuo%c/%d", rcu_state.abbr, cpu); BUG_ON(IS_ERR(t)); WRITE_ONCE(rdp_spawn->nocb_kthread, t); } @@ -2496,7 +2496,7 @@ static void rcu_spawn_all_nocb_kthreads(int cpu) if (rcu_scheduler_fully_active) for_each_rcu_flavor(rsp) - rcu_spawn_one_nocb_kthread(rsp, cpu); + rcu_spawn_one_nocb_kthread(cpu); } /* @@ -2520,7 +2520,7 @@ module_param(rcu_nocb_leader_stride, int, 0444); /* * Initialize leader-follower relationships for all no-CBs CPU. */ -static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp) +static void __init rcu_organize_nocb_kthreads(void) { int cpu; int ls = rcu_nocb_leader_stride; @@ -2579,7 +2579,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ -static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) +static bool rcu_nocb_cpu_needs_barrier(int cpu) { WARN_ON_ONCE(1); /* Should be dead code. */ return false; @@ -2648,12 +2648,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) * This code relies on the fact that all NO_HZ_FULL CPUs are also * CONFIG_RCU_NOCB_CPU CPUs. */ -static bool rcu_nohz_full_cpu(struct rcu_state *rsp) +static bool rcu_nohz_full_cpu(void) { #ifdef CONFIG_NO_HZ_FULL if (tick_nohz_full_cpu(smp_processor_id()) && (!rcu_gp_in_progress() || - ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ))) + ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ))) return true; #endif /* #ifdef CONFIG_NO_HZ_FULL */ return false; -- cgit v1.2.3 From 63d4c8c97948b0be8cb7ef3b7b943c25864eae4b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Jul 2018 17:22:34 -0700 Subject: rcu: Remove rsp parameter from expedited grace-period functions There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's functions. This commit therefore removes the rsp parameter from the code in kernel/rcu/tree_exp.h, and removes all of the rsp local variables while in the area. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 +- kernel/rcu/tree.h | 1 - kernel/rcu/tree_exp.h | 185 ++++++++++++++++++++++------------------------- kernel/rcu/tree_plugin.h | 13 ++-- 4 files changed, 94 insertions(+), 109 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1fbe6c60adc6..e33bf2aeac50 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -139,7 +139,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_data *rdp); -static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp); +static void rcu_report_exp_rdp(struct rcu_data *rdp); static void sync_sched_exp_online_cleanup(int cpu); /* rcuc/rcub kthread realtime priority */ @@ -3553,7 +3553,7 @@ void rcu_report_dead(unsigned int cpu) /* QS for any half-done expedited RCU-sched GP. 
*/ preempt_disable(); - rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); + rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); preempt_enable(); rcu_preempt_deferred_qs(current); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 7c6033d71e9d..b21d79bdab23 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -61,7 +61,6 @@ struct rcu_dynticks { /* Communicate arguments to a workqueue handler. */ struct rcu_exp_work { smp_call_func_t rew_func; - struct rcu_state *rew_rsp; unsigned long rew_s; struct work_struct rew_work; }; diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0bcbb03c9702..b6f7bc34ac49 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -25,39 +25,39 @@ /* * Record the start of an expedited grace period. */ -static void rcu_exp_gp_seq_start(struct rcu_state *rsp) +static void rcu_exp_gp_seq_start(void) { - rcu_seq_start(&rsp->expedited_sequence); + rcu_seq_start(&rcu_state.expedited_sequence); } /* * Return then value that expedited-grace-period counter will have * at the end of the current grace period. */ -static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp) +static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void) { - return rcu_seq_endval(&rsp->expedited_sequence); + return rcu_seq_endval(&rcu_state.expedited_sequence); } /* * Record the end of an expedited grace period. */ -static void rcu_exp_gp_seq_end(struct rcu_state *rsp) +static void rcu_exp_gp_seq_end(void) { - rcu_seq_end(&rsp->expedited_sequence); + rcu_seq_end(&rcu_state.expedited_sequence); smp_mb(); /* Ensure that consecutive grace periods serialize. */ } /* * Take a snapshot of the expedited-grace-period counter. */ -static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp) +static unsigned long rcu_exp_gp_seq_snap(void) { unsigned long s; smp_mb(); /* Caller's modifications seen first by other CPUs. */ - s = rcu_seq_snap(&rsp->expedited_sequence); - trace_rcu_exp_grace_period(rsp->name, s, TPS("snap")); + s = rcu_seq_snap(&rcu_state.expedited_sequence); + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap")); return s; } @@ -66,9 +66,9 @@ static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp) * if a full expedited grace period has elapsed since that snapshot * was taken. */ -static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s) +static bool rcu_exp_gp_seq_done(unsigned long s) { - return rcu_seq_done(&rsp->expedited_sequence, s); + return rcu_seq_done(&rcu_state.expedited_sequence, s); } /* @@ -78,26 +78,26 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s) * ever been online. This means that this function normally takes its * no-work-to-do fastpath. */ -static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp) +static void sync_exp_reset_tree_hotplug(void) { bool done; unsigned long flags; unsigned long mask; unsigned long oldmask; - int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */ + int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */ struct rcu_node *rnp; struct rcu_node *rnp_up; /* If no new CPUs onlined since last time, nothing to do. */ - if (likely(ncpus == rsp->ncpus_snap)) + if (likely(ncpus == rcu_state.ncpus_snap)) return; - rsp->ncpus_snap = ncpus; + rcu_state.ncpus_snap = ncpus; /* * Each pass through the following loop propagates newly onlined * CPUs for the current rcu_node structure up the rcu_node tree. 
*/ - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(&rcu_state, rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->expmaskinit == rnp->expmaskinitnext) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -135,13 +135,13 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp) * Reset the ->expmask values in the rcu_node tree in preparation for * a new expedited grace period. */ -static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp) +static void __maybe_unused sync_exp_reset_tree(void) { unsigned long flags; struct rcu_node *rnp; - sync_exp_reset_tree_hotplug(rsp); - rcu_for_each_node_breadth_first(rsp, rnp) { + sync_exp_reset_tree_hotplug(); + rcu_for_each_node_breadth_first(&rcu_state, rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); WARN_ON_ONCE(rnp->expmask); rnp->expmask = rnp->expmaskinit; @@ -194,7 +194,7 @@ static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp) * * Caller must hold the specified rcu_node structure's ->lock. */ -static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, +static void __rcu_report_exp_rnp(struct rcu_node *rnp, bool wake, unsigned long flags) __releases(rnp->lock) { @@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (wake) { smp_mb(); /* EGP done before wake_up(). */ - swake_up_one(&rsp->expedited_wq); + swake_up_one(&rcu_state.expedited_wq); } break; } @@ -229,20 +229,19 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, * Report expedited quiescent state for specified node. This is a * lock-acquisition wrapper function for __rcu_report_exp_rnp(). */ -static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp, - struct rcu_node *rnp, bool wake) +static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake) { unsigned long flags; raw_spin_lock_irqsave_rcu_node(rnp, flags); - __rcu_report_exp_rnp(rsp, rnp, wake, flags); + __rcu_report_exp_rnp(rnp, wake, flags); } /* * Report expedited quiescent state for multiple CPUs, all covered by the * specified leaf rcu_node structure. */ -static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, +static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, unsigned long mask, bool wake) { unsigned long flags; @@ -253,23 +252,23 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp, return; } rnp->expmask &= ~mask; - __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */ + __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */ } /* * Report expedited quiescent state for specified rcu_data (CPU). */ -static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp) +static void rcu_report_exp_rdp(struct rcu_data *rdp) { WRITE_ONCE(rdp->deferred_qs, false); - rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true); + rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true); } /* Common code for work-done checking. */ -static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s) +static bool sync_exp_work_done(unsigned long s) { - if (rcu_exp_gp_seq_done(rsp, s)) { - trace_rcu_exp_grace_period(rsp->name, s, TPS("done")); + if (rcu_exp_gp_seq_done(s)) { + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done")); /* Ensure test happens before caller kfree(). 
*/ smp_mb__before_atomic(); /* ^^^ */ return true; @@ -284,7 +283,7 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s) * with the mutex held, indicating that the caller must actually do the * expedited grace period. */ -static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s) +static bool exp_funnel_lock(unsigned long s) { struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); struct rcu_node *rnp = rdp->mynode; @@ -294,18 +293,18 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s) if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && (rnp == rnp_root || ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) && - mutex_trylock(&rsp->exp_mutex)) + mutex_trylock(&rcu_state.exp_mutex)) goto fastpath; /* * Each pass through the following loop works its way up * the rcu_node tree, returning if others have done the work or - * otherwise falls through to acquire rsp->exp_mutex. The mapping + * otherwise falls through to acquire ->exp_mutex. The mapping * from CPU to rcu_node structure can be inexact, as it is just * promoting locality and is not strictly needed for correctness. */ for (; rnp != NULL; rnp = rnp->parent) { - if (sync_exp_work_done(rsp, s)) + if (sync_exp_work_done(s)) return true; /* Work not done, either wait here or go up. */ @@ -314,26 +313,26 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s) /* Someone else doing GP, so wait for them. */ spin_unlock(&rnp->exp_lock); - trace_rcu_exp_funnel_lock(rsp->name, rnp->level, + trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, rnp->grplo, rnp->grphi, TPS("wait")); wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], - sync_exp_work_done(rsp, s)); + sync_exp_work_done(s)); return true; } rnp->exp_seq_rq = s; /* Followers can wait on us. */ spin_unlock(&rnp->exp_lock); - trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo, - rnp->grphi, TPS("nxtlvl")); + trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, + rnp->grplo, rnp->grphi, TPS("nxtlvl")); } - mutex_lock(&rsp->exp_mutex); + mutex_lock(&rcu_state.exp_mutex); fastpath: - if (sync_exp_work_done(rsp, s)) { - mutex_unlock(&rsp->exp_mutex); + if (sync_exp_work_done(s)) { + mutex_unlock(&rcu_state.exp_mutex); return true; } - rcu_exp_gp_seq_start(rsp); - trace_rcu_exp_grace_period(rsp->name, s, TPS("start")); + rcu_exp_gp_seq_start(); + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start")); return false; } @@ -352,7 +351,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) struct rcu_exp_work *rewp = container_of(wp, struct rcu_exp_work, rew_work); struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); - struct rcu_state *rsp = rewp->rew_rsp; func = rewp->rew_func; raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -400,7 +398,7 @@ retry_ipi: mask_ofl_test |= mask; continue; } - ret = smp_call_function_single(cpu, func, rsp, 0); + ret = smp_call_function_single(cpu, func, NULL, 0); if (!ret) { mask_ofl_ipi &= ~mask; continue; @@ -411,7 +409,7 @@ retry_ipi: (rnp->expmask & mask)) { /* Online, so delay for a bit and try again. */ raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl")); + trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl")); schedule_timeout_uninterruptible(1); goto retry_ipi; } @@ -423,33 +421,31 @@ retry_ipi: /* Report quiescent states for those that went offline. 
*/ mask_ofl_test |= mask_ofl_ipi; if (mask_ofl_test) - rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false); + rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false); } /* * Select the nodes that the upcoming expedited grace period needs * to wait for. */ -static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, - smp_call_func_t func) +static void sync_rcu_exp_select_cpus(smp_call_func_t func) { int cpu; struct rcu_node *rnp; - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); - sync_exp_reset_tree(rsp); - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select")); + trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset")); + sync_exp_reset_tree(); + trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select")); /* Schedule work for each leaf rcu_node structure. */ - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(&rcu_state, rnp) { rnp->exp_need_flush = false; if (!READ_ONCE(rnp->expmask)) continue; /* Avoid early boot non-existent wq. */ rnp->rew.rew_func = func; - rnp->rew.rew_rsp = rsp; if (!READ_ONCE(rcu_par_gp_wq) || rcu_scheduler_active != RCU_SCHEDULER_RUNNING || - rcu_is_last_leaf_node(rsp, rnp)) { + rcu_is_last_leaf_node(&rcu_state, rnp)) { /* No workqueues yet or last leaf, do direct call. */ sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); continue; @@ -466,12 +462,12 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, } /* Wait for workqueue jobs (if any) to complete. */ - rcu_for_each_leaf_node(rsp, rnp) + rcu_for_each_leaf_node(&rcu_state, rnp) if (rnp->exp_need_flush) flush_work(&rnp->rew.rew_work); } -static void synchronize_sched_expedited_wait(struct rcu_state *rsp) +static void synchronize_sched_expedited_wait(void) { int cpu; unsigned long jiffies_stall; @@ -482,13 +478,13 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) struct rcu_node *rnp_root = rcu_get_root(); int ret; - trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait")); + trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait")); jiffies_stall = rcu_jiffies_till_stall_check(); jiffies_start = jiffies; for (;;) { ret = swait_event_timeout_exclusive( - rsp->expedited_wq, + rcu_state.expedited_wq, sync_rcu_preempt_exp_done_unlocked(rnp_root), jiffies_stall); if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root)) @@ -498,9 +494,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) continue; panic_on_rcu_stall(); pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", - rsp->name); + rcu_state.name); ndetected = 0; - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(&rcu_state, rnp) { ndetected += rcu_print_task_exp_stall(rnp); for_each_leaf_node_possible_cpu(rnp, cpu) { struct rcu_data *rdp; @@ -517,11 +513,11 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) } } pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n", - jiffies - jiffies_start, rsp->expedited_sequence, + jiffies - jiffies_start, rcu_state.expedited_sequence, rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]); if (ndetected) { pr_err("blocking rcu_node structures:"); - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(&rcu_state, rnp) { if (rnp == rnp_root) continue; /* printed unconditionally */ if (sync_rcu_preempt_exp_done_unlocked(rnp)) @@ -533,7 +529,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) } pr_cont("\n"); } - 
rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(&rcu_state, rnp) { for_each_leaf_node_possible_cpu(rnp, cpu) { mask = leaf_node_cpu_bit(rnp, cpu); if (!(rnp->expmask & mask)) @@ -551,21 +547,21 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) * grace period. Also update all the ->exp_seq_rq counters as needed * in order to avoid counter-wrap problems. */ -static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s) +static void rcu_exp_wait_wake(unsigned long s) { struct rcu_node *rnp; - synchronize_sched_expedited_wait(rsp); - rcu_exp_gp_seq_end(rsp); - trace_rcu_exp_grace_period(rsp->name, s, TPS("end")); + synchronize_sched_expedited_wait(); + rcu_exp_gp_seq_end(); + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end")); /* * Switch over to wakeup mode, allowing the next GP, but -only- the * next GP, to proceed. */ - mutex_lock(&rsp->exp_wake_mutex); + mutex_lock(&rcu_state.exp_wake_mutex); - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(&rcu_state, rnp) { if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { spin_lock(&rnp->exp_lock); /* Recheck, avoid hang in case someone just arrived. */ @@ -574,24 +570,23 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s) spin_unlock(&rnp->exp_lock); } smp_mb(); /* All above changes before wakeup. */ - wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]); + wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]); } - trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake")); - mutex_unlock(&rsp->exp_wake_mutex); + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake")); + mutex_unlock(&rcu_state.exp_wake_mutex); } /* * Common code to drive an expedited grace period forward, used by * workqueues and mid-boot-time tasks. */ -static void rcu_exp_sel_wait_wake(struct rcu_state *rsp, - smp_call_func_t func, unsigned long s) +static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s) { /* Initialize the rcu_node tree in preparation for the wait. */ - sync_rcu_exp_select_cpus(rsp, func); + sync_rcu_exp_select_cpus(func); /* Wait and clean up, including waking everyone. */ - rcu_exp_wait_wake(rsp, s); + rcu_exp_wait_wake(s); } /* @@ -602,15 +597,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp) struct rcu_exp_work *rewp; rewp = container_of(wp, struct rcu_exp_work, rew_work); - rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s); + rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s); } /* * Given an rcu_state pointer and a smp_call_function() handler, kick * off the specified flavor of expedited grace period. */ -static void _synchronize_rcu_expedited(struct rcu_state *rsp, - smp_call_func_t func) +static void _synchronize_rcu_expedited(smp_call_func_t func) { struct rcu_data *rdp; struct rcu_exp_work rew; @@ -624,18 +618,17 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, } /* Take a snapshot of the sequence number. */ - s = rcu_exp_gp_seq_snap(rsp); - if (exp_funnel_lock(rsp, s)) + s = rcu_exp_gp_seq_snap(); + if (exp_funnel_lock(s)) return; /* Someone else did our work for us. */ /* Ensure that load happens before action based on it. */ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) { /* Direct call during scheduler init and early_initcalls(). */ - rcu_exp_sel_wait_wake(rsp, func, s); + rcu_exp_sel_wait_wake(func, s); } else { /* Marshall arguments & schedule the expedited grace period. 
*/ rew.rew_func = func; - rew.rew_rsp = rsp; rew.rew_s = s; INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); queue_work(rcu_gp_wq, &rew.rew_work); @@ -645,11 +638,11 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); rnp = rcu_get_root(); wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], - sync_exp_work_done(rsp, s)); + sync_exp_work_done(s)); smp_mb(); /* Workqueue actions happen before return. */ /* Let the next expedited grace period start. */ - mutex_unlock(&rsp->exp_mutex); + mutex_unlock(&rcu_state.exp_mutex); } #ifdef CONFIG_PREEMPT_RCU @@ -661,10 +654,9 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp, * ->expmask fields in the rcu_node tree. Otherwise, immediately * report the quiescent state. */ -static void sync_rcu_exp_handler(void *info) +static void sync_rcu_exp_handler(void *unused) { unsigned long flags; - struct rcu_state *rsp = info; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; struct task_struct *t = current; @@ -677,7 +669,7 @@ static void sync_rcu_exp_handler(void *info) if (!t->rcu_read_lock_nesting) { if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || rcu_dynticks_curr_cpu_in_eqs()) { - rcu_report_exp_rdp(rsp, rdp); + rcu_report_exp_rdp(rdp); } else { rdp->deferred_qs = true; resched_cpu(rdp->cpu); @@ -756,8 +748,6 @@ static void sync_sched_exp_online_cleanup(int cpu) */ void synchronize_rcu_expedited(void) { - struct rcu_state *rsp = &rcu_state; - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), @@ -765,7 +755,7 @@ void synchronize_rcu_expedited(void) if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return; - _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); + _synchronize_rcu_expedited(sync_rcu_exp_handler); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); @@ -783,7 +773,7 @@ static void sync_sched_exp_handler(void *unused) __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) return; if (rcu_is_cpu_rrupt_from_idle()) { - rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); + rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); return; } __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true); @@ -798,13 +788,12 @@ static void sync_sched_exp_online_cleanup(int cpu) struct rcu_data *rdp; int ret; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; if (!(READ_ONCE(rnp->expmask) & rdp->grpmask)) return; - ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0); + ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0); WARN_ON_ONCE(ret); } @@ -831,8 +820,6 @@ static int rcu_blocking_is_gp(void) /* PREEMPT=n implementation of synchronize_rcu_expedited(). 
*/ void synchronize_rcu_expedited(void) { - struct rcu_state *rsp = &rcu_state; - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), @@ -842,7 +829,7 @@ void synchronize_rcu_expedited(void) if (rcu_blocking_is_gp()) return; - _synchronize_rcu_expedited(rsp, sync_sched_exp_handler); + _synchronize_rcu_expedited(sync_sched_exp_handler); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 69705ec13527..e6ec25e47d00 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -123,8 +123,7 @@ static void __init rcu_bootup_announce_oddness(void) #ifdef CONFIG_PREEMPT_RCU -static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, - bool wake); +static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake); static void rcu_read_unlock_special(struct task_struct *t); /* @@ -281,7 +280,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) * still in a quiescent state in any case.) */ if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs) - rcu_report_exp_rdp(rdp->rsp, rdp); + rcu_report_exp_rdp(rdp); else WARN_ON_ONCE(rdp->deferred_qs); } @@ -381,7 +380,7 @@ void rcu_note_context_switch(bool preempt) */ rcu_qs(); if (rdp->deferred_qs) - rcu_report_exp_rdp(&rcu_state, rdp); + rcu_report_exp_rdp(rdp); trace_rcu_utilization(TPS("End context switch")); barrier(); /* Avoid RCU read-side critical sections leaking up. */ } @@ -509,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * blocked-tasks list below. */ if (rdp->deferred_qs) { - rcu_report_exp_rdp(&rcu_state, rdp); + rcu_report_exp_rdp(rdp); if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); return; @@ -580,7 +579,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) * then we need to report up the rcu_node hierarchy. */ if (!empty_exp && empty_exp_now) - rcu_report_exp_rnp(&rcu_state, rnp, true); + rcu_report_exp_rnp(rnp, true); } else { local_irq_restore(flags); } @@ -947,7 +946,7 @@ static void rcu_qs(void) if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) return; __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); - rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data)); + rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); } /* -- cgit v1.2.3 From aedf4ba984168ab5b96898a03bfdb51d07194776 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 14:33:59 -0700 Subject: rcu: Remove rsp parameter from rcu_node tree accessor macros There now is only one rcu_state structure in a given build of the Linux kernel, so there is no need to pass it as a parameter to RCU's rcu_node tree's accessor macros. This commit therefore removes the rsp parameter from those macros in kernel/rcu/rcu.h, and removes some now-unused rsp local variables while in the area. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 28 +++++++++++----------------- kernel/rcu/srcutree.c | 4 ++-- kernel/rcu/tree.c | 19 +++++++++---------- kernel/rcu/tree_exp.h | 18 +++++++++--------- kernel/rcu/tree_plugin.h | 4 ++-- 5 files changed, 33 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 4d04683c31b2..2bb77fddc11f 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -329,29 +329,23 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) } /* Returns first leaf rcu_node of the specified RCU flavor. 
*/ -#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1]) +#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1]) /* Is this rcu_node a leaf? */ #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1) /* Is this rcu_node the last leaf? */ -#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1]) +#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1]) /* - * Do a full breadth-first scan of the rcu_node structures for the + * Do a full breadth-first scan of the {s,}rcu_node structures for the * specified rcu_state structure. */ -#define rcu_for_each_node_breadth_first(rsp, rnp) \ - for ((rnp) = &(rsp)->node[0]; \ - (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++) - -/* - * Do a breadth-first scan of the non-leaf rcu_node structures for the - * specified rcu_state structure. Note that if there is a singleton - * rcu_node tree with but one rcu_node structure, this loop is a no-op. - */ -#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \ - for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rsp, rnp); (rnp)++) +#define srcu_for_each_node_breadth_first(sp, rnp) \ + for ((rnp) = &(sp)->node[0]; \ + (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++) +#define rcu_for_each_node_breadth_first(rnp) \ + srcu_for_each_node_breadth_first(&rcu_state, rnp) /* * Scan the leaves of the rcu_node hierarchy for the specified rcu_state @@ -359,9 +353,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) * one rcu_node structure, this loop -will- visit the rcu_node structure. * It is still a leaf node, even if it is also the root node. */ -#define rcu_for_each_leaf_node(rsp, rnp) \ - for ((rnp) = rcu_first_leaf_node(rsp); \ - (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++) +#define rcu_for_each_leaf_node(rnp) \ + for ((rnp) = rcu_first_leaf_node(); \ + (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++) /* * Iterate over all possible CPUs in a leaf RCU node. diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 6c9866a854b1..2042080cd38b 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -105,7 +105,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) rcu_init_levelspread(levelspread, num_rcu_lvl); /* Each pass through this loop initializes one srcu_node structure. */ - rcu_for_each_node_breadth_first(sp, snp) { + srcu_for_each_node_breadth_first(sp, snp) { spin_lock_init(&ACCESS_PRIVATE(snp, lock)); WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != ARRAY_SIZE(snp->srcu_data_have_cbs)); @@ -561,7 +561,7 @@ static void srcu_gp_end(struct srcu_struct *sp) /* Initiate callback invocation as needed. 
*/ idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); - rcu_for_each_node_breadth_first(sp, snp) { + srcu_for_each_node_breadth_first(sp, snp) { spin_lock_irq_rcu_node(snp); cbs = false; last_lvl = snp >= sp->level[rcu_num_lvls - 1]; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e33bf2aeac50..0465a85a40e1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -573,7 +573,7 @@ void show_rcu_gp_kthreads(void) for_each_rcu_flavor(rsp) { pr_info("%s: wait state: %d ->state: %#lx\n", rsp->name, rsp->gp_state, rsp->gp_kthread->state); - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(rnp) { if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed)) continue; pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n", @@ -1276,7 +1276,7 @@ static void rcu_dump_cpu_stacks(void) unsigned long flags; struct rcu_node *rnp; - rcu_for_each_leaf_node(&rcu_state, rnp) { + rcu_for_each_leaf_node(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); for_each_leaf_node_possible_cpu(rnp, cpu) if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) @@ -1336,7 +1336,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) */ pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name); print_cpu_stall_info_begin(); - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); ndetected += rcu_print_task_stall(rnp); if (rnp->qsmask != 0) { @@ -1873,7 +1873,7 @@ static bool rcu_gp_init(void) * will handle subsequent offline CPUs. */ rsp->gp_state = RCU_GP_ONOFF; - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(rnp) { spin_lock(&rsp->ofl_lock); raw_spin_lock_irq_rcu_node(rnp); if (rnp->qsmaskinit == rnp->qsmaskinitnext && @@ -1933,7 +1933,7 @@ static bool rcu_gp_init(void) * process finishes, because this kthread handles both. 
*/ rsp->gp_state = RCU_GP_INIT; - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(rnp) { rcu_gp_slow(gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp = this_cpu_ptr(&rcu_data); @@ -2046,7 +2046,7 @@ static void rcu_gp_cleanup(void) */ new_gp_seq = rsp->gp_seq; rcu_seq_end(&new_gp_seq); - rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_for_each_node_breadth_first(rnp) { raw_spin_lock_irq_rcu_node(rnp); if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) dump_blkd_tasks(rnp, 10); @@ -2606,9 +2606,8 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rsp)) unsigned long flags; unsigned long mask; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; - rcu_for_each_leaf_node(rsp, rnp) { + rcu_for_each_leaf_node(rnp) { cond_resched_tasks_rcu_qs(); mask = 0; raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -3778,7 +3777,7 @@ static void __init rcu_init_one(void) init_swait_queue_head(&rsp->gp_wq); init_swait_queue_head(&rsp->expedited_wq); - rnp = rcu_first_leaf_node(rsp); + rnp = rcu_first_leaf_node(); for_each_possible_cpu(i) { while (i > rnp->grphi) rnp++; @@ -3878,7 +3877,7 @@ static void __init rcu_dump_rcu_node_tree(void) pr_info("rcu_node tree layout dump\n"); pr_info(" "); - rcu_for_each_node_breadth_first(&rcu_state, rnp) { + rcu_for_each_node_breadth_first(rnp) { if (rnp->level != level) { pr_cont("\n"); pr_info(" "); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index b6f7bc34ac49..060bdb45cd95 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -97,7 +97,7 @@ static void sync_exp_reset_tree_hotplug(void) * Each pass through the following loop propagates newly onlined * CPUs for the current rcu_node structure up the rcu_node tree. */ - rcu_for_each_leaf_node(&rcu_state, rnp) { + rcu_for_each_leaf_node(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->expmaskinit == rnp->expmaskinitnext) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -141,7 +141,7 @@ static void __maybe_unused sync_exp_reset_tree(void) struct rcu_node *rnp; sync_exp_reset_tree_hotplug(); - rcu_for_each_node_breadth_first(&rcu_state, rnp) { + rcu_for_each_node_breadth_first(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); WARN_ON_ONCE(rnp->expmask); rnp->expmask = rnp->expmaskinit; @@ -438,14 +438,14 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func) trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select")); /* Schedule work for each leaf rcu_node structure. */ - rcu_for_each_leaf_node(&rcu_state, rnp) { + rcu_for_each_leaf_node(rnp) { rnp->exp_need_flush = false; if (!READ_ONCE(rnp->expmask)) continue; /* Avoid early boot non-existent wq. */ rnp->rew.rew_func = func; if (!READ_ONCE(rcu_par_gp_wq) || rcu_scheduler_active != RCU_SCHEDULER_RUNNING || - rcu_is_last_leaf_node(&rcu_state, rnp)) { + rcu_is_last_leaf_node(rnp)) { /* No workqueues yet or last leaf, do direct call. */ sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); continue; @@ -462,7 +462,7 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func) } /* Wait for workqueue jobs (if any) to complete. 
*/ - rcu_for_each_leaf_node(&rcu_state, rnp) + rcu_for_each_leaf_node(rnp) if (rnp->exp_need_flush) flush_work(&rnp->rew.rew_work); } @@ -496,7 +496,7 @@ static void synchronize_sched_expedited_wait(void) pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", rcu_state.name); ndetected = 0; - rcu_for_each_leaf_node(&rcu_state, rnp) { + rcu_for_each_leaf_node(rnp) { ndetected += rcu_print_task_exp_stall(rnp); for_each_leaf_node_possible_cpu(rnp, cpu) { struct rcu_data *rdp; @@ -517,7 +517,7 @@ static void synchronize_sched_expedited_wait(void) rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]); if (ndetected) { pr_err("blocking rcu_node structures:"); - rcu_for_each_node_breadth_first(&rcu_state, rnp) { + rcu_for_each_node_breadth_first(rnp) { if (rnp == rnp_root) continue; /* printed unconditionally */ if (sync_rcu_preempt_exp_done_unlocked(rnp)) @@ -529,7 +529,7 @@ static void synchronize_sched_expedited_wait(void) } pr_cont("\n"); } - rcu_for_each_leaf_node(&rcu_state, rnp) { + rcu_for_each_leaf_node(rnp) { for_each_leaf_node_possible_cpu(rnp, cpu) { mask = leaf_node_cpu_bit(rnp, cpu); if (!(rnp->expmask & mask)) @@ -561,7 +561,7 @@ static void rcu_exp_wait_wake(unsigned long s) */ mutex_lock(&rcu_state.exp_wake_mutex); - rcu_for_each_node_breadth_first(&rcu_state, rnp) { + rcu_for_each_node_breadth_first(rnp) { if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { spin_lock(&rnp->exp_lock); /* Recheck, avoid hang in case someone just arrived. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index e6ec25e47d00..b60d3df92ff5 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -687,7 +687,7 @@ static void rcu_print_detail_task_stall(void) struct rcu_node *rnp = rcu_get_root(); rcu_print_detail_task_stall_rnp(rnp); - rcu_for_each_leaf_node(&rcu_state, rnp) + rcu_for_each_leaf_node(rnp) rcu_print_detail_task_stall_rnp(rnp); } @@ -1427,7 +1427,7 @@ static void __init rcu_spawn_boost_kthreads(void) for_each_possible_cpu(cpu) per_cpu(rcu_cpu_has_work, cpu) = 0; BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rcu_for_each_leaf_node(&rcu_state, rnp) + rcu_for_each_leaf_node(rnp) (void)rcu_spawn_one_boost_kthread(rnp); } -- cgit v1.2.3 From 88d1bead858d88cdda92ed8f3388eea8ee3a9675 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 14:45:00 -0700 Subject: rcu: Remove rcu_data structure's ->rsp field Now that there is only one rcu_state structure, there is no need for the rcu_data structure to indicate which it corresponds to. This commit therefore removes the rcu_data structure's ->rsp field, replacing all remaining uses of it with &rcu_state. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 28 +++++++++++++--------------- kernel/rcu/tree.h | 1 - kernel/rcu/tree_plugin.h | 42 +++++++++++++++++++++--------------------- 3 files changed, 34 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0465a85a40e1..aeff9024bb6c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1070,7 +1070,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) { rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks); if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { - trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti")); + trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rcu_gpnum_ovf(rdp->mynode, rdp); return 1; } @@ -1120,7 +1120,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * of the current RCU grace period. 
*/ if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) { - trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti")); + trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rdp->dynticks_fqs++; rcu_gpnum_ovf(rnp, rdp); return 1; @@ -1134,20 +1134,20 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) */ jtsq = jiffies_till_sched_qs; ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); - if (time_after(jiffies, rdp->rsp->gp_start + jtsq) && + if (time_after(jiffies, rcu_state.gp_start + jtsq) && READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) && rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) { - trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc")); + trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("rqc")); rcu_gpnum_ovf(rnp, rdp); return 1; - } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) { + } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { /* Load rcu_qs_ctr before store to rcu_urgent_qs. */ smp_store_release(ruqp, true); } /* If waiting too long on an offline CPU, complain. */ if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && - time_after(jiffies, rdp->rsp->gp_start + HZ)) { + time_after(jiffies, rcu_state.gp_start + HZ)) { bool onl; struct rcu_node *rnp1; @@ -1185,12 +1185,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) */ rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu); if (!READ_ONCE(*rnhqp) && - (time_after(jiffies, rdp->rsp->gp_start + jtsq) || - time_after(jiffies, rdp->rsp->jiffies_resched))) { + (time_after(jiffies, rcu_state.gp_start + jtsq) || + time_after(jiffies, rcu_state.jiffies_resched))) { WRITE_ONCE(*rnhqp, true); /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ smp_store_release(ruqp, true); - rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */ + rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */ } /* @@ -1199,7 +1199,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * see if the CPU is getting hammered with interrupts, but only * once per grace period, just to keep the IPIs down to a dull roar. 
*/ - if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) { + if (jiffies - rcu_state.gp_start > rcu_jiffies_till_stall_check() / 2) { resched_cpu(rdp->cpu); if (IS_ENABLED(CONFIG_IRQ_WORK) && !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && @@ -1526,7 +1526,7 @@ void rcu_cpu_stall_reset(void) static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, unsigned long gp_seq_req, const char *s) { - trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req, + trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req, rnp->level, rnp->grplo, rnp->grphi, s); } @@ -1550,7 +1550,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, unsigned long gp_seq_req) { bool ret = false; - struct rcu_state *rsp = rdp->rsp; + struct rcu_state *rsp = &rcu_state; struct rcu_node *rnp; /* @@ -3167,8 +3167,7 @@ static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done) */ static void rcu_barrier_callback(struct rcu_head *rhp) { - struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); - struct rcu_state *rsp = rdp->rsp; + struct rcu_state *rsp = &rcu_state; if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { _rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence); @@ -3365,7 +3364,6 @@ rcu_boot_init_percpu_data(int cpu) rdp->rcu_onl_gp_seq = rcu_state.gp_seq; rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; rdp->cpu = cpu; - rdp->rsp = &rcu_state; rcu_boot_init_nocb_percpu_data(rdp); } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index b21d79bdab23..6f1b1a3fc23d 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -265,7 +265,6 @@ struct rcu_data { short rcu_onl_gp_flags; /* ->gp_flags at last online. */ int cpu; - struct rcu_state *rsp; }; /* Values for nocb_defer_wakeup field in struct rcu_data. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index b60d3df92ff5..5423f9e58494 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -350,7 +350,7 @@ void rcu_note_context_switch(bool preempt) */ WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); - trace_rcu_preempt_task(rdp->rsp->name, + trace_rcu_preempt_task(rcu_state.name, t->pid, (rnp->qsmask & rdp->grpmask) ? rnp->gp_seq @@ -1951,7 +1951,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype, if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) mod_timer(&rdp->nocb_timer, jiffies + 1); WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); } @@ -2030,7 +2030,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, /* If we are not being polled and there is a kthread, awaken it ... */ t = READ_ONCE(rdp->nocb_kthread); if (rcu_nocb_poll || !t) { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNotPoll")); return; } @@ -2039,7 +2039,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, if (!irqs_disabled_flags(flags)) { /* ... if queue was empty ... */ wake_nocb_leader(rdp, false); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE, @@ -2050,7 +2050,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, /* ... or if many callbacks queued. 
*/ if (!irqs_disabled_flags(flags)) { wake_nocb_leader(rdp, true); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeOvf")); } else { wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE, @@ -2058,7 +2058,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, } rdp->qlen_last_fqs_check = LONG_MAX / 2; } else { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot")); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } return; } @@ -2080,12 +2080,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, return false; __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags); if (__is_kfree_rcu_offset((unsigned long)rhp->func)) - trace_rcu_kfree_callback(rdp->rsp->name, rhp, + trace_rcu_kfree_callback(rcu_state.name, rhp, (unsigned long)rhp->func, -atomic_long_read(&rdp->nocb_q_count_lazy), -atomic_long_read(&rdp->nocb_q_count)); else - trace_rcu_callback(rdp->rsp->name, rhp, + trace_rcu_callback(rcu_state.name, rhp, -atomic_long_read(&rdp->nocb_q_count_lazy), -atomic_long_read(&rdp->nocb_q_count)); @@ -2135,7 +2135,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) struct rcu_node *rnp = rdp->mynode; local_irq_save(flags); - c = rcu_seq_snap(&rdp->rsp->gp_seq); + c = rcu_seq_snap(&rcu_state.gp_seq); if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { local_irq_restore(flags); } else { @@ -2180,7 +2180,7 @@ wait_again: /* Wait for callbacks to appear. */ if (!rcu_nocb_poll) { - trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep")); + trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep")); swait_event_interruptible_exclusive(my_rdp->nocb_wq, !READ_ONCE(my_rdp->nocb_leader_sleep)); raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags); @@ -2190,7 +2190,7 @@ wait_again: raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags); } else if (firsttime) { firsttime = false; /* Don't drown trace log with "Poll"! */ - trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll")); + trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll")); } /* @@ -2217,7 +2217,7 @@ wait_again: if (rcu_nocb_poll) { schedule_timeout_interruptible(1); } else { - trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("WokeEmpty")); } goto wait_again; @@ -2262,7 +2262,7 @@ wait_again: static void nocb_follower_wait(struct rcu_data *rdp) { for (;;) { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep")); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep")); swait_event_interruptible_exclusive(rdp->nocb_wq, READ_ONCE(rdp->nocb_follower_head)); if (smp_load_acquire(&rdp->nocb_follower_head)) { @@ -2270,7 +2270,7 @@ static void nocb_follower_wait(struct rcu_data *rdp) return; } WARN_ON(signal_pending(current)); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty")); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); } } @@ -2305,10 +2305,10 @@ static int rcu_nocb_kthread(void *arg) rdp->nocb_follower_tail = &rdp->nocb_follower_head; raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); BUG_ON(!list); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty")); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty")); /* Each pass through the following loop invokes a callback. 
*/ - trace_rcu_batch_start(rdp->rsp->name, + trace_rcu_batch_start(rcu_state.name, atomic_long_read(&rdp->nocb_q_count_lazy), atomic_long_read(&rdp->nocb_q_count), -1); c = cl = 0; @@ -2316,23 +2316,23 @@ static int rcu_nocb_kthread(void *arg) next = list->next; /* Wait for enqueuing to complete, if needed. */ while (next == NULL && &list->next != tail) { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WaitQueue")); schedule_timeout_interruptible(1); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeQueue")); next = list->next; } debug_rcu_head_unqueue(list); local_bh_disable(); - if (__rcu_reclaim(rdp->rsp->name, list)) + if (__rcu_reclaim(rcu_state.name, list)) cl++; c++; local_bh_enable(); cond_resched_tasks_rcu_qs(); list = next; } - trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1); + trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1); smp_mb__before_atomic(); /* _add after CB invocation. */ atomic_long_add(-c, &rdp->nocb_q_count); atomic_long_add(-cl, &rdp->nocb_q_count_lazy); @@ -2360,7 +2360,7 @@ static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp) ndw = READ_ONCE(rdp->nocb_defer_wakeup); WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); __wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); } /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ -- cgit v1.2.3 From 564a9ae6046c64d03df0c1c1264094b1a00dccc9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 14:52:04 -0700 Subject: rcu: Remove last non-flavor-traversal rsp local variable from tree_plugin.h This commit removes the last non-flavor-traversal rsp local variable from kernel/rcu/tree_plugin.h in favor of &rcu_state. The flavor-traversal locals will be removed with the removal of flavor traversal. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5423f9e58494..59d66ee26310 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -782,7 +782,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) */ static void rcu_flavor_check_callbacks(int user) { - struct rcu_state *rsp = &rcu_state; struct task_struct *t = current; if (user || rcu_is_cpu_rrupt_from_idle()) { @@ -806,7 +805,7 @@ static void rcu_flavor_check_callbacks(int user) __this_cpu_read(rcu_data.core_needs_qs) && __this_cpu_read(rcu_data.cpu_no_qs.b.norm) && !t->rcu_read_unlock_special.b.need_qs && - time_after(jiffies, rsp->gp_start + HZ)) + time_after(jiffies, rcu_state.gp_start + HZ)) t->rcu_read_unlock_special.b.need_qs = true; } @@ -1761,12 +1760,11 @@ static void print_cpu_stall_info_begin(void) /* * Print out diagnostic information for the specified stalled CPU. * - * If the specified CPU is aware of the current RCU grace period - * (flavor specified by rsp), then print the number of scheduling - * clock interrupts the CPU has taken during the time that it has - * been aware. Otherwise, print the number of RCU grace periods - * that this CPU is ignorant of, for example, "1" if the CPU was - * aware of the previous grace period. 
+ * If the specified CPU is aware of the current RCU grace period, then + * print the number of scheduling clock interrupts the CPU has taken + * during the time that it has been aware. Otherwise, print the number + * of RCU grace periods that this CPU is ignorant of, for example, "1" + * if the CPU was aware of the previous grace period. * * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. */ -- cgit v1.2.3 From b97d23c51c9fee56b0c7598c323ab2846d873f2d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 15:35:00 -0700 Subject: rcu: Remove for_each_rcu_flavor() flavor-traversal macro Now that there is only ever a single flavor of RCU in a given kernel build, there isn't a whole lot of point in having a flavor-traversal macro. This commit therefore removes it and converts calls to it to straightline code, inlining trivial functions as appropriate. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 172 +++++++++++++++++++---------------------------- kernel/rcu/tree.h | 7 -- kernel/rcu/tree_plugin.h | 59 ++++++---------- 3 files changed, 92 insertions(+), 146 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index aeff9024bb6c..46a32999020d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -85,8 +85,6 @@ struct rcu_state rcu_state = { .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), }; -LIST_HEAD(rcu_struct_flavors); - /* Dump rcu_node combining tree at boot to verify correct setup. */ static bool dump_tree; module_param(dump_tree, bool, 0444); @@ -568,31 +566,28 @@ void show_rcu_gp_kthreads(void) int cpu; struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp; - for_each_rcu_flavor(rsp) { - pr_info("%s: wait state: %d ->state: %#lx\n", - rsp->name, rsp->gp_state, rsp->gp_kthread->state); - rcu_for_each_node_breadth_first(rnp) { - if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed)) - continue; - pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n", - rnp->grplo, rnp->grphi, rnp->gp_seq, - rnp->gp_seq_needed); - if (!rcu_is_leaf_node(rnp)) + pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name, + rcu_state.gp_state, rcu_state.gp_kthread->state); + rcu_for_each_node_breadth_first(rnp) { + if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) + continue; + pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n", + rnp->grplo, rnp->grphi, rnp->gp_seq, + rnp->gp_seq_needed); + if (!rcu_is_leaf_node(rnp)) + continue; + for_each_leaf_node_possible_cpu(rnp, cpu) { + rdp = per_cpu_ptr(&rcu_data, cpu); + if (rdp->gpwrap || + ULONG_CMP_GE(rcu_state.gp_seq, + rdp->gp_seq_needed)) continue; - for_each_leaf_node_possible_cpu(rnp, cpu) { - rdp = per_cpu_ptr(&rcu_data, cpu); - if (rdp->gpwrap || - ULONG_CMP_GE(rsp->gp_seq, - rdp->gp_seq_needed)) - continue; - pr_info("\tcpu %d ->gp_seq_needed %lu\n", - cpu, rdp->gp_seq_needed); - } + pr_info("\tcpu %d ->gp_seq_needed %lu\n", + cpu, rdp->gp_seq_needed); } - /* sched_show_task(rsp->gp_kthread); */ } + /* sched_show_task(rcu_state.gp_kthread); */ } EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); @@ -638,7 +633,6 @@ static struct rcu_node *rcu_get_root(void) */ static void rcu_eqs_enter(bool user) { - struct rcu_state *rsp; struct rcu_data *rdp; struct rcu_dynticks *rdtp; @@ -655,10 +649,8 @@ static void rcu_eqs_enter(bool user) lockdep_assert_irqs_disabled(); trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); - 
for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(&rcu_data); - do_nocb_deferred_wakeup(rdp); - } + rdp = this_cpu_ptr(&rcu_data); + do_nocb_deferred_wakeup(rdp); rcu_prepare_for_idle(); rcu_preempt_deferred_qs(current); WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ @@ -1024,21 +1016,17 @@ bool rcu_lockdep_current_cpu_online(void) { struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp; + bool ret = false; if (in_nmi() || !rcu_scheduler_fully_active) return true; preempt_disable(); - for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(&rcu_data); - rnp = rdp->mynode; - if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) { - preempt_enable(); - return true; - } - } + rdp = this_cpu_ptr(&rcu_data); + rnp = rdp->mynode; + if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) + ret = true; preempt_enable(); - return false; + return ret; } EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); @@ -1516,10 +1504,7 @@ static void check_cpu_stall(struct rcu_data *rdp) */ void rcu_cpu_stall_reset(void) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2); + WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2); } /* Trace-event wrapper function for trace_rcu_future_grace_period. */ @@ -3134,17 +3119,12 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy) bool al = true; bool hc = false; struct rcu_data *rdp; - struct rcu_state *rsp; - for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(&rcu_data); - if (rcu_segcblist_empty(&rdp->cblist)) - continue; + rdp = this_cpu_ptr(&rcu_data); + if (!rcu_segcblist_empty(&rdp->cblist)) { hc = true; - if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) { + if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) al = false; - break; - } } if (all_lazy) *all_lazy = al; @@ -3436,15 +3416,12 @@ int rcutree_online_cpu(unsigned int cpu) unsigned long flags; struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp; - for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(&rcu_data, cpu); - rnp = rdp->mynode; - raw_spin_lock_irqsave_rcu_node(rnp, flags); - rnp->ffmask |= rdp->grpmask; - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } + rdp = per_cpu_ptr(&rcu_data, cpu); + rnp = rdp->mynode; + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rnp->ffmask |= rdp->grpmask; + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); if (IS_ENABLED(CONFIG_TREE_SRCU)) srcu_online_cpu(cpu); if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) @@ -3463,15 +3440,12 @@ int rcutree_offline_cpu(unsigned int cpu) unsigned long flags; struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp; - for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(&rcu_data, cpu); - rnp = rdp->mynode; - raw_spin_lock_irqsave_rcu_node(rnp, flags); - rnp->ffmask &= ~rdp->grpmask; - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } + rdp = per_cpu_ptr(&rcu_data, cpu); + rnp = rdp->mynode; + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rnp->ffmask &= ~rdp->grpmask; + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); rcutree_affinity_setting(cpu, cpu); if (IS_ENABLED(CONFIG_TREE_SRCU)) @@ -3500,34 +3474,32 @@ void rcu_cpu_starting(unsigned int cpu) unsigned long oldmask; struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp; + struct rcu_state *rsp = &rcu_state; if (per_cpu(rcu_cpu_started, cpu)) return; per_cpu(rcu_cpu_started, cpu) = 1; - for_each_rcu_flavor(rsp) { - rdp = per_cpu_ptr(&rcu_data, cpu); - rnp = rdp->mynode; - mask = rdp->grpmask; - raw_spin_lock_irqsave_rcu_node(rnp, flags); - rnp->qsmaskinitnext |= 
mask; - oldmask = rnp->expmaskinitnext; - rnp->expmaskinitnext |= mask; - oldmask ^= rnp->expmaskinitnext; - nbits = bitmap_weight(&oldmask, BITS_PER_LONG); - /* Allow lockless access for expedited grace periods. */ - smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */ - rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ - rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq); - rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags); - if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ - /* Report QS -after- changing ->qsmaskinitnext! */ - rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); - } else { - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } + rdp = per_cpu_ptr(&rcu_data, cpu); + rnp = rdp->mynode; + mask = rdp->grpmask; + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rnp->qsmaskinitnext |= mask; + oldmask = rnp->expmaskinitnext; + rnp->expmaskinitnext |= mask; + oldmask ^= rnp->expmaskinitnext; + nbits = bitmap_weight(&oldmask, BITS_PER_LONG); + /* Allow lockless access for expedited grace periods. */ + smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */ + rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ + rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq); + rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags); + if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ + /* Report QS -after- changing ->qsmaskinitnext! */ + rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); + } else { + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ } @@ -3644,7 +3616,6 @@ static int __init rcu_spawn_gp_kthread(void) unsigned long flags; int kthread_prio_in = kthread_prio; struct rcu_node *rnp; - struct rcu_state *rsp; struct sched_param sp; struct task_struct *t; @@ -3664,19 +3635,17 @@ static int __init rcu_spawn_gp_kthread(void) kthread_prio, kthread_prio_in); rcu_scheduler_fully_active = 1; - for_each_rcu_flavor(rsp) { - t = kthread_create(rcu_gp_kthread, NULL, "%s", rsp->name); - BUG_ON(IS_ERR(t)); - rnp = rcu_get_root(); - raw_spin_lock_irqsave_rcu_node(rnp, flags); - rsp->gp_kthread = t; - if (kthread_prio) { - sp.sched_priority = kthread_prio; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - } - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - wake_up_process(t); + t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); + BUG_ON(IS_ERR(t)); + rnp = rcu_get_root(); + raw_spin_lock_irqsave_rcu_node(rnp, flags); + rcu_state.gp_kthread = t; + if (kthread_prio) { + sp.sched_priority = kthread_prio; + sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); } + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + wake_up_process(t); rcu_spawn_nocb_kthreads(); rcu_spawn_boost_kthreads(); return 0; @@ -3782,7 +3751,6 @@ static void __init rcu_init_one(void) per_cpu_ptr(&rcu_data, i)->mynode = rnp; rcu_boot_init_percpu_data(i); } - list_add(&rsp->flavors, &rcu_struct_flavors); } /* diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 6f1b1a3fc23d..8abc15c42d84 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -360,7 +360,6 @@ struct rcu_state { /* jiffies. */ const char *name; /* Name of structure. */ char abbr; /* Abbreviated name. */ - struct list_head flavors; /* List of RCU flavors. 
*/ spinlock_t ofl_lock ____cacheline_internodealigned_in_smp; /* Synchronize offline with */ @@ -417,12 +416,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; #define RCU_NAME rcu_name #endif /* #else #ifdef CONFIG_TRACING */ -extern struct list_head rcu_struct_flavors; - -/* Sequence through rcu_state structures for each RCU flavor. */ -#define for_each_rcu_flavor(rsp) \ - list_for_each_entry((rsp), &rcu_struct_flavors, flavors) - /* * RCU implementation internal declarations: */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 59d66ee26310..878a1d2cd465 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1561,31 +1561,28 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) struct rcu_data *rdp; struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); struct rcu_node *rnp; - struct rcu_state *rsp; /* Exit early if we advanced recently. */ if (jiffies == rdtp->last_advance_all) return false; rdtp->last_advance_all = jiffies; - for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(&rcu_data); - rnp = rdp->mynode; + rdp = this_cpu_ptr(&rcu_data); + rnp = rdp->mynode; - /* - * Don't bother checking unless a grace period has - * completed since we last checked and there are - * callbacks not yet ready to invoke. - */ - if ((rcu_seq_completed_gp(rdp->gp_seq, - rcu_seq_current(&rnp->gp_seq)) || - unlikely(READ_ONCE(rdp->gpwrap))) && - rcu_segcblist_pend_cbs(&rdp->cblist)) - note_gp_changes(rdp); - - if (rcu_segcblist_ready_cbs(&rdp->cblist)) - cbs_ready = true; - } + /* + * Don't bother checking unless a grace period has + * completed since we last checked and there are + * callbacks not yet ready to invoke. + */ + if ((rcu_seq_completed_gp(rdp->gp_seq, + rcu_seq_current(&rnp->gp_seq)) || + unlikely(READ_ONCE(rdp->gpwrap))) && + rcu_segcblist_pend_cbs(&rdp->cblist)) + note_gp_changes(rdp); + + if (rcu_segcblist_ready_cbs(&rdp->cblist)) + cbs_ready = true; return cbs_ready; } @@ -1648,7 +1645,6 @@ static void rcu_prepare_for_idle(void) struct rcu_data *rdp; struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); struct rcu_node *rnp; - struct rcu_state *rsp; int tne; lockdep_assert_irqs_disabled(); @@ -1686,10 +1682,8 @@ static void rcu_prepare_for_idle(void) if (rdtp->last_accelerate == jiffies) return; rdtp->last_accelerate = jiffies; - for_each_rcu_flavor(rsp) { - rdp = this_cpu_ptr(&rcu_data); - if (!rcu_segcblist_pend_cbs(&rdp->cblist)) - continue; + rdp = this_cpu_ptr(&rcu_data); + if (rcu_segcblist_pend_cbs(&rdp->cblist)) { rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ needwake = rcu_accelerate_cbs(rnp, rdp); @@ -1824,10 +1818,7 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) /* Increment ->ticks_this_gp for all flavors of RCU. 
*/ static void increment_cpu_stall_ticks(void) { - struct rcu_state *rsp; - - for_each_rcu_flavor(rsp) - raw_cpu_inc(rcu_data.ticks_this_gp); + raw_cpu_inc(rcu_data.ticks_this_gp); } #ifdef CONFIG_RCU_NOCB_CPU @@ -2384,7 +2375,6 @@ void __init rcu_init_nohz(void) { int cpu; bool need_rcu_nocb_mask = false; - struct rcu_state *rsp; #if defined(CONFIG_NO_HZ_FULL) if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) @@ -2418,11 +2408,9 @@ void __init rcu_init_nohz(void) if (rcu_nocb_poll) pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); - for_each_rcu_flavor(rsp) { - for_each_cpu(cpu, rcu_nocb_mask) - init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); - rcu_organize_nocb_kthreads(); - } + for_each_cpu(cpu, rcu_nocb_mask) + init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu)); + rcu_organize_nocb_kthreads(); } /* Initialize per-rcu_data variables for no-CBs CPUs. */ @@ -2489,11 +2477,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu) */ static void rcu_spawn_all_nocb_kthreads(int cpu) { - struct rcu_state *rsp; - if (rcu_scheduler_fully_active) - for_each_rcu_flavor(rsp) - rcu_spawn_one_nocb_kthread(cpu); + rcu_spawn_one_nocb_kthread(cpu); } /* -- cgit v1.2.3 From f7dd7d44fd2db80bfb2c5f81e67b5404b4735312 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 15:39:40 -0700 Subject: rcu: Simplify rcutorture_get_gp_data() This commit restructures rcutorture_get_gp_data() to take advantage of the fact that there is only one flavor of RCU. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 46a32999020d..254c78377c22 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -597,21 +597,16 @@ EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, unsigned long *gp_seq) { - struct rcu_state *rsp = NULL; - switch (test_type) { case RCU_FLAVOR: case RCU_BH_FLAVOR: case RCU_SCHED_FLAVOR: - rsp = &rcu_state; + *flags = READ_ONCE(rcu_state.gp_flags); + *gp_seq = rcu_seq_current(&rcu_state.gp_seq); break; default: break; } - if (rsp == NULL) - return; - *flags = READ_ONCE(rsp->gp_flags); - *gp_seq = rcu_seq_current(&rsp->gp_seq); } EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); -- cgit v1.2.3 From 7cba4775ba79d8da5775339f6a4769762626bcfd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Jul 2018 18:25:59 -0700 Subject: rcu: Restructure rcu_check_gp_kthread_starvation() This commit removes the rsp and gpa local variables, repurposes the j local variable and adds a gpk (GP kthread) local to improve readability. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 254c78377c22..4c920e2e729d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1225,24 +1225,21 @@ static const char *gp_state_getname(short gs) */ static void rcu_check_gp_kthread_starvation(void) { - unsigned long gpa; + struct task_struct *gpk = rcu_state.gp_kthread; unsigned long j; - struct rcu_state *rsp = &rcu_state; - j = jiffies; - gpa = READ_ONCE(rsp->gp_activity); - if (j - gpa > 2 * HZ) { + j = jiffies - READ_ONCE(rcu_state.gp_activity); + if (j > 2 * HZ) { pr_err("%s kthread starved for %ld jiffies! 
g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", - rsp->name, j - gpa, - (long)rcu_seq_current(&rsp->gp_seq), - rsp->gp_flags, - gp_state_getname(rsp->gp_state), rsp->gp_state, - rsp->gp_kthread ? rsp->gp_kthread->state : ~0, - rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1); - if (rsp->gp_kthread) { + rcu_state.name, j, + (long)rcu_seq_current(&rcu_state.gp_seq), + rcu_state.gp_flags, + gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, + gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1); + if (gpk) { pr_err("RCU grace-period kthread stack dump:\n"); - sched_show_task(rsp->gp_kthread); - wake_up_process(rsp->gp_kthread); + sched_show_task(gpk); + wake_up_process(gpk); } } } -- cgit v1.2.3 From 4c6ed43708bbd53112f3a455bf7fe0d224167943 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 00:02:29 -0700 Subject: rcu: Eliminate stall-warning use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. This commit therefore replaces rsp with &rcu_state in print_other_cpu_stall(), print_cpu_stall(), and check_cpu_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 80 +++++++++++++++++++++++++++---------------------------- 1 file changed, 39 insertions(+), 41 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4c920e2e729d..2f6fd076d8e6 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1273,17 +1273,17 @@ static void rcu_dump_cpu_stacks(void) static void rcu_stall_kick_kthreads(void) { unsigned long j; - struct rcu_state *rsp = &rcu_state; if (!rcu_kick_kthreads) return; - j = READ_ONCE(rsp->jiffies_kick_kthreads); - if (time_after(jiffies, j) && rsp->gp_kthread && - (rcu_gp_in_progress() || READ_ONCE(rsp->gp_flags))) { - WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name); + j = READ_ONCE(rcu_state.jiffies_kick_kthreads); + if (time_after(jiffies, j) && rcu_state.gp_kthread && + (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) { + WARN_ONCE(1, "Kicking %s grace-period kthread\n", + rcu_state.name); rcu_ftrace_dump(DUMP_ALL); - wake_up_process(rsp->gp_kthread); - WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ); + wake_up_process(rcu_state.gp_kthread); + WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ); } } @@ -1301,7 +1301,6 @@ static void print_other_cpu_stall(unsigned long gp_seq) unsigned long j; int ndetected = 0; struct rcu_node *rnp = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1314,7 +1313,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) * See Documentation/RCU/stallwarn.txt for info on how to debug * RCU CPU stall warnings. */ - pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name); + pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name); print_cpu_stall_info_begin(); rcu_for_each_leaf_node(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -1334,21 +1333,21 @@ static void print_other_cpu_stall(unsigned long gp_seq) totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, cpu)->cblist); pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", - smp_processor_id(), (long)(jiffies - rsp->gp_start), - (long)rcu_seq_current(&rsp->gp_seq), totqlen); + smp_processor_id(), (long)(jiffies - rcu_state.gp_start), + (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); if (ndetected) { rcu_dump_cpu_stacks(); /* Complain about tasks blocking the grace period. 
*/ rcu_print_detail_task_stall(); } else { - if (rcu_seq_current(&rsp->gp_seq) != gp_seq) { + if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) { pr_err("INFO: Stall ended before state dump start\n"); } else { j = jiffies; - gpa = READ_ONCE(rsp->gp_activity); + gpa = READ_ONCE(rcu_state.gp_activity); pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", - rsp->name, j - gpa, j, gpa, + rcu_state.name, j - gpa, j, gpa, jiffies_till_next_fqs, rcu_get_root()->qsmask); /* In this case, the current CPU might be at fault. */ @@ -1356,8 +1355,8 @@ static void print_other_cpu_stall(unsigned long gp_seq) } } /* Rewrite if needed in case of slow consoles. */ - if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall))) - WRITE_ONCE(rsp->jiffies_stall, + if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) + WRITE_ONCE(rcu_state.jiffies_stall, jiffies + 3 * rcu_jiffies_till_stall_check() + 3); rcu_check_gp_kthread_starvation(); @@ -1373,7 +1372,6 @@ static void print_cpu_stall(void) unsigned long flags; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; long totqlen = 0; /* Kick and suppress, if so configured. */ @@ -1386,7 +1384,7 @@ static void print_cpu_stall(void) * See Documentation/RCU/stallwarn.txt for info on how to debug * RCU CPU stall warnings. */ - pr_err("INFO: %s self-detected stall on CPU", rsp->name); + pr_err("INFO: %s self-detected stall on CPU", rcu_state.name); print_cpu_stall_info_begin(); raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); print_cpu_stall_info(smp_processor_id()); @@ -1396,8 +1394,8 @@ static void print_cpu_stall(void) totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data, cpu)->cblist); pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", - jiffies - rsp->gp_start, - (long)rcu_seq_current(&rsp->gp_seq), totqlen); + jiffies - rcu_state.gp_start, + (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); rcu_check_gp_kthread_starvation(); @@ -1405,8 +1403,8 @@ static void print_cpu_stall(void) raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Rewrite if needed in case of slow consoles. */ - if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall))) - WRITE_ONCE(rsp->jiffies_stall, + if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) + WRITE_ONCE(rcu_state.jiffies_stall, jiffies + 3 * rcu_jiffies_till_stall_check() + 3); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -1431,7 +1429,6 @@ static void check_cpu_stall(struct rcu_data *rdp) unsigned long jn; unsigned long js; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || !rcu_gp_in_progress()) @@ -1442,27 +1439,28 @@ static void check_cpu_stall(struct rcu_data *rdp) /* * Lots of memory barriers to reject false positives. * - * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall, - * then rsp->gp_start, and finally another copy of rsp->gp_seq. - * These values are updated in the opposite order with memory - * barriers (or equivalent) during grace-period initialization - * and cleanup. Now, a false positive can occur if we get an new - * value of rsp->gp_start and a old value of rsp->jiffies_stall. - * But given the memory barriers, the only way that this can happen - * is if one grace period ends and another starts between these - * two fetches. This is detected by comparing the second fetch - * of rsp->gp_seq with the previous fetch from rsp->gp_seq. 
+ * The idea is to pick up rcu_state.gp_seq, then + * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally + * another copy of rcu_state.gp_seq. These values are updated in + * the opposite order with memory barriers (or equivalent) during + * grace-period initialization and cleanup. Now, a false positive + * can occur if we get an new value of rcu_state.gp_start and a old + * value of rcu_state.jiffies_stall. But given the memory barriers, + * the only way that this can happen is if one grace period ends + * and another starts between these two fetches. This is detected + * by comparing the second fetch of rcu_state.gp_seq with the + * previous fetch from rcu_state.gp_seq. * - * Given this check, comparisons of jiffies, rsp->jiffies_stall, - * and rsp->gp_start suffice to forestall false positives. + * Given this check, comparisons of jiffies, rcu_state.jiffies_stall, + * and rcu_state.gp_start suffice to forestall false positives. */ - gs1 = READ_ONCE(rsp->gp_seq); + gs1 = READ_ONCE(rcu_state.gp_seq); smp_rmb(); /* Pick up ->gp_seq first... */ - js = READ_ONCE(rsp->jiffies_stall); + js = READ_ONCE(rcu_state.jiffies_stall); smp_rmb(); /* ...then ->jiffies_stall before the rest... */ - gps = READ_ONCE(rsp->gp_start); + gps = READ_ONCE(rcu_state.gp_start); smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */ - gs2 = READ_ONCE(rsp->gp_seq); + gs2 = READ_ONCE(rcu_state.gp_seq); if (gs1 != gs2 || ULONG_CMP_LT(j, js) || ULONG_CMP_GE(gps, js)) @@ -1471,14 +1469,14 @@ static void check_cpu_stall(struct rcu_data *rdp) jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; if (rcu_gp_in_progress() && (READ_ONCE(rnp->qsmask) & rdp->grpmask) && - cmpxchg(&rsp->jiffies_stall, js, jn) == js) { + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(); } else if (rcu_gp_in_progress() && ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && - cmpxchg(&rsp->jiffies_stall, js, jn) == js) { + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { /* They had a few time units to dump stack, so complain. */ print_other_cpu_stall(gs2); -- cgit v1.2.3 From 9cbc5b97029bff2db7fb413d6ce588d38373834c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 15:47:01 -0700 Subject: rcu: Eliminate grace-period management code use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. This commit therefore replaces rsp with &rcu_state in rcu_start_this_gp(), rcu_accelerate_cbs(), __note_gp_changes(), rcu_gp_init(), rcu_gp_fqs(), rcu_gp_cleanup(), rcu_gp_kthread(), and rcu_report_qs_rsp(). Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 168 ++++++++++++++++++++++++++---------------------------- 1 file changed, 82 insertions(+), 86 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2f6fd076d8e6..88915372ba38 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1525,7 +1525,6 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, unsigned long gp_seq_req) { bool ret = false; - struct rcu_state *rsp = &rcu_state; struct rcu_node *rnp; /* @@ -1574,13 +1573,13 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, goto unlock_out; } trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); - WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT); - rsp->gp_req_activity = jiffies; - if (!rsp->gp_kthread) { + WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); + rcu_state.gp_req_activity = jiffies; + if (!rcu_state.gp_kthread) { trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); goto unlock_out; } - trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq")); + trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq")); ret = true; /* Caller must wake GP kthread. */ unlock_out: /* Push furthest requested GP to leaf node and rcu_data structure. */ @@ -1642,7 +1641,6 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) { unsigned long gp_seq_req; bool ret = false; - struct rcu_state *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -1660,15 +1658,15 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) * accelerating callback invocation to an earlier grace-period * number. */ - gp_seq_req = rcu_seq_snap(&rsp->gp_seq); + gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); /* Trace depending on how much we were able to accelerate. */ if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) - trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB")); + trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); else - trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB")); + trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); return ret; } @@ -1737,7 +1735,6 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) { bool ret; bool need_gp; - struct rcu_state __maybe_unused *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -1748,7 +1745,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || unlikely(READ_ONCE(rdp->gpwrap))) { ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */ - trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend")); + trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); } else { ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */ } @@ -1761,7 +1758,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) * set up to detect a quiescent state, otherwise don't * go looking for one. 
*/ - trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart")); + trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); need_gp = !!(rnp->qsmask & rdp->grpmask); rdp->cpu_no_qs.b.norm = need_gp; rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); @@ -1814,16 +1811,15 @@ static bool rcu_gp_init(void) unsigned long mask; struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); raw_spin_lock_irq_rcu_node(rnp); - if (!READ_ONCE(rsp->gp_flags)) { + if (!READ_ONCE(rcu_state.gp_flags)) { /* Spurious wakeup, tell caller to go back to sleep. */ raw_spin_unlock_irq_rcu_node(rnp); return false; } - WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */ + WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ if (WARN_ON_ONCE(rcu_gp_in_progress())) { /* @@ -1837,8 +1833,8 @@ static bool rcu_gp_init(void) /* Advance to a new grace period and initialize state. */ record_gp_stall_check_time(); /* Record GP times before starting GP, hence rcu_seq_start(). */ - rcu_seq_start(&rsp->gp_seq); - trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start")); + rcu_seq_start(&rcu_state.gp_seq); + trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); raw_spin_unlock_irq_rcu_node(rnp); /* @@ -1847,15 +1843,15 @@ static bool rcu_gp_init(void) * for subsequent online CPUs, and that quiescent-state forcing * will handle subsequent offline CPUs. */ - rsp->gp_state = RCU_GP_ONOFF; + rcu_state.gp_state = RCU_GP_ONOFF; rcu_for_each_leaf_node(rnp) { - spin_lock(&rsp->ofl_lock); + spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irq_rcu_node(rnp); if (rnp->qsmaskinit == rnp->qsmaskinitnext && !rnp->wait_blkd_tasks) { /* Nothing to do on this leaf rcu_node structure. */ raw_spin_unlock_irq_rcu_node(rnp); - spin_unlock(&rsp->ofl_lock); + spin_unlock(&rcu_state.ofl_lock); continue; } @@ -1891,34 +1887,34 @@ static bool rcu_gp_init(void) } raw_spin_unlock_irq_rcu_node(rnp); - spin_unlock(&rsp->ofl_lock); + spin_unlock(&rcu_state.ofl_lock); } rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ /* * Set the quiescent-state-needed bits in all the rcu_node - * structures for all currently online CPUs in breadth-first order, - * starting from the root rcu_node structure, relying on the layout - * of the tree within the rsp->node[] array. Note that other CPUs - * will access only the leaves of the hierarchy, thus seeing that no - * grace period is in progress, at least until the corresponding - * leaf node has been initialized. + * structures for all currently online CPUs in breadth-first + * order, starting from the root rcu_node structure, relying on the + * layout of the tree within the rcu_state.node[] array. Note that + * other CPUs will access only the leaves of the hierarchy, thus + * seeing that no grace period is in progress, at least until the + * corresponding leaf node has been initialized. * * The grace period cannot complete until the initialization * process finishes, because this kthread handles both. 
*/ - rsp->gp_state = RCU_GP_INIT; + rcu_state.gp_state = RCU_GP_INIT; rcu_for_each_node_breadth_first(rnp) { rcu_gp_slow(gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp = this_cpu_ptr(&rcu_data); rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; - WRITE_ONCE(rnp->gp_seq, rsp->gp_seq); + WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); if (rnp == rdp->mynode) (void)__note_gp_changes(rnp, rdp); rcu_preempt_boost_start_gp(rnp); - trace_rcu_grace_period_init(rsp->name, rnp->gp_seq, + trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, rnp->level, rnp->grplo, rnp->grphi, rnp->qsmask); /* Quiescent states for tasks on any now-offline CPUs. */ @@ -1929,7 +1925,7 @@ static bool rcu_gp_init(void) else raw_spin_unlock_irq_rcu_node(rnp); cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); } return true; @@ -1961,10 +1957,9 @@ static bool rcu_gp_fqs_check_wake(int *gfp) static void rcu_gp_fqs(bool first_time) { struct rcu_node *rnp = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; - WRITE_ONCE(rsp->gp_activity, jiffies); - rsp->n_force_qs++; + WRITE_ONCE(rcu_state.gp_activity, jiffies); + rcu_state.n_force_qs++; if (first_time) { /* Collect dyntick-idle snapshots. */ force_qs_rnp(dyntick_save_progress_counter); @@ -1973,10 +1968,10 @@ static void rcu_gp_fqs(bool first_time) force_qs_rnp(rcu_implicit_dynticks_qs); } /* Clear flag to prevent immediate re-entry. */ - if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { raw_spin_lock_irq_rcu_node(rnp); - WRITE_ONCE(rsp->gp_flags, - READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); + WRITE_ONCE(rcu_state.gp_flags, + READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); raw_spin_unlock_irq_rcu_node(rnp); } } @@ -1991,14 +1986,13 @@ static void rcu_gp_cleanup(void) unsigned long new_gp_seq; struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; struct swait_queue_head *sq; - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); raw_spin_lock_irq_rcu_node(rnp); - gp_duration = jiffies - rsp->gp_start; - if (gp_duration > rsp->gp_max) - rsp->gp_max = gp_duration; + gp_duration = jiffies - rcu_state.gp_start; + if (gp_duration > rcu_state.gp_max) + rcu_state.gp_max = gp_duration; /* * We know the grace period is complete, but to everyone else @@ -2019,7 +2013,7 @@ static void rcu_gp_cleanup(void) * the rcu_node structures before the beginning of the next grace * period is recorded in any of the rcu_node structures. */ - new_gp_seq = rsp->gp_seq; + new_gp_seq = rcu_state.gp_seq; rcu_seq_end(&new_gp_seq); rcu_for_each_node_breadth_first(rnp) { raw_spin_lock_irq_rcu_node(rnp); @@ -2036,16 +2030,16 @@ static void rcu_gp_cleanup(void) raw_spin_unlock_irq_rcu_node(rnp); rcu_nocb_gp_cleanup(sq); cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); rcu_gp_slow(gp_cleanup_delay); } rnp = rcu_get_root(); - raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */ + raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ /* Declare grace period done. */ - rcu_seq_end(&rsp->gp_seq); - trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end")); - rsp->gp_state = RCU_GP_IDLE; + rcu_seq_end(&rcu_state.gp_seq); + trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); + rcu_state.gp_state = RCU_GP_IDLE; /* Check for GP requests since above loop. 
*/ rdp = this_cpu_ptr(&rcu_data); if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { @@ -2055,12 +2049,14 @@ static void rcu_gp_cleanup(void) } /* Advance CBs to reduce false positives below. */ if (!rcu_accelerate_cbs(rnp, rdp) && needgp) { - WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); - rsp->gp_req_activity = jiffies; - trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), + WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); + rcu_state.gp_req_activity = jiffies; + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("newreq")); } else { - WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT); + WRITE_ONCE(rcu_state.gp_flags, + rcu_state.gp_flags & RCU_GP_FLAG_INIT); } raw_spin_unlock_irq_rcu_node(rnp); } @@ -2074,7 +2070,6 @@ static int __noreturn rcu_gp_kthread(void *unused) int gf; unsigned long j; int ret; - struct rcu_state *rsp = &rcu_state; struct rcu_node *rnp = rcu_get_root(); rcu_bind_gp_kthread(); @@ -2082,21 +2077,22 @@ static int __noreturn rcu_gp_kthread(void *unused) /* Handle grace-period start. */ for (;;) { - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("reqwait")); - rsp->gp_state = RCU_GP_WAIT_GPS; - swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) & - RCU_GP_FLAG_INIT); - rsp->gp_state = RCU_GP_DONE_GPS; + rcu_state.gp_state = RCU_GP_WAIT_GPS; + swait_event_idle_exclusive(rcu_state.gp_wq, + READ_ONCE(rcu_state.gp_flags) & + RCU_GP_FLAG_INIT); + rcu_state.gp_state = RCU_GP_DONE_GPS; /* Locking provides needed memory barrier. */ if (rcu_gp_init()) break; cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); WARN_ON(signal_pending(current)); - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("reqwaitsig")); } @@ -2106,58 +2102,59 @@ static int __noreturn rcu_gp_kthread(void *unused) ret = 0; for (;;) { if (!ret) { - rsp->jiffies_force_qs = jiffies + j; - WRITE_ONCE(rsp->jiffies_kick_kthreads, + rcu_state.jiffies_force_qs = jiffies + j; + WRITE_ONCE(rcu_state.jiffies_kick_kthreads, jiffies + 3 * j); } - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("fqswait")); - rsp->gp_state = RCU_GP_WAIT_FQS; - ret = swait_event_idle_timeout_exclusive(rsp->gp_wq, + rcu_state.gp_state = RCU_GP_WAIT_FQS; + ret = swait_event_idle_timeout_exclusive(rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); - rsp->gp_state = RCU_GP_DOING_FQS; + rcu_state.gp_state = RCU_GP_DOING_FQS; /* Locking provides needed memory barriers. */ /* If grace period done, leave loop. */ if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) break; /* If time for quiescent-state forcing, do it. 
*/ - if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || + if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) || (gf & RCU_GP_FLAG_FQS)) { - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("fqsstart")); rcu_gp_fqs(first_gp_fqs); first_gp_fqs = false; - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("fqsend")); cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); ret = 0; /* Force full wait till next FQS. */ j = jiffies_till_next_fqs; } else { /* Deal with stray signal. */ cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); + WRITE_ONCE(rcu_state.gp_activity, jiffies); WARN_ON(signal_pending(current)); - trace_rcu_grace_period(rsp->name, - READ_ONCE(rsp->gp_seq), + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), TPS("fqswaitsig")); ret = 1; /* Keep old FQS timing. */ j = jiffies; - if (time_after(jiffies, rsp->jiffies_force_qs)) + if (time_after(jiffies, + rcu_state.jiffies_force_qs)) j = 1; else - j = rsp->jiffies_force_qs - j; + j = rcu_state.jiffies_force_qs - j; } } /* Handle grace-period end. */ - rsp->gp_state = RCU_GP_CLEANUP; + rcu_state.gp_state = RCU_GP_CLEANUP; rcu_gp_cleanup(); - rsp->gp_state = RCU_GP_CLEANED; + rcu_state.gp_state = RCU_GP_CLEANED; } } @@ -2173,11 +2170,10 @@ static int __noreturn rcu_gp_kthread(void *unused) static void rcu_report_qs_rsp(unsigned long flags) __releases(rcu_get_root()->lock) { - struct rcu_state *rsp = &rcu_state; - raw_lockdep_assert_held_rcu_node(rcu_get_root()); WARN_ON_ONCE(!rcu_gp_in_progress()); - WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); + WRITE_ONCE(rcu_state.gp_flags, + READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); rcu_gp_kthread_wake(); } -- cgit v1.2.3 From 3c779dfef2c45248c5916e5acb79570649374fd6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 15:54:02 -0700 Subject: rcu: Eliminate callback-invocation/invocation use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. This commit therefore replaces rsp with &rcu_state in rcu_do_batch(), invoke_rcu_callbacks(), and __call_rcu(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 88915372ba38..46bdb52aded1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2467,14 +2467,13 @@ static void rcu_do_batch(struct rcu_data *rdp) struct rcu_head *rhp; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); long bl, count; - struct rcu_state *rsp = &rcu_state; /* If no callbacks are ready, just return. 
*/ if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { - trace_rcu_batch_start(rsp->name, + trace_rcu_batch_start(rcu_state.name, rcu_segcblist_n_lazy_cbs(&rdp->cblist), rcu_segcblist_n_cbs(&rdp->cblist), 0); - trace_rcu_batch_end(rsp->name, 0, + trace_rcu_batch_end(rcu_state.name, 0, !rcu_segcblist_empty(&rdp->cblist), need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); @@ -2489,7 +2488,8 @@ static void rcu_do_batch(struct rcu_data *rdp) local_irq_save(flags); WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); bl = rdp->blimit; - trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist), + trace_rcu_batch_start(rcu_state.name, + rcu_segcblist_n_lazy_cbs(&rdp->cblist), rcu_segcblist_n_cbs(&rdp->cblist), bl); rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); local_irq_restore(flags); @@ -2498,7 +2498,7 @@ static void rcu_do_batch(struct rcu_data *rdp) rhp = rcu_cblist_dequeue(&rcl); for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { debug_rcu_head_unqueue(rhp); - if (__rcu_reclaim(rsp->name, rhp)) + if (__rcu_reclaim(rcu_state.name, rhp)) rcu_cblist_dequeued_lazy(&rcl); /* * Stop only if limit reached and CPU has something to do. @@ -2512,7 +2512,7 @@ static void rcu_do_batch(struct rcu_data *rdp) local_irq_save(flags); count = -rcl.len; - trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(), + trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); /* Update counts and requeue any remaining callbacks. */ @@ -2528,7 +2528,7 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ if (count == 0 && rdp->qlen_last_fqs_check != 0) { rdp->qlen_last_fqs_check = 0; - rdp->n_force_qs_snap = rsp->n_force_qs; + rdp->n_force_qs_snap = rcu_state.n_force_qs; } else if (count < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = count; @@ -2764,11 +2764,9 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused */ static void invoke_rcu_callbacks(struct rcu_data *rdp) { - struct rcu_state *rsp = &rcu_state; - if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; - if (likely(!rsp->boost)) { + if (likely(!rcu_state.boost)) { rcu_do_batch(rdp); return; } @@ -2844,7 +2842,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) { unsigned long flags; struct rcu_data *rdp; - struct rcu_state __maybe_unused *rsp = &rcu_state; /* Misaligned rcu_head! */ WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); @@ -2893,11 +2890,12 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) rcu_idle_count_callbacks_posted(); if (__is_kfree_rcu_offset((unsigned long)func)) - trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, + trace_rcu_kfree_callback(rcu_state.name, head, + (unsigned long)func, rcu_segcblist_n_lazy_cbs(&rdp->cblist), rcu_segcblist_n_cbs(&rdp->cblist)); else - trace_rcu_callback(rsp->name, head, + trace_rcu_callback(rcu_state.name, head, rcu_segcblist_n_lazy_cbs(&rdp->cblist), rcu_segcblist_n_cbs(&rdp->cblist)); -- cgit v1.2.3 From 67a0edbf3c4dfcf3d20dafaff0d8c1c0ed44c292 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 16:15:38 -0700 Subject: rcu: Eliminate quiescent-state and grace-period-nonstart use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. 
This commit therefore replaces rsp with &rcu_state in rcu_report_qs_rnp(), force_quiescent_state(), and rcu_check_gp_start_stall(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 46bdb52aded1..f329282dd305 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2198,7 +2198,6 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, { unsigned long oldmask = 0; struct rcu_node *rnp_c; - struct rcu_state __maybe_unused *rsp = &rcu_state; raw_lockdep_assert_held_rcu_node(rnp); @@ -2217,7 +2216,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && rcu_preempt_blocked_readers_cgp(rnp)); rnp->qsmask &= ~mask; - trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq, + trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, mask, rnp->qsmask, rnp->level, rnp->grplo, rnp->grphi, !!rnp->gp_tasks); @@ -2624,12 +2623,11 @@ static void force_quiescent_state(void) bool ret; struct rcu_node *rnp; struct rcu_node *rnp_old = NULL; - struct rcu_state *rsp = &rcu_state; /* Funnel through hierarchy to reduce memory contention. */ rnp = __this_cpu_read(rcu_data.mynode); for (; rnp != NULL; rnp = rnp->parent) { - ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || + ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || !raw_spin_trylock(&rnp->fqslock); if (rnp_old != NULL) raw_spin_unlock(&rnp_old->fqslock); @@ -2642,11 +2640,12 @@ static void force_quiescent_state(void) /* Reached the root of the rcu_node tree, acquire lock. */ raw_spin_lock_irqsave_rcu_node(rnp_old, flags); raw_spin_unlock(&rnp_old->fqslock); - if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); return; /* Someone beat us to it. */ } - WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); + WRITE_ONCE(rcu_state.gp_flags, + READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); rcu_gp_kthread_wake(); } @@ -2662,15 +2661,14 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) unsigned long flags; unsigned long j; struct rcu_node *rnp_root = rcu_get_root(); - struct rcu_state *rsp = &rcu_state; static atomic_t warned = ATOMIC_INIT(0); if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed)) return; j = jiffies; /* Expensive access, and in common case don't get here. 
*/ - if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) || - time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) || + if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || + time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || atomic_read(&warned)) return; @@ -2678,8 +2676,8 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) j = jiffies; if (rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || - time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) || - time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) || + time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || + time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || atomic_read(&warned)) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; @@ -2691,19 +2689,19 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp) j = jiffies; if (rcu_gp_in_progress() || ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || - time_before(j, rsp->gp_req_activity + gpssdelay) || - time_before(j, rsp->gp_activity + gpssdelay) || + time_before(j, rcu_state.gp_req_activity + gpssdelay) || + time_before(j, rcu_state.gp_activity + gpssdelay) || atomic_xchg(&warned, 1)) { raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */ raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; } pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n", - __func__, (long)READ_ONCE(rsp->gp_seq), + __func__, (long)READ_ONCE(rcu_state.gp_seq), (long)READ_ONCE(rnp_root->gp_seq_needed), - j - rsp->gp_req_activity, j - rsp->gp_activity, - rsp->gp_flags, rsp->gp_state, rsp->name, - rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL); + j - rcu_state.gp_req_activity, j - rcu_state.gp_activity, + rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name, + rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL); WARN_ON(1); if (rnp_root != rnp) raw_spin_unlock_rcu_node(rnp_root); -- cgit v1.2.3 From ec9f5835f74cba5cc2285d3032bb2b16afc312c3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 16:26:12 -0700 Subject: rcu: Eliminate RCU-barrier use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. This commit therefore replaces rsp with &rcu_state in rcu_barrier_callback(), rcu_barrier_func(), and _rcu_barrier(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 65 +++++++++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f329282dd305..ce5fb177a0f7 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3129,32 +3129,31 @@ static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done) */ static void rcu_barrier_callback(struct rcu_head *rhp) { - struct rcu_state *rsp = &rcu_state; - - if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { - _rcu_barrier_trace(TPS("LastCB"), -1, rsp->barrier_sequence); - complete(&rsp->barrier_completion); + if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { + _rcu_barrier_trace(TPS("LastCB"), -1, + rcu_state.barrier_sequence); + complete(&rcu_state.barrier_completion); } else { - _rcu_barrier_trace(TPS("CB"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence); } } /* * Called with preemption disabled, and from cross-cpu IRQ context. 
*/ -static void rcu_barrier_func(void *type) +static void rcu_barrier_func(void *unused) { - struct rcu_state *rsp = type; struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); - _rcu_barrier_trace(TPS("IRQ"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); rdp->barrier_head.func = rcu_barrier_callback; debug_rcu_head_queue(&rdp->barrier_head); if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { - atomic_inc(&rsp->barrier_cpu_count); + atomic_inc(&rcu_state.barrier_cpu_count); } else { debug_rcu_head_unqueue(&rdp->barrier_head); - _rcu_barrier_trace(TPS("IRQNQ"), -1, rsp->barrier_sequence); + _rcu_barrier_trace(TPS("IRQNQ"), -1, + rcu_state.barrier_sequence); } } @@ -3166,25 +3165,25 @@ static void _rcu_barrier(void) { int cpu; struct rcu_data *rdp; - struct rcu_state *rsp = &rcu_state; - unsigned long s = rcu_seq_snap(&rsp->barrier_sequence); + unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); _rcu_barrier_trace(TPS("Begin"), -1, s); /* Take mutex to serialize concurrent rcu_barrier() requests. */ - mutex_lock(&rsp->barrier_mutex); + mutex_lock(&rcu_state.barrier_mutex); /* Did someone else do our work for us? */ - if (rcu_seq_done(&rsp->barrier_sequence, s)) { - _rcu_barrier_trace(TPS("EarlyExit"), -1, rsp->barrier_sequence); + if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { + _rcu_barrier_trace(TPS("EarlyExit"), -1, + rcu_state.barrier_sequence); smp_mb(); /* caller's subsequent code after above check. */ - mutex_unlock(&rsp->barrier_mutex); + mutex_unlock(&rcu_state.barrier_mutex); return; } /* Mark the start of the barrier operation. */ - rcu_seq_start(&rsp->barrier_sequence); - _rcu_barrier_trace(TPS("Inc1"), -1, rsp->barrier_sequence); + rcu_seq_start(&rcu_state.barrier_sequence); + _rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); /* * Initialize the count to one rather than to zero in order to @@ -3192,8 +3191,8 @@ static void _rcu_barrier(void) * (or preemption of this task). Exclude CPU-hotplug operations * to ensure that no offline CPU has callbacks queued. */ - init_completion(&rsp->barrier_completion); - atomic_set(&rsp->barrier_cpu_count, 1); + init_completion(&rcu_state.barrier_completion); + atomic_set(&rcu_state.barrier_cpu_count, 1); get_online_cpus(); /* @@ -3208,22 +3207,22 @@ static void _rcu_barrier(void) if (rcu_is_nocb_cpu(cpu)) { if (!rcu_nocb_cpu_needs_barrier(cpu)) { _rcu_barrier_trace(TPS("OfflineNoCB"), cpu, - rsp->barrier_sequence); + rcu_state.barrier_sequence); } else { _rcu_barrier_trace(TPS("OnlineNoCB"), cpu, - rsp->barrier_sequence); + rcu_state.barrier_sequence); smp_mb__before_atomic(); - atomic_inc(&rsp->barrier_cpu_count); + atomic_inc(&rcu_state.barrier_cpu_count); __call_rcu(&rdp->barrier_head, rcu_barrier_callback, cpu, 0); } } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { _rcu_barrier_trace(TPS("OnlineQ"), cpu, - rsp->barrier_sequence); - smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); + rcu_state.barrier_sequence); + smp_call_function_single(cpu, rcu_barrier_func, NULL, 1); } else { _rcu_barrier_trace(TPS("OnlineNQ"), cpu, - rsp->barrier_sequence); + rcu_state.barrier_sequence); } } put_online_cpus(); @@ -3232,18 +3231,18 @@ static void _rcu_barrier(void) * Now that we have an rcu_barrier_callback() callback on each * CPU, and thus each counted, remove the initial count. 
*/ - if (atomic_dec_and_test(&rsp->barrier_cpu_count)) - complete(&rsp->barrier_completion); + if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) + complete(&rcu_state.barrier_completion); /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ - wait_for_completion(&rsp->barrier_completion); + wait_for_completion(&rcu_state.barrier_completion); /* Mark the end of the barrier operation. */ - _rcu_barrier_trace(TPS("Inc2"), -1, rsp->barrier_sequence); - rcu_seq_end(&rsp->barrier_sequence); + _rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); + rcu_seq_end(&rcu_state.barrier_sequence); /* Other rcu_barrier() invocations can now safely proceed. */ - mutex_unlock(&rsp->barrier_mutex); + mutex_unlock(&rcu_state.barrier_mutex); } /** -- cgit v1.2.3 From eb7a6653887b540a81d1b91ee0fc68b604da9386 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 17:47:45 -0700 Subject: rcu: Eliminate initialization-time use of rsp Now that there is only one rcu_state structure, there is less point in maintaining a pointer to it. This commit therefore replaces rsp with &rcu_state in rcu_cpu_starting() and rcu_init_one(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ce5fb177a0f7..5e3a3001a50d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3455,7 +3455,6 @@ void rcu_cpu_starting(unsigned int cpu) unsigned long oldmask; struct rcu_data *rdp; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; if (per_cpu(rcu_cpu_started, cpu)) return; @@ -3472,10 +3471,10 @@ void rcu_cpu_starting(unsigned int cpu) oldmask ^= rnp->expmaskinitnext; nbits = bitmap_weight(&oldmask, BITS_PER_LONG); /* Allow lockless access for expedited grace periods. */ - smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */ + smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */ rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ - rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq); - rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags); + rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); + rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ /* Report QS -after- changing ->qsmaskinitnext! */ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); @@ -3667,7 +3666,6 @@ static void __init rcu_init_one(void) int i; int j; struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_state; BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ @@ -3678,14 +3676,15 @@ static void __init rcu_init_one(void) /* Initialize the level-tracking arrays. */ for (i = 1; i < rcu_num_lvls; i++) - rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1]; + rcu_state.level[i] = + rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; rcu_init_levelspread(levelspread, num_rcu_lvl); /* Initialize the elements themselves, starting from the leaves. 
*/ for (i = rcu_num_lvls - 1; i >= 0; i--) { cpustride *= levelspread[i]; - rnp = rsp->level[i]; + rnp = rcu_state.level[i]; for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), @@ -3693,9 +3692,9 @@ static void __init rcu_init_one(void) raw_spin_lock_init(&rnp->fqslock); lockdep_set_class_and_name(&rnp->fqslock, &rcu_fqs_class[i], fqs[i]); - rnp->gp_seq = rsp->gp_seq; - rnp->gp_seq_needed = rsp->gp_seq; - rnp->completedqs = rsp->gp_seq; + rnp->gp_seq = rcu_state.gp_seq; + rnp->gp_seq_needed = rcu_state.gp_seq; + rnp->completedqs = rcu_state.gp_seq; rnp->qsmask = 0; rnp->qsmaskinit = 0; rnp->grplo = j * cpustride; @@ -3709,7 +3708,7 @@ static void __init rcu_init_one(void) } else { rnp->grpnum = j % levelspread[i - 1]; rnp->grpmask = 1UL << rnp->grpnum; - rnp->parent = rsp->level[i - 1] + + rnp->parent = rcu_state.level[i - 1] + j / levelspread[i - 1]; } rnp->level = i; @@ -3723,8 +3722,8 @@ static void __init rcu_init_one(void) } } - init_swait_queue_head(&rsp->gp_wq); - init_swait_queue_head(&rsp->expedited_wq); + init_swait_queue_head(&rcu_state.gp_wq); + init_swait_queue_head(&rcu_state.expedited_wq); rnp = rcu_first_leaf_node(); for_each_possible_cpu(i) { while (i > rnp->grphi) -- cgit v1.2.3 From 8ff0b90780910821a53c70d5e68d28382f2a1a07 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 17:55:14 -0700 Subject: rcu: Fix typo in force_qs_rnp()'s parameter's parameter Pointers to rcu_data structures should be named rdp, not rsp. This commit therefore makes this change. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5e3a3001a50d..c1ce4cf41068 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -477,7 +477,7 @@ module_param(rcu_kick_kthreads, bool, 0644); static ulong jiffies_till_sched_qs = HZ / 10; module_param(jiffies_till_sched_qs, ulong, 0444); -static void force_qs_rnp(int (*f)(struct rcu_data *rsp)); +static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); static void force_quiescent_state(void); static int rcu_pending(void); @@ -2570,7 +2570,7 @@ void rcu_check_callbacks(int user) * * The caller must have suppressed start of new grace periods. */ -static void force_qs_rnp(int (*f)(struct rcu_data *rsp)) +static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) { int cpu; unsigned long flags; -- cgit v1.2.3 From 4e95020cdd34bbfc86f9c705f4d46ed63fa2e231 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 17:59:36 -0700 Subject: rcu: Inline increment_cpu_stall_ticks() into its sole caller Consolidation of the RCU flavors into one makes increment_cpu_stall_ticks() a trivial one-line function with only one caller. This commit therefore inlines it. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 1 - kernel/rcu/tree_plugin.h | 6 ------ 3 files changed, 1 insertion(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c1ce4cf41068..ee130b0dc54a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2555,7 +2555,7 @@ static void rcu_do_batch(struct rcu_data *rdp) void rcu_check_callbacks(int user) { trace_rcu_utilization(TPS("Start scheduler-tick")); - increment_cpu_stall_ticks(); + raw_cpu_inc(rcu_data.ticks_this_gp); rcu_flavor_check_callbacks(user); if (rcu_pending()) invoke_rcu_core(); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8abc15c42d84..46452d3d0fad 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -466,7 +466,6 @@ static void print_cpu_stall_info_begin(void); static void print_cpu_stall_info(int cpu); static void print_cpu_stall_info_end(void); static void zero_cpu_stall_ticks(struct rcu_data *rdp); -static void increment_cpu_stall_ticks(void); static bool rcu_nocb_cpu_needs_barrier(int cpu); static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 878a1d2cd465..cd276c46bc14 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1815,12 +1815,6 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); } -/* Increment ->ticks_this_gp for all flavors of RCU. */ -static void increment_cpu_stall_ticks(void) -{ - raw_cpu_inc(rcu_data.ticks_this_gp); -} - #ifdef CONFIG_RCU_NOCB_CPU /* -- cgit v1.2.3 From c3854a055bc834806b481b34f5f552ac415b2000 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Jul 2018 18:23:23 -0700 Subject: rcu: Pull rcu_gp_kthread() FQS loop into separate function The rcu_gp_kthread() function is long and deeply indented, so this commit pulls the loop that repeatedly invokes rcu_gp_fqs() into a new rcu_gp_fqs_loop() function. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 125 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 66 insertions(+), 59 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ee130b0dc54a..53ba7747878c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1976,6 +1976,71 @@ static void rcu_gp_fqs(bool first_time) } } +/* + * Loop doing repeated quiescent-state forcing until the grace period ends. + */ +static void rcu_gp_fqs_loop(void) +{ + bool first_gp_fqs; + int gf; + unsigned long j; + int ret; + struct rcu_node *rnp = rcu_get_root(); + + first_gp_fqs = true; + j = jiffies_till_first_fqs; + ret = 0; + for (;;) { + if (!ret) { + rcu_state.jiffies_force_qs = jiffies + j; + WRITE_ONCE(rcu_state.jiffies_kick_kthreads, + jiffies + 3 * j); + } + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), + TPS("fqswait")); + rcu_state.gp_state = RCU_GP_WAIT_FQS; + ret = swait_event_idle_timeout_exclusive( + rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); + rcu_state.gp_state = RCU_GP_DOING_FQS; + /* Locking provides needed memory barriers. */ + /* If grace period done, leave loop. */ + if (!READ_ONCE(rnp->qsmask) && + !rcu_preempt_blocked_readers_cgp(rnp)) + break; + /* If time for quiescent-state forcing, do it. 
*/ + if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) || + (gf & RCU_GP_FLAG_FQS)) { + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), + TPS("fqsstart")); + rcu_gp_fqs(first_gp_fqs); + first_gp_fqs = false; + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), + TPS("fqsend")); + cond_resched_tasks_rcu_qs(); + WRITE_ONCE(rcu_state.gp_activity, jiffies); + ret = 0; /* Force full wait till next FQS. */ + j = jiffies_till_next_fqs; + } else { + /* Deal with stray signal. */ + cond_resched_tasks_rcu_qs(); + WRITE_ONCE(rcu_state.gp_activity, jiffies); + WARN_ON(signal_pending(current)); + trace_rcu_grace_period(rcu_state.name, + READ_ONCE(rcu_state.gp_seq), + TPS("fqswaitsig")); + ret = 1; /* Keep old FQS timing. */ + j = jiffies; + if (time_after(jiffies, rcu_state.jiffies_force_qs)) + j = 1; + else + j = rcu_state.jiffies_force_qs - j; + } + } +} + /* * Clean up after the old grace period. */ @@ -2066,12 +2131,6 @@ static void rcu_gp_cleanup(void) */ static int __noreturn rcu_gp_kthread(void *unused) { - bool first_gp_fqs; - int gf; - unsigned long j; - int ret; - struct rcu_node *rnp = rcu_get_root(); - rcu_bind_gp_kthread(); for (;;) { @@ -2097,59 +2156,7 @@ static int __noreturn rcu_gp_kthread(void *unused) } /* Handle quiescent-state forcing. */ - first_gp_fqs = true; - j = jiffies_till_first_fqs; - ret = 0; - for (;;) { - if (!ret) { - rcu_state.jiffies_force_qs = jiffies + j; - WRITE_ONCE(rcu_state.jiffies_kick_kthreads, - jiffies + 3 * j); - } - trace_rcu_grace_period(rcu_state.name, - READ_ONCE(rcu_state.gp_seq), - TPS("fqswait")); - rcu_state.gp_state = RCU_GP_WAIT_FQS; - ret = swait_event_idle_timeout_exclusive(rcu_state.gp_wq, - rcu_gp_fqs_check_wake(&gf), j); - rcu_state.gp_state = RCU_GP_DOING_FQS; - /* Locking provides needed memory barriers. */ - /* If grace period done, leave loop. */ - if (!READ_ONCE(rnp->qsmask) && - !rcu_preempt_blocked_readers_cgp(rnp)) - break; - /* If time for quiescent-state forcing, do it. */ - if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) || - (gf & RCU_GP_FLAG_FQS)) { - trace_rcu_grace_period(rcu_state.name, - READ_ONCE(rcu_state.gp_seq), - TPS("fqsstart")); - rcu_gp_fqs(first_gp_fqs); - first_gp_fqs = false; - trace_rcu_grace_period(rcu_state.name, - READ_ONCE(rcu_state.gp_seq), - TPS("fqsend")); - cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rcu_state.gp_activity, jiffies); - ret = 0; /* Force full wait till next FQS. */ - j = jiffies_till_next_fqs; - } else { - /* Deal with stray signal. */ - cond_resched_tasks_rcu_qs(); - WRITE_ONCE(rcu_state.gp_activity, jiffies); - WARN_ON(signal_pending(current)); - trace_rcu_grace_period(rcu_state.name, - READ_ONCE(rcu_state.gp_seq), - TPS("fqswaitsig")); - ret = 1; /* Keep old FQS timing. */ - j = jiffies; - if (time_after(jiffies, - rcu_state.jiffies_force_qs)) - j = 1; - else - j = rcu_state.jiffies_force_qs - j; - } - } + rcu_gp_fqs_loop(); /* Handle grace-period end. */ rcu_state.gp_state = RCU_GP_CLEANUP; -- cgit v1.2.3 From 4c7e9c1434c6fc960774a5475f2fbccbf557fdeb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 6 Jul 2018 09:54:25 -0700 Subject: rcu: Consolidate RCU-bh update-side function definitions This commit saves a few lines by consolidating the RCU-bh function definitions at the end of include/linux/rcupdate.h. This consolidation also makes it easier to remove them all when the time comes. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 27 ++++++++++++++++++++++----- include/linux/rcutiny.h | 15 --------------- include/linux/rcutree.h | 17 ----------------- kernel/rcu/tree.c | 9 --------- 4 files changed, 22 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1207c6c9bd8b..e530f5739033 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -58,11 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void synchronize_rcu(void); -static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); @@ -875,4 +870,26 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ +/* Transitional pre-consolidation compatibility definitions. */ + +static inline void synchronize_rcu_bh(void) +{ + synchronize_rcu(); +} + +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_rcu_expedited(); +} + +static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void rcu_barrier_bh(void) +{ + rcu_barrier(); +} + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e66fb8bc2127..df82bada9b19 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -68,21 +68,6 @@ static inline void rcu_barrier_sched(void) rcu_barrier(); /* Only one CPU, so only one list of callbacks! */ } -static inline void rcu_barrier_bh(void) -{ - rcu_barrier(); -} - -static inline void synchronize_rcu_bh(void) -{ - synchronize_sched(); -} - -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_sched(); -} - static inline void synchronize_rcu_expedited(void) { synchronize_sched(); diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 6d30a0809300..94820156aa62 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -45,11 +45,6 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } -static inline void synchronize_rcu_bh(void) -{ - synchronize_rcu(); -} - void synchronize_rcu_expedited(void); static inline void synchronize_sched_expedited(void) @@ -59,19 +54,7 @@ static inline void synchronize_sched_expedited(void) void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); -/** - * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period - * - * This is a transitional API and will soon be removed, with all - * callers converted to synchronize_rcu_expedited(). - */ -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_rcu_expedited(); -} - void rcu_barrier(void); -void rcu_barrier_bh(void); void rcu_barrier_sched(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 53ba7747878c..8d5dadaf3c53 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3252,15 +3252,6 @@ static void _rcu_barrier(void) mutex_unlock(&rcu_state.barrier_mutex); } -/** - * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. - */ -void rcu_barrier_bh(void) -{ - _rcu_barrier(); -} -EXPORT_SYMBOL_GPL(rcu_barrier_bh); - /** * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. * -- cgit v1.2.3 From a8bb74acd8efe2eb934d524ae20859980975b602 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Fri, 6 Jul 2018 11:46:47 -0700 Subject: rcu: Consolidate RCU-sched update-side function definitions This commit saves a few lines by consolidating the RCU-sched function definitions at the end of include/linux/rcupdate.h. This consolidation also makes it easier to remove them all when the time comes. Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 38 +++++++++++++++++++++++++------ include/linux/rcutiny.h | 32 +------------------------- include/linux/rcutree.h | 9 -------- kernel/rcu/tree.c | 58 ------------------------------------------------ 4 files changed, 32 insertions(+), 105 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e530f5739033..12103e1bbe67 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -48,12 +48,6 @@ #define ulong2long(a) (*(long *)(&(a))) /* Exported common interfaces */ - -#ifndef CONFIG_TINY_RCU -void synchronize_sched(void); -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); -#endif - void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void synchronize_rcu(void); @@ -170,7 +164,7 @@ void exit_tasks_rcu_finish(void); #define rcu_tasks_qs(t) do { } while (0) #define rcu_note_voluntary_context_switch(t) rcu_all_qs() #define call_rcu_tasks call_rcu_sched -#define synchronize_rcu_tasks synchronize_sched +#define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU */ @@ -892,4 +886,34 @@ static inline void rcu_barrier_bh(void) rcu_barrier(); } +static inline void synchronize_sched(void) +{ + synchronize_rcu(); +} + +static inline void synchronize_sched_expedited(void) +{ + synchronize_rcu_expedited(); +} + +static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void rcu_barrier_sched(void) +{ + rcu_barrier(); +} + +static inline unsigned long get_state_synchronize_sched(void) +{ + return get_state_synchronize_rcu(); +} + +static inline void cond_synchronize_sched(unsigned long oldstate) +{ + cond_synchronize_rcu(oldstate); +} + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index df82bada9b19..7fa4fb9e899e 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -36,11 +36,6 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } -static inline void synchronize_sched(void) -{ - synchronize_rcu(); -} - static inline unsigned long get_state_synchronize_rcu(void) { return 0; @@ -51,36 +46,11 @@ static inline void cond_synchronize_rcu(unsigned long oldstate) might_sleep(); } -static inline unsigned long get_state_synchronize_sched(void) -{ - return 0; -} - -static inline void cond_synchronize_sched(unsigned long oldstate) -{ - might_sleep(); -} - extern void rcu_barrier(void); -static inline void rcu_barrier_sched(void) -{ - rcu_barrier(); /* Only one CPU, so only one list of callbacks! 
*/ -} - static inline void synchronize_rcu_expedited(void) { - synchronize_sched(); -} - -static inline void synchronize_sched_expedited(void) -{ - synchronize_sched(); -} - -static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); + synchronize_rcu(); } static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 94820156aa62..d09a9abe9440 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -46,21 +46,12 @@ static inline void rcu_virt_note_context_switch(int cpu) } void synchronize_rcu_expedited(void); - -static inline void synchronize_sched_expedited(void) -{ - synchronize_rcu_expedited(); -} - void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier(void); -void rcu_barrier_sched(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); -unsigned long get_state_synchronize_sched(void); -void cond_synchronize_sched(unsigned long oldstate); void rcu_idle_enter(void); void rcu_idle_exit(void); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8d5dadaf3c53..1a2551a4d583 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2950,19 +2950,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu); -/** - * call_rcu_sched() - Queue an RCU for invocation after sched grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual callback function to be invoked after the grace period - * - * This is transitional. - */ -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} -EXPORT_SYMBOL_GPL(call_rcu_sched); - /* * Queue an RCU callback for lazy invocation after a grace period. * This will likely be later named something like "call_rcu_lazy()", @@ -2976,17 +2963,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(kfree_call_rcu); -/** - * synchronize_sched - wait until an rcu-sched grace period has elapsed. - * - * This is transitional. - */ -void synchronize_sched(void) -{ - synchronize_rcu(); -} -EXPORT_SYMBOL_GPL(synchronize_sched); - /** * get_state_synchronize_rcu - Snapshot current RCU state * @@ -3028,29 +3004,6 @@ void cond_synchronize_rcu(unsigned long oldstate) } EXPORT_SYMBOL_GPL(cond_synchronize_rcu); -/** - * get_state_synchronize_sched - Snapshot current RCU-sched state - * - * This is transitional, and only used by rcutorture. - */ -unsigned long get_state_synchronize_sched(void) -{ - return get_state_synchronize_rcu(); -} -EXPORT_SYMBOL_GPL(get_state_synchronize_sched); - -/** - * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period - * @oldstate: return value from earlier call to get_state_synchronize_sched() - * - * This is transitional and only used by rcutorture. - */ -void cond_synchronize_sched(unsigned long oldstate) -{ - cond_synchronize_rcu(oldstate); -} -EXPORT_SYMBOL_GPL(cond_synchronize_sched); - /* * Check to see if there is any immediate RCU-related work to be done by * the current CPU, for the specified type of RCU, returning 1 if so and @@ -3266,17 +3219,6 @@ void rcu_barrier(void) } EXPORT_SYMBOL_GPL(rcu_barrier); -/** - * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. - * - * This is transitional. 
- */ -void rcu_barrier_sched(void) -{ - rcu_barrier(); -} -EXPORT_SYMBOL_GPL(rcu_barrier_sched); - /* * Propagate ->qsinitmask bits up the rcu_node tree to account for the * first CPU in a given leaf rcu_node structure coming online. The caller -- cgit v1.2.3 From 2ceebc035082a42f1416d4b47270c0acb5354949 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 6 Jul 2018 15:16:12 -0700 Subject: rcutorture: Add RCU-bh and RCU-sched support for extended readers Since there is now a single consolidated RCU flavor, rcutorture needs to test extending of RCU readers via rcu_read_lock_bh() and rcu_read_lock_sched(). This commit adds this support, with added checks (just like for local_bh_enable()) to ensure that rcu_read_unlock_bh() will not be invoked while interrupts are disabled. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index c55d1483886e..1bc0e37dffa8 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -66,13 +66,16 @@ MODULE_AUTHOR("Paul E. McKenney and Josh Triplett extendables field, extendables param, and related definitions. */ #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) -#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */ -#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */ -#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */ -#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */ -#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */ -#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \ - RCUTORTURE_RDR_PREEMPT) +#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ +#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ +#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ +#define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ +#define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ +#define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ +#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ +#define RCUTORTURE_MAX_EXTEND \ + (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ + RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ /* Must be power of two minus one. */ @@ -1217,6 +1220,10 @@ static void rcutorture_one_extend(int *readstate, int newstate, local_irq_disable(); if (statesnew & RCUTORTURE_RDR_PREEMPT) preempt_disable(); + if (statesnew & RCUTORTURE_RDR_RBH) + rcu_read_lock_bh(); + if (statesnew & RCUTORTURE_RDR_SCHED) + rcu_read_lock_sched(); if (statesnew & RCUTORTURE_RDR_RCU) idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; @@ -1227,6 +1234,10 @@ static void rcutorture_one_extend(int *readstate, int newstate, local_bh_enable(); if (statesold & RCUTORTURE_RDR_PREEMPT) preempt_enable(); + if (statesold & RCUTORTURE_RDR_RBH) + rcu_read_unlock_bh(); + if (statesold & RCUTORTURE_RDR_SCHED) + rcu_read_unlock_sched(); if (statesold & RCUTORTURE_RDR_RCU) cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); @@ -1269,10 +1280,11 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) mask = mask & randmask2; else mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); + /* Can't enable bh w/irq disabled. 
*/ if ((mask & RCUTORTURE_RDR_IRQ) && - !(mask & RCUTORTURE_RDR_BH) && - (oldmask & RCUTORTURE_RDR_BH)) - mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */ + ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || + (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) + mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; if ((mask & RCUTORTURE_RDR_IRQ) && !(mask & cur_ops->ext_irq_conflict) && (oldmask & cur_ops->ext_irq_conflict)) -- cgit v1.2.3 From 72ce30dd1f9bdbd6913ba868d0d2ca55c268eff3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 10:24:23 -0700 Subject: rcu: Stop testing RCU-bh and RCU-sched Now that the RCU-bh and RCU-sched update-side functions are simple wrappers around their RCU counterparts, there isn't a whole lot of point in testing them. This commit therefore removes the self-test capability and removes the corresponding kernel-boot parameters. It also updates the various rcutorture .boot files to remove the kernel boot parameters that call for testing RCU-bh and RCU-sched. Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 6 ---- kernel/rcu/update.c | 38 +--------------------- .../selftests/rcutorture/configs/rcu/TINY02.boot | 2 -- .../selftests/rcutorture/configs/rcu/TREE01.boot | 2 +- .../selftests/rcutorture/configs/rcu/TREE04.boot | 2 +- .../selftests/rcutorture/configs/rcu/TREE05.boot | 2 -- .../selftests/rcutorture/configs/rcu/TREE06.boot | 2 -- .../selftests/rcutorture/configs/rcu/TREE08.boot | 2 -- 8 files changed, 3 insertions(+), 53 deletions(-) (limited to 'kernel') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9871e649ffef..aa96e669bcb8 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3863,12 +3863,6 @@ rcupdate.rcu_self_test= [KNL] Run the RCU early boot self tests - rcupdate.rcu_self_test_bh= [KNL] - Run the RCU bh early boot self tests - - rcupdate.rcu_self_test_sched= [KNL] - Run the RCU sched early boot self tests - rdinit= [KNL] Format: Run specified binary instead of /init from the ramdisk, diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 9ea87d0aa386..ee366faecea6 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -203,11 +203,7 @@ void rcu_test_sync_prims(void) if (!IS_ENABLED(CONFIG_PROVE_RCU)) return; synchronize_rcu(); - synchronize_rcu_bh(); - synchronize_sched(); synchronize_rcu_expedited(); - synchronize_rcu_bh_expedited(); - synchronize_sched_expedited(); } #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) @@ -870,15 +866,10 @@ static void __init rcu_tasks_bootup_oddness(void) #ifdef CONFIG_PROVE_RCU /* - * Early boot self test parameters, one for each flavor + * Early boot self test parameters. 
*/ static bool rcu_self_test; -static bool rcu_self_test_bh; -static bool rcu_self_test_sched; - module_param(rcu_self_test, bool, 0444); -module_param(rcu_self_test_bh, bool, 0444); -module_param(rcu_self_test_sched, bool, 0444); static int rcu_self_test_counter; @@ -895,30 +886,12 @@ static void early_boot_test_call_rcu(void) call_rcu(&head, test_callback); } -static void early_boot_test_call_rcu_bh(void) -{ - static struct rcu_head head; - - call_rcu_bh(&head, test_callback); -} - -static void early_boot_test_call_rcu_sched(void) -{ - static struct rcu_head head; - - call_rcu_sched(&head, test_callback); -} - void rcu_early_boot_tests(void) { pr_info("Running RCU self tests\n"); if (rcu_self_test) early_boot_test_call_rcu(); - if (rcu_self_test_bh) - early_boot_test_call_rcu_bh(); - if (rcu_self_test_sched) - early_boot_test_call_rcu_sched(); rcu_test_sync_prims(); } @@ -931,15 +904,6 @@ static int rcu_verify_early_boot_tests(void) early_boot_test_counter++; rcu_barrier(); } - if (rcu_self_test_bh) { - early_boot_test_counter++; - rcu_barrier_bh(); - } - if (rcu_self_test_sched) { - early_boot_test_counter++; - rcu_barrier_sched(); - } - if (rcu_self_test_counter != early_boot_test_counter) { WARN_ON(1); ret = -1; diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot index 6c1a292a65fb..b39f1553a478 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot @@ -1,3 +1 @@ rcupdate.rcu_self_test=1 -rcupdate.rcu_self_test_bh=1 -rcutorture.torture_type=rcu_bh diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot index 9f3a4d28e508..ea47da95374b 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot @@ -1,4 +1,4 @@ -rcutorture.torture_type=rcu_bh maxcpus=8 nr_cpus=43 +maxcpus=8 nr_cpus=43 rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot index e6071bb96c7d..5adc6756792a 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot @@ -1 +1 @@ -rcutorture.torture_type=rcu_bh rcutree.rcu_fanout_leaf=4 nohz_full=1-7 +rcutree.rcu_fanout_leaf=4 nohz_full=1-7 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot index c7fd050dfcd9..779f1aed4606 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot @@ -1,5 +1,3 @@ -rcutorture.torture_type=sched -rcupdate.rcu_self_test_sched=1 rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot index ad18b52a2cad..055f4aa79077 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot @@ -1,6 +1,4 @@ rcupdate.rcu_self_test=1 -rcupdate.rcu_self_test_bh=1 -rcupdate.rcu_self_test_sched=1 rcutree.rcu_fanout_exact=1 rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot 
b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot index 1bd8efc4141e..22478fd3a865 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot @@ -1,5 +1,3 @@ -rcutorture.torture_type=sched rcupdate.rcu_self_test=1 -rcupdate.rcu_self_test_sched=1 rcutree.rcu_fanout_exact=1 rcu_nocbs=0-7 -- cgit v1.2.3 From c770c82a238237d7e97b9101b9e44db14203de47 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 10:28:07 -0700 Subject: rcutorture: Remove the "rcu_bh" and "sched" torture types Now that the RCU-bh and RCU-sched update-side functions are simple wrappers around their RCU counterparts, there isn't a whole lot of point in testing them. This commit therefore removes the "rcu_bh" and "sched" torture types from rcutorture. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 89 ++----------------------------------------------- 1 file changed, 3 insertions(+), 86 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 1bc0e37dffa8..a228ad762fba 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -128,7 +128,7 @@ torture_param(int, verbose, 1, static char *torture_type = "rcu"; module_param(torture_type, charp, 0444); -MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); +MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); static int nrealreaders; static int ncbflooders; @@ -438,47 +438,6 @@ static struct rcu_torture_ops rcu_ops = { .name = "rcu" }; -/* - * Definitions for rcu_bh torture testing. - */ - -static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH) -{ - rcu_read_lock_bh(); - return 0; -} - -static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) -{ - rcu_read_unlock_bh(); -} - -static void rcu_bh_torture_deferred_free(struct rcu_torture *p) -{ - call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); -} - -static struct rcu_torture_ops rcu_bh_ops = { - .ttype = RCU_BH_FLAVOR, - .init = rcu_sync_torture_init, - .readlock = rcu_bh_torture_read_lock, - .read_delay = rcu_read_delay, /* just reuse rcu's version. */ - .readunlock = rcu_bh_torture_read_unlock, - .get_gp_seq = rcu_bh_get_gp_seq, - .gp_diff = rcu_seq_diff, - .deferred_free = rcu_bh_torture_deferred_free, - .sync = synchronize_rcu_bh, - .exp_sync = synchronize_rcu_bh_expedited, - .call = call_rcu_bh, - .cb_barrier = rcu_barrier_bh, - .fqs = rcu_bh_force_quiescent_state, - .stats = NULL, - .irq_capable = 1, - .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ), - .ext_irq_conflict = RCUTORTURE_RDR_RCU, - .name = "rcu_bh" -}; - /* * Don't even think about trying any of these in real life!!! * The names includes "busted", and they really means it! @@ -666,48 +625,6 @@ static struct rcu_torture_ops busted_srcud_ops = { .name = "busted_srcud" }; -/* - * Definitions for sched torture testing. - */ - -static int sched_torture_read_lock(void) -{ - preempt_disable(); - return 0; -} - -static void sched_torture_read_unlock(int idx) -{ - preempt_enable(); -} - -static void rcu_sched_torture_deferred_free(struct rcu_torture *p) -{ - call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); -} - -static struct rcu_torture_ops sched_ops = { - .ttype = RCU_SCHED_FLAVOR, - .init = rcu_sync_torture_init, - .readlock = sched_torture_read_lock, - .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ - .readunlock = sched_torture_read_unlock, - .get_gp_seq = rcu_sched_get_gp_seq, - .gp_diff = rcu_seq_diff, - .deferred_free = rcu_sched_torture_deferred_free, - .sync = synchronize_sched, - .exp_sync = synchronize_sched_expedited, - .get_state = get_state_synchronize_sched, - .cond_sync = cond_synchronize_sched, - .call = call_rcu_sched, - .cb_barrier = rcu_barrier_sched, - .fqs = rcu_sched_force_quiescent_state, - .stats = NULL, - .irq_capable = 1, - .extendables = RCUTORTURE_MAX_EXTEND, - .name = "sched" -}; - /* * Definitions for RCU-tasks torture testing. */ @@ -1956,8 +1873,8 @@ rcu_torture_init(void) int cpu; int firsterr = 0; static struct rcu_torture_ops *torture_ops[] = { - &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, - &busted_srcud_ops, &sched_ops, &tasks_ops, + &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, + &busted_srcud_ops, &tasks_ops, }; if (!torture_init_begin(torture_type, verbose)) -- cgit v1.2.3 From 620d246065cdca4c4a8ad9ed28a191665cd3d457 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:25:10 -0700 Subject: rcuperf: Remove the "rcu_bh" and "sched" torture types Now that the RCU-bh and RCU-sched update-side functions are simple wrappers around their RCU counterparts, there isn't a whole lot of point in testing them. This commit therefore removes the "rcu_bh" and "sched" torture types from rcuperf. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuperf.c | 65 ++-------------------------------------------------- 1 file changed, 2 insertions(+), 63 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 34244523550e..8de53f3dc5b0 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -189,36 +189,6 @@ static struct rcu_perf_ops rcu_ops = { .name = "rcu" }; -/* - * Definitions for rcu_bh perf testing. - */ - -static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH) -{ - rcu_read_lock_bh(); - return 0; -} - -static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH) -{ - rcu_read_unlock_bh(); -} - -static struct rcu_perf_ops rcu_bh_ops = { - .ptype = RCU_BH_FLAVOR, - .init = rcu_sync_perf_init, - .readlock = rcu_bh_perf_read_lock, - .readunlock = rcu_bh_perf_read_unlock, - .get_gp_seq = rcu_bh_get_gp_seq, - .gp_diff = rcu_seq_diff, - .exp_completed = rcu_exp_batches_completed_sched, - .async = call_rcu_bh, - .gp_barrier = rcu_barrier_bh, - .sync = synchronize_rcu_bh, - .exp_sync = synchronize_rcu_bh_expedited, - .name = "rcu_bh" -}; - /* * Definitions for srcu perf testing. */ @@ -305,36 +275,6 @@ static struct rcu_perf_ops srcud_ops = { .name = "srcud" }; -/* - * Definitions for sched perf testing. - */ - -static int sched_perf_read_lock(void) -{ - preempt_disable(); - return 0; -} - -static void sched_perf_read_unlock(int idx) -{ - preempt_enable(); -} - -static struct rcu_perf_ops sched_ops = { - .ptype = RCU_SCHED_FLAVOR, - .init = rcu_sync_perf_init, - .readlock = sched_perf_read_lock, - .readunlock = sched_perf_read_unlock, - .get_gp_seq = rcu_sched_get_gp_seq, - .gp_diff = rcu_seq_diff, - .exp_completed = rcu_exp_batches_completed_sched, - .async = call_rcu_sched, - .gp_barrier = rcu_barrier_sched, - .sync = synchronize_sched, - .exp_sync = synchronize_sched_expedited, - .name = "sched" -}; - /* * Definitions for RCU-tasks perf testing. */ @@ -611,7 +551,7 @@ rcu_perf_cleanup(void) kfree(writer_n_durations); } - /* Do flavor-specific cleanup operations. */ + /* Do torture-type-specific cleanup operations. 
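[Illustration] Both of these removals lean on the same ops-table design: each torture type is a structure of function pointers registered in one array and selected by name at init time, so retiring a type only means deleting its structure and its array entry. A minimal standalone sketch of that lookup (hypothetical mock_* names, not the rcutorture code):

#include <stdio.h>
#include <string.h>

struct mock_torture_ops {
        const char *name;
        void (*sync)(void);     /* grace-period wait for this type */
};

static void mock_sync_rcu(void)  { printf("sync: rcu\n");  }
static void mock_sync_srcu(void) { printf("sync: srcu\n"); }

static struct mock_torture_ops rcu_mock_ops  = { .name = "rcu",  .sync = mock_sync_rcu  };
static struct mock_torture_ops srcu_mock_ops = { .name = "srcu", .sync = mock_sync_srcu };

/* One array entry per remaining torture type. */
static struct mock_torture_ops *mock_ops[] = { &rcu_mock_ops, &srcu_mock_ops };

int main(void)
{
        const char *torture_type = "srcu";
        size_t i;

        /* Select the requested type by name, as the real init code does. */
        for (i = 0; i < sizeof(mock_ops) / sizeof(mock_ops[0]); i++) {
                if (!strcmp(mock_ops[i]->name, torture_type)) {
                        mock_ops[i]->sync();
                        return 0;
                }
        }
        fprintf(stderr, "unknown torture type: %s\n", torture_type);
        return 1;
}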
*/ if (cur_ops->cleanup != NULL) cur_ops->cleanup(); @@ -661,8 +601,7 @@ rcu_perf_init(void) long i; int firsterr = 0; static struct rcu_perf_ops *perf_ops[] = { - &rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops, - &tasks_ops, + &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops, }; if (!torture_init_begin(perf_type, verbose)) -- cgit v1.2.3 From de3875d3023310416d08eaab3c1a8527e9b452bf Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 21:43:17 -0700 Subject: rcu: Remove now-unused rcutorture APIs This commit removes rcu_sched_get_gp_seq(), rcu_bh_get_gp_seq(), rcu_exp_batches_completed_sched(), rcu_sched_force_quiescent_state(), and rcu_bh_force_quiescent_state(), which are no longer used because rcutorture no longer does "rcu_bh" and "rcu_sched" torture types. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 10 ---------- kernel/rcu/tree.c | 47 ----------------------------------------------- 2 files changed, 57 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 2bb77fddc11f..aa3dc08af4b3 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -509,29 +509,19 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type, #ifdef CONFIG_TINY_RCU static inline unsigned long rcu_get_gp_seq(void) { return 0; } -static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; } -static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; } static inline unsigned long rcu_exp_batches_completed(void) { return 0; } -static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; } static inline unsigned long srcu_batches_completed(struct srcu_struct *sp) { return 0; } static inline void rcu_force_quiescent_state(void) { } -static inline void rcu_bh_force_quiescent_state(void) { } -static inline void rcu_sched_force_quiescent_state(void) { } static inline void show_rcu_gp_kthreads(void) { } static inline int rcu_get_gp_kthreads_prio(void) { return 0; } #else /* #ifdef CONFIG_TINY_RCU */ unsigned long rcu_get_gp_seq(void); -unsigned long rcu_bh_get_gp_seq(void); -unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); -unsigned long rcu_exp_batches_completed_sched(void); unsigned long srcu_batches_completed(struct srcu_struct *sp); void show_rcu_gp_kthreads(void); int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); -void rcu_sched_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq; #endif /* #else #ifdef CONFIG_TINY_RCU */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1a2551a4d583..5e14a19c066c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -490,25 +490,6 @@ unsigned long rcu_get_gp_seq(void) } EXPORT_SYMBOL_GPL(rcu_get_gp_seq); -/* - * Return the number of RCU-sched GPs completed thus far for debug & stats. - */ -unsigned long rcu_sched_get_gp_seq(void) -{ - return rcu_get_gp_seq(); -} -EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); - -/* - * Return the number of RCU GPs completed thus far for debug & stats. - * This is a transitional API and will soon be removed. - */ -unsigned long rcu_bh_get_gp_seq(void) -{ - return READ_ONCE(rcu_state.gp_seq); -} -EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); - /* * Return the number of RCU expedited batches completed thus far for * debug & stats. 
Odd numbers mean that a batch is in progress, even @@ -521,16 +502,6 @@ unsigned long rcu_exp_batches_completed(void) } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); -/* - * Return the number of RCU-sched expedited batches completed thus far - * for debug & stats. Similar to rcu_exp_batches_completed(). - */ -unsigned long rcu_exp_batches_completed_sched(void) -{ - return rcu_state.expedited_sequence; -} -EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); - /* * Force a quiescent state. */ @@ -540,24 +511,6 @@ void rcu_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); -/* - * Force a quiescent state for RCU BH. - */ -void rcu_bh_force_quiescent_state(void) -{ - force_quiescent_state(); -} -EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); - -/* - * Force a quiescent state for RCU-sched. - */ -void rcu_sched_force_quiescent_state(void) -{ - rcu_force_quiescent_state(); -} -EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); - /* * Show the state of the grace-period kthreads. */ -- cgit v1.2.3 From 8c1cf2da6f8af7f6b6e0e06d3a83115712cc04b8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in Kconfig Signed-off-by: Paul E. McKenney --- kernel/rcu/Kconfig | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index a0b7f0103ca9..939a2056c87a 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -196,7 +196,7 @@ config RCU_BOOST This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. This option also prevents heavy loads from blocking RCU - callback invocation for all flavors of RCU. + callback invocation. Say Y here if you are working with real-time apps or heavy loads Say N here if you are unsure. @@ -225,15 +225,15 @@ config RCU_NOCB_CPU callback invocation to energy-efficient CPUs in battery-powered asymmetric multiprocessors. - This option offloads callback invocation from the set of - CPUs specified at boot time by the rcu_nocbs parameter. - For each such CPU, a kthread ("rcuox/N") will be created to - invoke callbacks, where the "N" is the CPU being offloaded, - and where the "p" for RCU-preempt and "s" for RCU-sched. - Nothing prevents this kthread from running on the specified - CPUs, but (1) the kthreads may be preempted between each - callback, and (2) affinity or cgroups can be used to force - the kthreads to run on whatever set of CPUs is desired. + This option offloads callback invocation from the set of CPUs + specified at boot time by the rcu_nocbs parameter. For each + such CPU, a kthread ("rcuox/N") will be created to invoke + callbacks, where the "N" is the CPU being offloaded, and where + the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched + (!PREEMPT kernels). Nothing prevents this kthread from running + on the specified CPUs, but (1) the kthreads may be preempted + between each callback, and (2) affinity or cgroups can be used + to force the kthreads to run on whatever set of CPUs is desired. Say Y here if you want to help to debug reduced OS jitter. Say N here if you are unsure. -- cgit v1.2.3 From 7f87c036fea3c17eb6a6e4f4164c67aeb98710ea Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in rcu.h Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu.h | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index aa3dc08af4b3..5dec94509a7e 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -176,8 +176,9 @@ static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old) /* * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally - * by call_rcu() and rcu callback execution, and are therefore not part of the - * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. + * by call_rcu() and rcu callback execution, and are therefore not part + * of the RCU API. These are in rcupdate.h because they are used by all + * RCU implementations. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD @@ -328,7 +329,7 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) } } -/* Returns first leaf rcu_node of the specified RCU flavor. */ +/* Returns a pointer to the first leaf rcu_node structure. */ #define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1]) /* Is this rcu_node a leaf? */ @@ -339,7 +340,8 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) /* * Do a full breadth-first scan of the {s,}rcu_node structures for the - * specified rcu_state structure. + * specified state structure (for SRCU) or the only rcu_state structure + * (for RCU). */ #define srcu_for_each_node_breadth_first(sp, rnp) \ for ((rnp) = &(sp)->node[0]; \ @@ -348,10 +350,10 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt) srcu_for_each_node_breadth_first(&rcu_state, rnp) /* - * Scan the leaves of the rcu_node hierarchy for the specified rcu_state - * structure. Note that if there is a singleton rcu_node tree with but - * one rcu_node structure, this loop -will- visit the rcu_node structure. - * It is still a leaf node, even if it is also the root node. + * Scan the leaves of the rcu_node hierarchy for the rcu_state structure. + * Note that if there is a singleton rcu_node tree with but one rcu_node + * structure, this loop -will- visit the rcu_node structure. It is still + * a leaf node, even if it is also the root node. */ #define rcu_for_each_leaf_node(rnp) \ for ((rnp) = rcu_first_leaf_node(); \ -- cgit v1.2.3 From 62a1a945368ff8b4011dfc791f89152ef3da0ecf Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in rcutorture.c Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index a228ad762fba..294b3f6b7eb6 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1221,7 +1221,7 @@ static void rcutorture_loop_extend(int *readstate, WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ if (!((mask - 1) & mask)) - return; /* Current RCU flavor not extendable. */ + return; /* Current RCU reader not extendable. */ i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS; while (i--) { mask = rcutorture_extend_mask(*readstate, trsp); @@ -1790,7 +1790,7 @@ rcu_torture_cleanup(void) cpuhp_remove_state(rcutor_hp); /* - * Wait for all RCU callbacks to fire, then do flavor-specific + * Wait for all RCU callbacks to fire, then do torture-type-specific * cleanup operations. 
*/ if (cur_ops->cb_barrier != NULL) -- cgit v1.2.3 From 6eb95cc4507a765de06d30028390da1b4a9c8e5c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in srcutree.h Signed-off-by: Paul E. McKenney --- kernel/rcu/srcutree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 2042080cd38b..7f266b0f9832 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -980,7 +980,7 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); * There are memory-ordering constraints implied by synchronize_srcu(). * On systems with more than one CPU, when synchronize_srcu() returns, * each CPU is guaranteed to have executed a full memory barrier since - * the end of its last corresponding SRCU-sched read-side critical section + * the end of its last corresponding SRCU read-side critical section * whose beginning preceded the call to synchronize_srcu(). In addition, * each CPU having an SRCU read-side critical section that extends beyond * the return from synchronize_srcu() is guaranteed to have executed a -- cgit v1.2.3 From 679d3f30923eb687ce3bcd3dfaf108a2809d5a57 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in tiny.c Signed-off-by: Paul E. McKenney --- kernel/rcu/tiny.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 30826fb6e438..a77853b73bfe 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -117,9 +117,9 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused /* * Wait for a grace period to elapse. But it is illegal to invoke - * synchronize_sched() from within an RCU read-side critical section. - * Therefore, any legal call to synchronize_sched() is a quiescent - * state, and so on a UP system, synchronize_sched() need do nothing. + * synchronize_rcu() from within an RCU read-side critical section. + * Therefore, any legal call to synchronize_rcu() is a quiescent + * state, and so on a UP system, synchronize_rcu() need do nothing. * (But Lai Jiangshan points out the benefits of doing might_sleep() * to reduce latency.) * @@ -130,12 +130,12 @@ void synchronize_rcu(void) RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_sched() in RCU read-side critical section"); + "Illegal synchronize_rcu() in RCU read-side critical section"); } EXPORT_SYMBOL_GPL(synchronize_rcu); /* - * Post an RCU callback to be invoked after the end of an RCU-sched grace + * Post an RCU callback to be invoked after the end of an RCU grace * period. But since we have but one CPU, that would be after any * quiescent state. */ -- cgit v1.2.3 From 49918a54e63c99899aa3aa64d456c5bf14122e5a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in tree.c Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 74 ++++++++++++++++++++++++------------------------------- 1 file changed, 32 insertions(+), 42 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 5e14a19c066c..c8761e7c7c00 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -382,12 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void) } /* - * Register a quiescent state for all RCU flavors. If there is an + * Register an urgently needed quiescent state. If there is an * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight - * dyntick-idle quiescent state visible to other CPUs (but only for those - * RCU flavors in desperate need of a quiescent state, which will normally - * be none of them). Either way, do a lightweight quiescent state for - * all RCU flavors. + * dyntick-idle quiescent state visible to other CPUs, which will in + * some cases serve for expedited as well as normal grace periods. + * Either way, register a lightweight quiescent state. * * The barrier() calls are redundant in the common case when this is * called externally, but just in case this is called from within this @@ -564,7 +563,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); /* - * Return the root node of the specified rcu_state structure. + * Return the root node of the rcu_state structure. */ static struct rcu_node *rcu_get_root(void) { @@ -949,11 +948,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t) * Disable preemption to avoid false positives that could otherwise * happen due to the current CPU number being sampled, this task being * preempted, its old CPU being taken offline, resuming on some other CPU, - * then determining that its old CPU is now offline. Because there are - * multiple flavors of RCU, and because this function can be called in the - * midst of updating the flavors while a given CPU coming online or going - * offline, it is necessary to check all flavors. If any of the flavors - * believe that given CPU is online, it is considered to be online. + * then determining that its old CPU is now offline. * * Disable checking if in an NMI handler because we cannot safely * report errors from NMI handlers anyway. In addition, it is OK to use @@ -1563,11 +1558,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) } /* - * Awaken the grace-period kthread for the specified flavor of RCU. - * Don't do a self-awaken, and don't bother awakening when there is - * nothing for the grace-period kthread to do (as in several CPUs - * raced to awaken, and we lost), and finally don't try to awaken - * a kthread that has not yet been created. + * Awaken the grace-period kthread. Don't do a self-awaken, and don't + * bother awakening when there is nothing for the grace-period kthread + * to do (as in several CPUs raced to awaken, and we lost), and finally + * don't try to awaken a kthread that has not yet been created. */ static void rcu_gp_kthread_wake(void) { @@ -2119,13 +2113,13 @@ static int __noreturn rcu_gp_kthread(void *unused) } /* - * Report a full set of quiescent states to the specified rcu_state data - * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period - * kthread if another grace period is required. Whether we wake - * the grace-period kthread or it awakens itself for the next round - * of quiescent-state forcing, that kthread will clean up after the - * just-completed grace period. 
Note that the caller must hold rnp->lock, - * which is released before return. + * Report a full set of quiescent states to the rcu_state data structure. + * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if + * another grace period is required. Whether we wake the grace-period + * kthread or it awakens itself for the next round of quiescent-state + * forcing, that kthread will clean up after the just-completed grace + * period. Note that the caller must hold rnp->lock, which is released + * before return. */ static void rcu_report_qs_rsp(unsigned long flags) __releases(rcu_get_root()->lock) @@ -2212,7 +2206,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, /* * Record a quiescent state for all tasks that were previously queued * on the specified rcu_node structure and that were blocking the current - * RCU grace period. The caller must hold the specified rnp->lock with + * RCU grace period. The caller must hold the corresponding rnp->lock with * irqs disabled, and this lock is released upon return, but irqs remain * disabled. */ @@ -2714,11 +2708,11 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused } /* - * Schedule RCU callback invocation. If the specified type of RCU - * does not support RCU priority boosting, just do a direct call, - * otherwise wake up the per-CPU kernel kthread. Note that because we - * are running on the current CPU with softirqs disabled, the - * rcu_cpu_kthread_task cannot disappear out from under us. + * Schedule RCU callback invocation. If the running implementation of RCU + * does not support RCU priority boosting, just do a direct call, otherwise + * wake up the per-CPU kernel kthread. Note that because we are running + * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task + * cannot disappear out from under us. */ static void invoke_rcu_callbacks(struct rcu_data *rdp) { @@ -2959,11 +2953,10 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu); /* * Check to see if there is any immediate RCU-related work to be done by - * the current CPU, for the specified type of RCU, returning 1 if so and - * zero otherwise. The checks are in order of increasing expense: checks - * that can be carried out against CPU-local state are performed first. - * However, we must check for CPU stalls first, else we might not get - * a chance. + * the current CPU, returning 1 if so and zero otherwise. The checks are + * in order of increasing expense: checks that can be carried out against + * CPU-local state are performed first. However, we must check for CPU + * stalls first, else we might not get a chance. */ static int rcu_pending(void) { @@ -3070,10 +3063,7 @@ static void rcu_barrier_func(void *unused) } } -/* - * Orchestrate the specified type of RCU barrier, waiting for all - * RCU callbacks of the specified type to complete. - */ +/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */ static void _rcu_barrier(void) { int cpu; @@ -3393,7 +3383,7 @@ void rcu_report_dead(unsigned int cpu) struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ - /* QS for any half-done expedited RCU-sched GP. */ + /* QS for any half-done expedited grace period. */ preempt_disable(); rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); preempt_enable(); @@ -3482,7 +3472,7 @@ static int rcu_pm_notify(struct notifier_block *self, } /* - * Spawn the kthreads that handle each RCU flavor's grace periods. 
+ * Spawn the kthreads that handle RCU's grace periods. */ static int __init rcu_spawn_gp_kthread(void) { @@ -3545,7 +3535,7 @@ void rcu_scheduler_starting(void) } /* - * Helper function for rcu_init() that initializes one rcu_state structure. + * Helper function for rcu_init() that initializes the rcu_state structure. */ static void __init rcu_init_one(void) { @@ -3707,7 +3697,7 @@ static void __init rcu_init_geometry(void) /* * Dump out the structure of the rcu_node combining tree associated - * with the rcu_state structure referenced by rsp. + * with the rcu_state structure. */ static void __init rcu_dump_rcu_node_tree(void) { -- cgit v1.2.3 From 8fa946d42855c2e3a481bf105aa2b25cefebe111 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in tree_exp.h Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 060bdb45cd95..78553a8fa3c6 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -601,8 +601,8 @@ static void wait_rcu_exp_gp(struct work_struct *wp) } /* - * Given an rcu_state pointer and a smp_call_function() handler, kick - * off the specified flavor of expedited grace period. + * Given a smp_call_function() handler, kick off the specified + * implementation of expedited grace period. */ static void _synchronize_rcu_expedited(smp_call_func_t func) { @@ -721,7 +721,7 @@ static void sync_rcu_exp_handler(void *unused) resched_cpu(rdp->cpu); } -/* PREEMPT=y, so no RCU-sched to clean up after. */ +/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */ static void sync_sched_exp_online_cleanup(int cpu) { } @@ -798,13 +798,13 @@ static void sync_sched_exp_online_cleanup(int cpu) } /* - * Because a context switch is a grace period for RCU-sched, any blocking - * grace-period wait automatically implies a grace period if there - * is only one CPU online at any point time during execution of either - * synchronize_sched() or synchronize_rcu_bh(). It is OK to occasionally - * incorrectly indicate that there are multiple CPUs online when there - * was in fact only one the whole time, as this just adds some overhead: - * RCU still operates correctly. + * Because a context switch is a grace period for !PREEMPT, any + * blocking grace-period wait automatically implies a grace period if + * there is only one CPU online at any point time during execution of + * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to + * occasionally incorrectly indicate that there are multiple CPUs online + * when there was in fact only one the whole time, as this just adds some + * overhead: RCU still operates correctly. */ static int rcu_blocking_is_gp(void) { @@ -823,7 +823,7 @@ void synchronize_rcu_expedited(void) RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_sched_expedited() in RCU read-side critical section"); + "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); /* If only one CPU, this is automatically a grace period. */ if (rcu_blocking_is_gp()) -- cgit v1.2.3 From 0ae86a272656b34edfe90a637363d10f470c65d8 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Sat, 7 Jul 2018 18:12:26 -0700 Subject: rcu: Clean up flavor-related definitions and comments in tree_plugin.h Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index cd276c46bc14..cd4c1b979446 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -38,8 +38,7 @@ #include "../locking/rtmutex_common.h" /* - * Control variables for per-CPU and per-rcu_node kthreads. These - * handle all flavors of RCU. + * Control variables for per-CPU and per-rcu_node kthreads. */ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); @@ -826,8 +825,8 @@ static void rcu_flavor_check_callbacks(int user) * * Note that this guarantee implies further memory-ordering guarantees. * On systems with more than one CPU, when synchronize_rcu() returns, - * each CPU is guaranteed to have executed a full memory barrier since the - * end of its last RCU-sched read-side critical section whose beginning + * each CPU is guaranteed to have executed a full memory barrier since + * the end of its last RCU read-side critical section whose beginning * preceded the call to synchronize_rcu(). In addition, each CPU having * an RCU read-side critical section that extends beyond the return from * synchronize_rcu() is guaranteed to have executed a full memory barrier @@ -1069,7 +1068,7 @@ void synchronize_rcu(void) RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map), - "Illegal synchronize_rcu() in RCU-sched read-side critical section"); + "Illegal synchronize_rcu() in RCU read-side critical section"); if (rcu_blocking_is_gp()) return; if (rcu_gp_is_expedited()) @@ -1341,9 +1340,9 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu) } /* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * RCU softirq used in flavors and configurations of RCU that do not - * support RCU priority boosting. + * Per-CPU kernel thread that invokes RCU callbacks. This replaces + * the RCU softirq used in configurations of RCU that do not support RCU + * priority boosting. */ static void rcu_cpu_kthread(unsigned int cpu) { @@ -1484,8 +1483,8 @@ static void rcu_prepare_kthreads(int cpu) * 1 if so. This function is part of the RCU implementation; it is -not- * an exported member of the RCU API. * - * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs - * any flavor of RCU. + * Because we not have RCU_FAST_NO_HZ, just check whether or not this + * CPU has RCU callbacks queued. */ int rcu_needs_cpu(u64 basemono, u64 *nextevt) { @@ -1551,9 +1550,9 @@ static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; module_param(rcu_idle_lazy_gp_delay, int, 0644); /* - * Try to advance callbacks for all flavors of RCU on the current CPU, but - * only if it has been awhile since the last time we did so. Afterwards, - * if there are any callbacks ready for immediate invocation, return true. + * Try to advance callbacks on the current CPU, but only if it has been + * awhile since the last time we did so. Afterwards, if there are any + * callbacks ready for immediate invocation, return true. */ static bool __maybe_unused rcu_try_advance_all_cbs(void) { @@ -1808,7 +1807,7 @@ static void print_cpu_stall_info_end(void) pr_err("\t"); } -/* Zero ->ticks_this_gp for all flavors of RCU. 
*/ +/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */ static void zero_cpu_stall_ticks(struct rcu_data *rdp) { rdp->ticks_this_gp = 0; @@ -1939,7 +1938,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype, } /* - * Does the specified CPU need an RCU callback for the specified flavor + * Does the specified CPU need an RCU callback for this invocation * of rcu_barrier()? */ static bool rcu_nocb_cpu_needs_barrier(int cpu) @@ -2419,9 +2418,8 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) /* * If the specified CPU is a no-CBs CPU that does not already have its - * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are - * brought online out of order, this can require re-organizing the - * leader-follower relationships. + * rcuo kthread, spawn it. If the CPUs are brought online out of order, + * this can require re-organizing the leader-follower relationships. */ static void rcu_spawn_one_nocb_kthread(int cpu) { @@ -2458,7 +2456,7 @@ static void rcu_spawn_one_nocb_kthread(int cpu) rdp_spawn->nocb_next_follower = rdp_old_leader; } - /* Spawn the kthread for this CPU and RCU flavor. */ + /* Spawn the kthread for this CPU. */ t = kthread_run(rcu_nocb_kthread, rdp_spawn, "rcuo%c/%d", rcu_state.abbr, cpu); BUG_ON(IS_ERR(t)); -- cgit v1.2.3 From 06462efc808c956f462ec5c3b5e10bbee0be2545 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 8 Jul 2018 10:58:37 -0700 Subject: rcu: Clean up flavor-related definitions and comments in update.c Signed-off-by: Paul E. McKenney --- kernel/rcu/update.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index ee366faecea6..fa089ead4bd6 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -332,7 +332,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, int i; int j; - /* Initialize and register callbacks for each flavor specified. */ + /* Initialize and register callbacks for each crcu_array element. */ for (i = 0; i < n; i++) { if (checktiny && (crcu_array[i] == call_rcu || @@ -697,19 +697,19 @@ static int __noreturn rcu_tasks_kthread(void *arg) /* * Wait for all pre-existing t->on_rq and t->nvcsw - * transitions to complete. Invoking synchronize_sched() + * transitions to complete. Invoking synchronize_rcu() * suffices because all these transitions occur with - * interrupts disabled. Without this synchronize_sched(), + * interrupts disabled. Without this synchronize_rcu(), * a read-side critical section that started before the * grace period might be incorrectly seen as having started * after the grace period. * - * This synchronize_sched() also dispenses with the + * This synchronize_rcu() also dispenses with the * need for a memory barrier on the first store to * ->rcu_tasks_holdout, as it forces the store to happen * after the beginning of the grace period. */ - synchronize_sched(); + synchronize_rcu(); /* * There were callbacks, so we need to wait for an @@ -736,7 +736,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) * This does only part of the job, ensuring that all * tasks that were previously exiting reach the point * where they have disabled preemption, allowing the - * later synchronize_sched() to finish the job. + * later synchronize_rcu() to finish the job. 
*/ synchronize_srcu(&tasks_rcu_exit_srcu); @@ -786,20 +786,20 @@ static int __noreturn rcu_tasks_kthread(void *arg) * cause their RCU-tasks read-side critical sections to * extend past the end of the grace period. However, * because these ->nvcsw updates are carried out with - * interrupts disabled, we can use synchronize_sched() + * interrupts disabled, we can use synchronize_rcu() * to force the needed ordering on all such CPUs. * - * This synchronize_sched() also confines all + * This synchronize_rcu() also confines all * ->rcu_tasks_holdout accesses to be within the grace * period, avoiding the need for memory barriers for * ->rcu_tasks_holdout accesses. * - * In addition, this synchronize_sched() waits for exiting + * In addition, this synchronize_rcu() waits for exiting * tasks to complete their final preempt_disable() region * of execution, cleaning up after the synchronize_srcu() * above. */ - synchronize_sched(); + synchronize_rcu(); /* Invoke the callbacks. */ while (list) { -- cgit v1.2.3 From 395a2f097ebdddf2bfa286b6119f1b231025c2f1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Jul 2018 14:00:14 -0700 Subject: rcu: Define rcu_all_qs() only in !PREEMPT builds Now that rcu_all_qs() is used only in !PREEMPT builds, move it to tree_plugin.h so that it is defined only in those builds. This in turn means that rcu_momentary_dyntick_idle() is only used in !PREEMPT builds, but it is simply marked __maybe_unused in order to keep it near the rest of the dyntick-idle code. Signed-off-by: Paul E. McKenney --- include/linux/rcutree.h | 2 ++ kernel/rcu/tree.c | 41 +---------------------------------------- kernel/rcu/tree_plugin.h | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d09a9abe9440..7f83179177d1 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -66,7 +66,9 @@ void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_is_watching(void); +#ifndef CONFIG_PREEMPT void rcu_all_qs(void); +#endif /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c8761e7c7c00..e140aaa78527 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -356,7 +356,7 @@ bool rcu_eqs_special_set(int cpu) * * The caller must have disabled interrupts and must not be idle. */ -static void rcu_momentary_dyntick_idle(void) +static void __maybe_unused rcu_momentary_dyntick_idle(void) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int special; @@ -381,45 +381,6 @@ static int rcu_is_cpu_rrupt_from_idle(void) __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1; } -/* - * Register an urgently needed quiescent state. If there is an - * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight - * dyntick-idle quiescent state visible to other CPUs, which will in - * some cases serve for expedited as well as normal grace periods. - * Either way, register a lightweight quiescent state. - * - * The barrier() calls are redundant in the common case when this is - * called externally, but just in case this is called from within this - * file. - * - */ -void rcu_all_qs(void) -{ - unsigned long flags; - - if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs)) - return; - preempt_disable(); - /* Load rcu_urgent_qs before other flags. 
*/ - if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { - preempt_enable(); - return; - } - this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); - barrier(); /* Avoid RCU read-side critical sections leaking down. */ - if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) { - local_irq_save(flags); - rcu_momentary_dyntick_idle(); - local_irq_restore(flags); - } - if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp))) - rcu_qs(); - this_cpu_inc(rcu_dynticks.rcu_qs_ctr); - barrier(); /* Avoid RCU read-side critical sections leaking up. */ - preempt_enable(); -} -EXPORT_SYMBOL_GPL(rcu_all_qs); - #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */ static long blimit = DEFAULT_RCU_BLIMIT; #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index cd4c1b979446..7add1c297500 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -947,6 +947,45 @@ static void rcu_qs(void) rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); } +/* + * Register an urgently needed quiescent state. If there is an + * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight + * dyntick-idle quiescent state visible to other CPUs, which will in + * some cases serve for expedited as well as normal grace periods. + * Either way, register a lightweight quiescent state. + * + * The barrier() calls are redundant in the common case when this is + * called externally, but just in case this is called from within this + * file. + * + */ +void rcu_all_qs(void) +{ + unsigned long flags; + + if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs)) + return; + preempt_disable(); + /* Load rcu_urgent_qs before other flags. */ + if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { + preempt_enable(); + return; + } + this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); + barrier(); /* Avoid RCU read-side critical sections leaking down. */ + if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) { + local_irq_save(flags); + rcu_momentary_dyntick_idle(); + local_irq_restore(flags); + } + if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp))) + rcu_qs(); + this_cpu_inc(rcu_dynticks.rcu_qs_ctr); + barrier(); /* Avoid RCU read-side critical sections leaking up. */ + preempt_enable(); +} +EXPORT_SYMBOL_GPL(rcu_all_qs); + /* * Note a PREEMPT=n context switch. The caller must have disabled interrupts. */ -- cgit v1.2.3 From dd46a7882c2c2006201e053ebf5e9ad761860cc0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Jul 2018 18:37:30 -0700 Subject: rcu: Inline _rcu_barrier() into its sole remaining caller Because rcu_barrier() is a one-line wrapper function for _rcu_barrier() and because nothing else calls _rcu_barrier(), this commit inlines _rcu_barrier() into rcu_barrier(). Signed-off-by: Paul E. McKenney --- include/trace/events/rcu.h | 20 ++++++++-------- kernel/rcu/tree.c | 58 +++++++++++++++++++++------------------------- kernel/rcu/tree.h | 4 ++-- kernel/rcu/tree_plugin.h | 2 +- 4 files changed, 39 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index a8d07feff6a0..175e0bce22bd 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -705,20 +705,20 @@ TRACE_EVENT(rcu_torture_read, ); /* - * Tracepoint for _rcu_barrier() execution. The string "s" describes - * the _rcu_barrier phase: - * "Begin": _rcu_barrier() started. - * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. 
- * "Inc1": _rcu_barrier() piggyback check counter incremented. - * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU - * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU. - * "OnlineQ": _rcu_barrier() found online CPU with callbacks. - * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks. + * Tracepoint for rcu_barrier() execution. The string "s" describes + * the rcu_barrier phase: + * "Begin": rcu_barrier() started. + * "EarlyExit": rcu_barrier() piggybacked, thus early exit. + * "Inc1": rcu_barrier() piggyback check counter incremented. + * "OfflineNoCB": rcu_barrier() found callback on never-online CPU + * "OnlineNoCB": rcu_barrier() found online no-CBs CPU. + * "OnlineQ": rcu_barrier() found online CPU with callbacks. + * "OnlineNQ": rcu_barrier() found online CPU, no callbacks. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. * "IRQNQ": An rcu_barrier_callback() callback found no callbacks. * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "LastCB": An rcu_barrier_callback() invoked the last callback. - * "Inc2": _rcu_barrier() piggyback check counter incremented. + * "Inc2": rcu_barrier() piggyback check counter incremented. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument * is the count of remaining callbacks, and "done" is the piggybacking count. */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e140aaa78527..ce16b8da2c6f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2747,7 +2747,7 @@ static void rcu_leak_callback(struct rcu_head *rhp) /* * Helper function for call_rcu() and friends. The cpu argument will * normally be -1, indicating "currently running CPU". It may specify - * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() + * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier() * is expected to specify a CPU. */ static void @@ -2981,27 +2981,27 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy) } /* - * Helper function for _rcu_barrier() tracing. If tracing is disabled, + * Helper function for rcu_barrier() tracing. If tracing is disabled, * the compiler is expected to optimize this away. */ -static void _rcu_barrier_trace(const char *s, int cpu, unsigned long done) +static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) { trace_rcu_barrier(rcu_state.name, s, cpu, atomic_read(&rcu_state.barrier_cpu_count), done); } /* - * RCU callback function for _rcu_barrier(). If we are last, wake - * up the task executing _rcu_barrier(). + * RCU callback function for rcu_barrier(). If we are last, wake + * up the task executing rcu_barrier(). 
*/ static void rcu_barrier_callback(struct rcu_head *rhp) { if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { - _rcu_barrier_trace(TPS("LastCB"), -1, + rcu_barrier_trace(TPS("LastCB"), -1, rcu_state.barrier_sequence); complete(&rcu_state.barrier_completion); } else { - _rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence); + rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence); } } @@ -3012,33 +3012,40 @@ static void rcu_barrier_func(void *unused) { struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); - _rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); + rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); rdp->barrier_head.func = rcu_barrier_callback; debug_rcu_head_queue(&rdp->barrier_head); if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { atomic_inc(&rcu_state.barrier_cpu_count); } else { debug_rcu_head_unqueue(&rdp->barrier_head); - _rcu_barrier_trace(TPS("IRQNQ"), -1, + rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); } } -/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */ -static void _rcu_barrier(void) +/** + * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. + * + * Note that this primitive does not necessarily wait for an RCU grace period + * to complete. For example, if there are no RCU callbacks queued anywhere + * in the system, then rcu_barrier() is within its rights to return + * immediately, without waiting for anything, much less an RCU grace period. + */ +void rcu_barrier(void) { int cpu; struct rcu_data *rdp; unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); - _rcu_barrier_trace(TPS("Begin"), -1, s); + rcu_barrier_trace(TPS("Begin"), -1, s); /* Take mutex to serialize concurrent rcu_barrier() requests. */ mutex_lock(&rcu_state.barrier_mutex); /* Did someone else do our work for us? */ if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { - _rcu_barrier_trace(TPS("EarlyExit"), -1, + rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); smp_mb(); /* caller's subsequent code after above check. */ mutex_unlock(&rcu_state.barrier_mutex); @@ -3047,7 +3054,7 @@ static void _rcu_barrier(void) /* Mark the start of the barrier operation. */ rcu_seq_start(&rcu_state.barrier_sequence); - _rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); + rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); /* * Initialize the count to one rather than to zero in order to @@ -3070,10 +3077,10 @@ static void _rcu_barrier(void) rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_is_nocb_cpu(cpu)) { if (!rcu_nocb_cpu_needs_barrier(cpu)) { - _rcu_barrier_trace(TPS("OfflineNoCB"), cpu, + rcu_barrier_trace(TPS("OfflineNoCB"), cpu, rcu_state.barrier_sequence); } else { - _rcu_barrier_trace(TPS("OnlineNoCB"), cpu, + rcu_barrier_trace(TPS("OnlineNoCB"), cpu, rcu_state.barrier_sequence); smp_mb__before_atomic(); atomic_inc(&rcu_state.barrier_cpu_count); @@ -3081,11 +3088,11 @@ static void _rcu_barrier(void) rcu_barrier_callback, cpu, 0); } } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { - _rcu_barrier_trace(TPS("OnlineQ"), cpu, + rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); smp_call_function_single(cpu, rcu_barrier_func, NULL, 1); } else { - _rcu_barrier_trace(TPS("OnlineNQ"), cpu, + rcu_barrier_trace(TPS("OnlineNQ"), cpu, rcu_state.barrier_sequence); } } @@ -3102,25 +3109,12 @@ static void _rcu_barrier(void) wait_for_completion(&rcu_state.barrier_completion); /* Mark the end of the barrier operation. 
*/ - _rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); + rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); rcu_seq_end(&rcu_state.barrier_sequence); /* Other rcu_barrier() invocations can now safely proceed. */ mutex_unlock(&rcu_state.barrier_mutex); } - -/** - * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. - * - * Note that this primitive does not necessarily wait for an RCU grace period - * to complete. For example, if there are no RCU callbacks queued anywhere - * in the system, then rcu_barrier() is within its rights to return - * immediately, without waiting for anything, much less an RCU grace period. - */ -void rcu_barrier(void) -{ - _rcu_barrier(); -} EXPORT_SYMBOL_GPL(rcu_barrier); /* diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 46452d3d0fad..8cf93ac277ec 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -222,7 +222,7 @@ struct rcu_data { /* Grace period that needs help */ /* from cond_resched(). */ - /* 5) _rcu_barrier(), OOM callbacks, and expediting. */ + /* 5) rcu_barrier(), OOM callbacks, and expediting. */ struct rcu_head barrier_head; int exp_dynticks_snap; /* Double-check need for IPI. */ @@ -328,7 +328,7 @@ struct rcu_state { atomic_t barrier_cpu_count; /* # CPUs waiting on. */ struct completion barrier_completion; /* Wake at barrier end. */ unsigned long barrier_sequence; /* ++ at start and end of */ - /* _rcu_barrier(). */ + /* rcu_barrier(). */ /* End of fields guarded by barrier_mutex. */ struct mutex exp_mutex; /* Serialize expedited GP. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 7add1c297500..beaaca7a11f4 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1993,7 +1993,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu) * There needs to be a barrier before this function is called, * but associated with a prior determination that no more * callbacks would be posted. In the worst case, the first - * barrier in _rcu_barrier() suffices (but the caller cannot + * barrier in rcu_barrier() suffices (but the caller cannot * necessarily rely on this, not a substitute for the caller * getting the concurrency design right!). There must also be * a barrier between the following load an posting of a callback -- cgit v1.2.3 From 92aa39e9dc77481b90cbef25e547d66cab901496 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Jul 2018 13:47:30 -0700 Subject: rcu: Make need_resched() respond to urgent RCU-QS needs The per-CPU rcu_dynticks.rcu_urgent_qs variable communicates an urgent need for an RCU quiescent state from the force-quiescent-state processing within the grace-period kthread to context switches and to cond_resched(). Unfortunately, such urgent needs are not communicated to need_resched(), which is sometimes used to decide when to invoke cond_resched(), for but one example, within the KVM vcpu_run() function. As of v4.15, this can result in synchronize_sched() being delayed by up to ten seconds, which can be problematic, to say nothing of annoying. This commit therefore checks rcu_dynticks.rcu_urgent_qs from within rcu_check_callbacks(), which is invoked from the scheduling-clock interrupt handler. If the current task is not an idle task and is not executing in usermode, a context switch is forced, and either way, the rcu_dynticks.rcu_urgent_qs variable is set to false. If the current task is an idle task, then RCU's dyntick-idle code will detect the quiescent state, so no further action is required. 
Similarly, if the task is executing in usermode, other code in rcu_check_callbacks() and its called functions will report the corresponding quiescent state. Reported-by: Marius Hillenbrand Reported-by: David Woodhouse Suggested-by: Peter Zijlstra Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ce16b8da2c6f..f47ac7a4719f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2471,6 +2471,15 @@ void rcu_check_callbacks(int user) { trace_rcu_utilization(TPS("Start scheduler-tick")); raw_cpu_inc(rcu_data.ticks_this_gp); + /* The load-acquire pairs with the store-release setting to true. */ + if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { + /* Idle and userspace execution already are quiescent states. */ + if (!is_idle_task(current) && !user) { + set_tsk_need_resched(current); + set_preempt_need_resched(); + } + __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); + } rcu_flavor_check_callbacks(user); if (rcu_pending()) invoke_rcu_core(); -- cgit v1.2.3 From a0ef9ec24144799b5b47fa54c38f9a0f9dfe9a59 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Jul 2018 15:50:16 -0700 Subject: rcu: Provide improved interrupt-from-idle check in rcu_check_callbacks() The patch making need_resched() respond to urgent RCU-QS needs used is_idle_task(current) to detect an interrupt from idle, which does work reasonably, but is (in theory at least) vulnerable to loops containing need_resched() invoked from within RCU_NONIDLE() or its tracepoint equivalent. This commit therefore moves rcu_is_cpu_rrupt_from_idle() to a place from which rcu_check_callbacks() can invoke it and replaces the is_idle_task(current) with rcu_is_cpu_rrupt_from_idle(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f47ac7a4719f..77d2cbf7c831 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2474,7 +2474,7 @@ void rcu_check_callbacks(int user) /* The load-acquire pairs with the store-release setting to true. */ if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { /* Idle and userspace execution already are quiescent states. */ - if (!is_idle_task(current) && !user) { + if (!rcu_is_cpu_rrupt_from_idle() && !user) { set_tsk_need_resched(current); set_preempt_need_resched(); } -- cgit v1.2.3 From c116dba68d19246639e4fdb8c75756c67d6d268f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Jul 2018 12:09:14 -0700 Subject: rcutorture: Dump reader protection sequence if failures or close calls Now that RCU can have readers with multiple segments, it is quite possible that a specific sequence of reader segments might result in an rcutorture failure (reader spans a full grace period as detected by one of the grace-period primitives) or an rcutorture close call (reader potentially spans a full grace period based on reading out the RCU implementation's grace-period counter, but with no ordering). In such cases, it would clearly ease debugging if the offending specific sequence was known. For the first reader encountering a failure or a close call, this commit therefore dumps out the segments, delay durations, and whether or not the reader was preempted. Signed-off-by: Paul E. McKenney [ paulmck: Mark variables static, as suggested by kbuild test robot. 
] --- kernel/rcu/rcutorture.c | 119 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 96 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 294b3f6b7eb6..1141e0d84ff1 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -78,6 +78,7 @@ MODULE_AUTHOR("Paul E. McKenney and Josh Triplett get_gp_seq(); ts = rcu_trace_clock_local(); mdelay(longdelay_ms); + rtrsp->rt_delay_ms = longdelay_ms; completed = cur_ops->get_gp_seq(); do_trace_rcu_torture_read(cur_ops->name, NULL, ts, started, completed); } - if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) + if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { udelay(shortdelay_us); + rtrsp->rt_delay_us = shortdelay_us; + } if (!preempt_count() && - !(torture_random(rrsp) % (nrealreaders * 500))) + !(torture_random(rrsp) % (nrealreaders * 500))) { torture_preempt_schedule(); /* QS only if preemptible. */ + rtrsp->rt_preempted = true; + } } static void rcu_torture_read_unlock(int idx) __releases(RCU) @@ -494,7 +514,8 @@ static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) return srcu_read_lock(srcu_ctlp); } -static void srcu_read_delay(struct torture_random_state *rrsp) +static void +srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) { long delay; const long uspertick = 1000000 / HZ; @@ -504,10 +525,12 @@ static void srcu_read_delay(struct torture_random_state *rrsp) delay = torture_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); - if (!delay && in_task()) + if (!delay && in_task()) { schedule_timeout_interruptible(longdelay); - else - rcu_read_delay(rrsp); + rtrsp->rt_delay_jiffies = longdelay; + } else { + rcu_read_delay(rrsp, rtrsp); + } } static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) @@ -1120,7 +1143,8 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp) * change, do a ->read_delay(). */ static void rcutorture_one_extend(int *readstate, int newstate, - struct torture_random_state *trsp) + struct torture_random_state *trsp, + struct rt_read_seg *rtrsp) { int idxnew = -1; int idxold = *readstate; @@ -1129,6 +1153,7 @@ static void rcutorture_one_extend(int *readstate, int newstate, WARN_ON_ONCE(idxold < 0); WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); + rtrsp->rt_readstate = newstate; /* First, put new protection in place to avoid critical-section gap. */ if (statesnew & RCUTORTURE_RDR_BH) @@ -1160,7 +1185,7 @@ static void rcutorture_one_extend(int *readstate, int newstate, /* Delay if neither beginning nor end and there was a change. */ if ((statesnew || statesold) && *readstate && newstate) - cur_ops->read_delay(trsp); + cur_ops->read_delay(trsp, rtrsp); /* Update the reader state. */ if (idxnew == -1) @@ -1189,11 +1214,11 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) { int mask = rcutorture_extend_mask_max(); unsigned long randmask1 = torture_random(trsp) >> 8; - unsigned long randmask2 = randmask1 >> 1; + unsigned long randmask2 = randmask1 >> 3; WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); - /* Half the time lots of bits, half the time only one bit. */ - if (randmask1 & 0x1) + /* Most of the time lots of bits, half the time only one bit. 
*/ + if (!(randmask1 & 0x7)) mask = mask & randmask2; else mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); @@ -1213,20 +1238,25 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) * Do a randomly selected number of extensions of an existing RCU read-side * critical section. */ -static void rcutorture_loop_extend(int *readstate, - struct torture_random_state *trsp) +static struct rt_read_seg * +rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, + struct rt_read_seg *rtrsp) { int i; + int j; int mask = rcutorture_extend_mask_max(); WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ if (!((mask - 1) & mask)) - return; /* Current RCU reader not extendable. */ - i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS; - while (i--) { + return rtrsp; /* Current RCU reader not extendable. */ + /* Bias towards larger numbers of loops. */ + i = (torture_random(trsp) >> 3); + i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; + for (j = 0; j < i; j++) { mask = rcutorture_extend_mask(*readstate, trsp); - rcutorture_one_extend(readstate, mask, trsp); + rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); } + return &rtrsp[j]; } /* @@ -1236,16 +1266,20 @@ static void rcutorture_loop_extend(int *readstate, */ static bool rcu_torture_one_read(struct torture_random_state *trsp) { + int i; unsigned long started; unsigned long completed; int newstate; struct rcu_torture *p; int pipe_count; int readstate = 0; + struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; + struct rt_read_seg *rtrsp = &rtseg[0]; + struct rt_read_seg *rtrsp1; unsigned long long ts; newstate = rcutorture_extend_mask(readstate, trsp); - rcutorture_one_extend(&readstate, newstate, trsp); + rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, @@ -1255,12 +1289,12 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) torturing_tasks()); if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ - rcutorture_one_extend(&readstate, 0, trsp); + rcutorture_one_extend(&readstate, 0, trsp, rtrsp); return false; } if (p->rtort_mbtest == 0) atomic_inc(&n_rcu_torture_mberror); - rcutorture_loop_extend(&readstate, trsp); + rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); preempt_disable(); pipe_count = p->rtort_pipe_count; if (pipe_count > RCU_TORTURE_PIPE_LEN) { @@ -1281,8 +1315,17 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) } __this_cpu_inc(rcu_torture_batch[completed]); preempt_enable(); - rcutorture_one_extend(&readstate, 0, trsp); + rcutorture_one_extend(&readstate, 0, trsp, rtrsp); WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); + + /* If error or close call, record the sequence of reader protections. */ + if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { + i = 0; + for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) + err_segs[i++] = *rtrsp1; + rt_read_nsegs = i; + } + return true; } @@ -1747,6 +1790,7 @@ static enum cpuhp_state rcutor_hp; static void rcu_torture_cleanup(void) { + int firsttime; int flags = 0; unsigned long gp_seq = 0; int i; @@ -1800,6 +1844,33 @@ rcu_torture_cleanup(void) rcu_torture_stats_print(); /* -After- the stats thread is stopped! 
*/ + if (err_segs_recorded) { + pr_alert("Failure/close-call rcutorture reader segments:\n"); + if (rt_read_nsegs == 0) + pr_alert("\t: No segments recorded!!!\n"); + firsttime = 1; + for (i = 0; i < rt_read_nsegs; i++) { + pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); + if (err_segs[i].rt_delay_jiffies != 0) { + pr_cont("%s%ldjiffies", firsttime ? "" : "+", + err_segs[i].rt_delay_jiffies); + firsttime = 0; + } + if (err_segs[i].rt_delay_ms != 0) { + pr_cont("%s%ldms", firsttime ? "" : "+", + err_segs[i].rt_delay_ms); + firsttime = 0; + } + if (err_segs[i].rt_delay_us != 0) { + pr_cont("%s%ldus", firsttime ? "" : "+", + err_segs[i].rt_delay_us); + firsttime = 0; + } + pr_cont("%s\n", + err_segs[i].rt_preempted ? "preempted" : ""); + + } + } if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); else if (torture_onoff_failures()) @@ -1943,6 +2014,8 @@ rcu_torture_init(void) per_cpu(rcu_torture_batch, cpu)[i] = 0; } } + err_segs_recorded = 0; + rt_read_nsegs = 0; /* Start up the kthreads. */ -- cgit v1.2.3 From c5bacd94173ec49d7dce7ac7c64bbdde3a6e69ae Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 20 Jul 2018 14:18:23 -0700 Subject: rcu: Motivate Tiny RCU forward progress If a long-running CPU-bound in-kernel task invokes call_rcu(), the callback won't be invoked until the next context switch. If there are no other runnable tasks (which is not an uncommon situation on deep embedded systems), the callback might never be invoked. This commit therefore causes rcu_check_callbacks() to ask the scheduler for a context switch if there are callbacks posted that are still waiting for a grace period. Suggested-by: Peter Zijlstra Signed-off-by: Paul E. McKenney --- kernel/rcu/tiny.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index a77853b73bfe..1745d30e170e 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -78,8 +78,12 @@ void rcu_qs(void) */ void rcu_check_callbacks(int user) { - if (user) + if (user) { rcu_qs(); + } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) { + set_tsk_need_resched(current); + set_preempt_need_resched(); + } } /* Invoke the RCU callbacks whose grace period has elapsed. */ -- cgit v1.2.3 From 7e28c5af4ef6b539334aa5de40feca0c041c94df Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 11 Jul 2018 08:09:28 -0700 Subject: rcu: Eliminate ->rcu_qs_ctr from the rcu_dynticks structure The ->rcu_qs_ctr counter was intended to allow providing a lightweight report of a quiescent state to all RCU flavors. But now that there is only one flavor of RCU in any one running kernel, there is no point in having this feature. This commit therefore removes the ->rcu_qs_ctr field from the rcu_dynticks structure and the ->rcu_qs_ctr_snap field from the rcu_data structure. This results in the "rqc" option to the rcu_fqs trace event no longer being used, so this commit also removes the "rqc" description from the header comment. While in the neighborhood, this commit also causes the forward-progress request .rcu_need_heavy_qs be set one jiffies_till_sched_qs interval later in the grace period than the first setting of .rcu_urgent_qs. Signed-off-by: Paul E. 
McKenney --- include/trace/events/rcu.h | 5 ++--- kernel/rcu/tree.c | 52 +++++++++++++--------------------------------- kernel/rcu/tree.h | 3 --- kernel/rcu/tree_plugin.h | 5 +---- 4 files changed, 17 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 175e0bce22bd..f0c4d10e614b 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -393,9 +393,8 @@ TRACE_EVENT(rcu_quiescent_state_report, * Tracepoint for quiescent states detected by force_quiescent_state(). * These trace events include the type of RCU, the grace-period number * that was blocked by the CPU, the CPU itself, and the type of quiescent - * state, which can be "dti" for dyntick-idle mode, "kick" when kicking - * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the - * CPU got a quiescent state via its rcu_qs_ctr. + * state, which can be "dti" for dyntick-idle mode or "kick" when kicking + * a CPU that has been in dyntick-idle mode for too long. */ TRACE_EVENT(rcu_fqs, diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 77d2cbf7c831..bc42c600027c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1018,25 +1018,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) return 1; } - /* - * Has this CPU encountered a cond_resched() since the beginning - * of the grace period? For this to be the case, the CPU has to - * have noticed the current grace period. This might not be the - * case for nohz_full CPUs looping in the kernel. - */ - jtsq = jiffies_till_sched_qs; - ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); - if (time_after(jiffies, rcu_state.gp_start + jtsq) && - READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) && - rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) { - trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("rqc")); - rcu_gpnum_ovf(rnp, rdp); - return 1; - } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { - /* Load rcu_qs_ctr before store to rcu_urgent_qs. */ - smp_store_release(ruqp, true); - } - /* If waiting too long on an offline CPU, complain. */ if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && time_after(jiffies, rcu_state.gp_start + HZ)) { @@ -1060,29 +1041,27 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) /* * A CPU running for an extended time within the kernel can - * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode, - * even context-switching back and forth between a pair of - * in-kernel CPU-bound tasks cannot advance grace periods. - * So if the grace period is old enough, make the CPU pay attention. - * Note that the unsynchronized assignments to the per-CPU - * rcu_need_heavy_qs variable are safe. Yes, setting of - * bits can be lost, but they will be set again on the next - * force-quiescent-state pass. So lost bit sets do not result - * in incorrect behavior, merely in a grace period lasting - * a few jiffies longer than it might otherwise. Because - * there are at most four threads involved, and because the - * updates are only once every few jiffies, the probability of - * lossage (and thus of slight grace-period extension) is - * quite low. + * delay RCU grace periods: (1) At age jiffies_till_sched_qs, + * set .rcu_urgent_qs, (2) At age 2*jiffies_till_sched_qs, set + * both .rcu_need_heavy_qs and .rcu_urgent_qs. 
Note that the + * unsynchronized assignments to the per-CPU rcu_need_heavy_qs + * variable are safe because the assignments are repeated if this + * CPU failed to pass through a quiescent state. This code + * also checks .jiffies_resched in case jiffies_till_sched_qs + * is set way high. */ + jtsq = jiffies_till_sched_qs; + ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu); if (!READ_ONCE(*rnhqp) && - (time_after(jiffies, rcu_state.gp_start + jtsq) || + (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || time_after(jiffies, rcu_state.jiffies_resched))) { WRITE_ONCE(*rnhqp, true); /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ smp_store_release(ruqp, true); rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */ + } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { + WRITE_ONCE(*ruqp, true); } /* @@ -1091,7 +1070,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * see if the CPU is getting hammered with interrupts, but only * once per grace period, just to keep the IPIs down to a dull roar. */ - if (jiffies - rcu_state.gp_start > rcu_jiffies_till_stall_check() / 2) { + if (time_after(jiffies, rcu_state.jiffies_resched)) { resched_cpu(rdp->cpu); if (IS_ENABLED(CONFIG_IRQ_WORK) && !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && @@ -1669,7 +1648,6 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); need_gp = !!(rnp->qsmask & rdp->grpmask); rdp->cpu_no_qs.b.norm = need_gp; - rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); rdp->core_needs_qs = need_gp; zero_cpu_stall_ticks(rdp); } @@ -2230,7 +2208,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) * within the current grace period. */ rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ - rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; } @@ -3213,7 +3190,6 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->gp_seq = rnp->gp_seq; rdp->gp_seq_needed = rnp->gp_seq; rdp->cpu_no_qs.b.norm = true; - rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu); rdp->core_needs_qs = false; rdp->rcu_iw_pending = false; rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8cf93ac277ec..4866fa44ab0b 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -42,7 +42,6 @@ struct rcu_dynticks { long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ atomic_t dynticks; /* Even value for idle, else odd. */ bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */ - unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */ bool rcu_urgent_qs; /* GP old need light quiescent state. */ #ifdef CONFIG_RCU_FAST_NO_HZ bool all_lazy; /* Are all CPU's CBs lazy? */ @@ -188,8 +187,6 @@ struct rcu_data { /* 1) quiescent-state and grace-period handling : */ unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */ unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */ - unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */ - /* for rcu_all_qs() invocations. */ union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ bool core_needs_qs; /* Core waits for quiesc state. */ bool beenonline; /* CPU online at least once. 
*/ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index beaaca7a11f4..726d57708849 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -978,9 +978,7 @@ void rcu_all_qs(void) rcu_momentary_dyntick_idle(); local_irq_restore(flags); } - if (unlikely(raw_cpu_read(rcu_data.cpu_no_qs.b.exp))) - rcu_qs(); - this_cpu_inc(rcu_dynticks.rcu_qs_ctr); + rcu_qs(); barrier(); /* Avoid RCU read-side critical sections leaking up. */ preempt_enable(); } @@ -1000,7 +998,6 @@ void rcu_note_context_switch(bool preempt) this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) rcu_momentary_dyntick_idle(); - this_cpu_inc(rcu_dynticks.rcu_qs_ctr); if (!preempt) rcu_tasks_qs(current); out: -- cgit v1.2.3 From 74de6960c99d8df0d09fb29a7b014cb9c5571e2b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Jul 2018 15:28:09 -0700 Subject: rcu: Provide functions for determining if call_rcu() has been invoked This commit adds rcu_head_init() and rcu_head_after_call_rcu() functions to help RCU users detect when another CPU has passed the specified rcu_head structure and function to call_rcu(). The rcu_head_init() should be invoked before making the structure visible to RCU readers, and then the rcu_head_after_call_rcu() may be invoked from within an RCU read-side critical section on an rcu_head structure that was obtained during a traversal of the data structure in question. The rcu_head_after_call_rcu() function will return true if the rcu_head structure has already been passed (with the specified function) to call_rcu(), otherwise it will return false. If rcu_head_init() has not been invoked on the rcu_head structure or if the rcu_head (AKA callback) has already been invoked, then rcu_head_after_call_rcu() will do WARN_ON_ONCE(). Reported-by: NeilBrown Signed-off-by: Paul E. McKenney [ paulmck: Apply neilb naming feedback. ] --- include/linux/rcupdate.h | 40 ++++++++++++++++++++++++++++++++++++++++ kernel/rcu/rcu.h | 5 ++++- 2 files changed, 44 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e4f821165d0b..4db8bcacc51a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -857,6 +857,46 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ +/* Has the specified rcu_head structure been handed to call_rcu()? */ + +/* + * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() + * @rhp: The rcu_head structure to initialize. + * + * If you intend to invoke rcu_head_after_call_rcu() to test whether a + * given rcu_head structure has already been passed to call_rcu(), then + * you must also invoke this rcu_head_init() function on it just after + * allocating that structure. Calls to this function must not race with + * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. + */ +static inline void rcu_head_init(struct rcu_head *rhp) +{ + rhp->func = (rcu_callback_t)~0L; +} + +/* + * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? + * @rhp: The rcu_head structure to test. + * @func: The function passed to call_rcu() along with @rhp. + * + * Returns @true if the @rhp has been passed to call_rcu() with @func, + * and @false otherwise. Emits a warning in any other case, including + * the case where @rhp has already been invoked after a grace period. + * Calls to this function must not race with callback invocation. 
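[ Editorial note, not part of the patch: a minimal usage sketch of the two helpers added above. The structure struct foo, the callback foo_cb(), the pointer gp, and the surrounding functions are hypothetical names chosen only for illustration. ]

	struct foo {
		struct rcu_head rh;
		int data;
	};

	static struct foo __rcu *gp;

	static void foo_cb(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct foo, rh));
	}

	/* Updater: initialize the rcu_head before publishing the element. */
	static void foo_publish(struct foo *p)
	{
		rcu_head_init(&p->rh);
		rcu_assign_pointer(gp, p);
	}

	/* Updater: unpublish the element and hand it to call_rcu(). */
	static void foo_retire(struct foo *p)
	{
		rcu_assign_pointer(gp, NULL);
		call_rcu(&p->rh, foo_cb);
	}

	/* Reader: has this element already been passed to call_rcu()? */
	static bool foo_is_dying(struct foo *p)
	{
		return rcu_head_after_call_rcu(&p->rh, foo_cb);
	}

As the comment text continues below, the rcu_head_after_call_rcu() call in foo_is_dying() would normally sit inside an RCU read-side critical section that also performed the rcu_dereference() yielding p.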
One way + * to avoid such races is to enclose the call to rcu_head_after_call_rcu() + * in an RCU read-side critical section that includes a read-side fetch + * of the pointer to the structure containing @rhp. + */ +static inline bool +rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) +{ + if (READ_ONCE(rhp->func) == f) + return true; + WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L); + return false; +} + + /* Transitional pre-consolidation compatibility definitions. */ static inline void synchronize_rcu_bh(void) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 5dec94509a7e..4c56c1d98fb3 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -224,6 +224,7 @@ void kfree(const void *); */ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) { + rcu_callback_t f; unsigned long offset = (unsigned long)head->func; rcu_lock_acquire(&rcu_callback_map); @@ -234,7 +235,9 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) return true; } else { RCU_TRACE(trace_rcu_invoke_callback(rn, head);) - head->func(head); + f = head->func; + WRITE_ONCE(head->func, (rcu_callback_t)0L); + f(head); rcu_lock_release(&rcu_callback_map); return false; } -- cgit v1.2.3 From c06aed0e31008a248c1841f1b7fc80e9ee242a31 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Jul 2018 11:25:23 -0700 Subject: rcu: Compute jiffies_till_sched_qs from other kernel parameters The jiffies_till_sched_qs value used to determine how old a grace period must be before RCU enlists the help of the scheduler to force a quiescent state on the holdout CPU. Currently, this defaults to HZ/10 regardless of system size and may be set only at boot time. This can be a problem for very large systems, because if the values of the jiffies_till_first_fqs and jiffies_till_next_fqs kernel parameters are left at their defaults, they are calculated to increase as the number of CPUs actually configured on the system increases. Thus, on a sufficiently large system, RCU would enlist the help of the scheduler before the grace-period kthread had a chance to scan for idle CPUs, which wastes CPU time. This commit therefore allows jiffies_till_sched_qs to be set, if desired, but if left as default, computes is as jiffies_till_first_fqs plus twice jiffies_till_next_fqs, thus allowing three force-quiescent-state scans for idle CPUs. This scales with the number of CPUs, providing sensible default values. Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 9 +++- kernel/rcu/tree.c | 63 ++++++++++++++++++------- kernel/rcu/tree_plugin.h | 2 + 3 files changed, 57 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index aa96e669bcb8..6153fb62abe1 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3595,7 +3595,14 @@ Set required age in jiffies for a given grace period before RCU starts soliciting quiescent-state help from - rcu_note_context_switch(). + rcu_note_context_switch(). If not specified, the + kernel will calculate a value based on the most + recent settings of rcutree.jiffies_till_first_fqs + and rcutree.jiffies_till_next_fqs. + This calculated value may be viewed in + rcutree.jiffies_to_sched_qs. Any attempt to + set rcutree.jiffies_to_sched_qs will be + cheerfully overwritten. 
rcutree.jiffies_till_first_fqs= [KNL] Set delay from grace-period initialization to diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index bc42c600027c..6bd0951a5f3a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -396,13 +396,47 @@ static ulong jiffies_till_first_fqs = ULONG_MAX; static ulong jiffies_till_next_fqs = ULONG_MAX; static bool rcu_kick_kthreads; +/* + * How long the grace period must be before we start recruiting + * quiescent-state help from rcu_note_context_switch(). + */ +static ulong jiffies_till_sched_qs = ULONG_MAX; +module_param(jiffies_till_sched_qs, ulong, 0444); +static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */ +module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ + +/* + * Make sure that we give the grace-period kthread time to detect any + * idle CPUs before taking active measures to force quiescent states. + * However, don't go below 100 milliseconds, adjusted upwards for really + * large systems. + */ +static void adjust_jiffies_till_sched_qs(void) +{ + unsigned long j; + + /* If jiffies_till_sched_qs was specified, respect the request. */ + if (jiffies_till_sched_qs != ULONG_MAX) { + WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); + return; + } + j = READ_ONCE(jiffies_till_first_fqs) + + 2 * READ_ONCE(jiffies_till_next_fqs); + if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) + j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; + pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); + WRITE_ONCE(jiffies_to_sched_qs, j); +} + static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) { ulong j; int ret = kstrtoul(val, 0, &j); - if (!ret) + if (!ret) { WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); + adjust_jiffies_till_sched_qs(); + } return ret; } @@ -411,8 +445,10 @@ static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param ulong j; int ret = kstrtoul(val, 0, &j); - if (!ret) + if (!ret) { WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); + adjust_jiffies_till_sched_qs(); + } return ret; } @@ -430,13 +466,6 @@ module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_fi module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); module_param(rcu_kick_kthreads, bool, 0644); -/* - * How long the grace period must be before we start recruiting - * quiescent-state help from rcu_note_context_switch(). - */ -static ulong jiffies_till_sched_qs = HZ / 10; -module_param(jiffies_till_sched_qs, ulong, 0444); - static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); static void force_quiescent_state(void); static int rcu_pending(void); @@ -1041,16 +1070,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) /* * A CPU running for an extended time within the kernel can - * delay RCU grace periods: (1) At age jiffies_till_sched_qs, - * set .rcu_urgent_qs, (2) At age 2*jiffies_till_sched_qs, set + * delay RCU grace periods: (1) At age jiffies_to_sched_qs, + * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the * unsynchronized assignments to the per-CPU rcu_need_heavy_qs * variable are safe because the assignments are repeated if this * CPU failed to pass through a quiescent state. This code - * also checks .jiffies_resched in case jiffies_till_sched_qs + * also checks .jiffies_resched in case jiffies_to_sched_qs * is set way high. 
*/ - jtsq = jiffies_till_sched_qs; + jtsq = READ_ONCE(jiffies_to_sched_qs); ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu); if (!READ_ONCE(*rnhqp) && @@ -1236,7 +1265,7 @@ static void print_other_cpu_stall(unsigned long gp_seq) gpa = READ_ONCE(rcu_state.gp_activity); pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", rcu_state.name, j - gpa, j, gpa, - jiffies_till_next_fqs, + READ_ONCE(jiffies_till_next_fqs), rcu_get_root()->qsmask); /* In this case, the current CPU might be at fault. */ sched_show_task(current); @@ -1874,7 +1903,7 @@ static void rcu_gp_fqs_loop(void) struct rcu_node *rnp = rcu_get_root(); first_gp_fqs = true; - j = jiffies_till_first_fqs; + j = READ_ONCE(jiffies_till_first_fqs); ret = 0; for (;;) { if (!ret) { @@ -1908,7 +1937,7 @@ static void rcu_gp_fqs_loop(void) cond_resched_tasks_rcu_qs(); WRITE_ONCE(rcu_state.gp_activity, jiffies); ret = 0; /* Force full wait till next FQS. */ - j = jiffies_till_next_fqs; + j = READ_ONCE(jiffies_till_next_fqs); } else { /* Deal with stray signal. */ cond_resched_tasks_rcu_qs(); @@ -3579,6 +3608,8 @@ static void __init rcu_init_geometry(void) jiffies_till_first_fqs = d; if (jiffies_till_next_fqs == ULONG_MAX) jiffies_till_next_fqs = d; + if (jiffies_till_sched_qs == ULONG_MAX) + adjust_jiffies_till_sched_qs(); /* If the compile-time values are accurate, just leave. */ if (rcu_fanout_leaf == RCU_FANOUT_LEAF && diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 726d57708849..7ec366268e2e 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -105,6 +105,8 @@ static void __init rcu_bootup_announce_oddness(void) pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs); if (jiffies_till_next_fqs != ULONG_MAX) pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs); + if (jiffies_till_sched_qs != ULONG_MAX) + pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs); if (rcu_kick_kthreads) pr_info("\tKick kthreads if too-long grace period.\n"); if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) -- cgit v1.2.3 From d3052109c0bc9e536d17d627ae628ed8ceb6928c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Jul 2018 11:49:47 -0700 Subject: rcu: More aggressively enlist scheduler aid for nohz_full CPUs Because nohz_full CPUs can leave the scheduler-clock interrupt disabled even when in kernel mode, RCU cannot rely on rcu_check_callbacks() to enlist the scheduler's aid in extracting a quiescent state from such CPUs. This commit therefore more aggressively uses resched_cpu() on nohz_full CPUs that fail to pass through a quiescent state in a timely manner. By default, the resched_cpu() beating starts 300 milliseconds into the quiescent state. While in the neighborhood, add a ->last_fqs_resched field to the rcu_data structure in order to rate-limit resched_cpu() calls from the RCU grace-period kthread. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 32 ++++++++++++++++++++++++++------ kernel/rcu/tree.h | 1 + kernel/rcu/tree_plugin.h | 1 + 3 files changed, 28 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 6bd0951a5f3a..96731f62594a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -61,6 +61,7 @@ #include #include #include +#include #include "tree.h" #include "rcu.h" @@ -1088,19 +1089,38 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) WRITE_ONCE(*rnhqp, true); /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ smp_store_release(ruqp, true); - rcu_state.jiffies_resched += jtsq; /* Re-enable beating. */ } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { WRITE_ONCE(*ruqp, true); } /* - * If more than halfway to RCU CPU stall-warning time, do a - * resched_cpu() to try to loosen things up a bit. Also check to - * see if the CPU is getting hammered with interrupts, but only - * once per grace period, just to keep the IPIs down to a dull roar. + * NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks! + * The above code handles this, but only for straight cond_resched(). + * And some in-kernel loops check need_resched() before calling + * cond_resched(), which defeats the above code for CPUs that are + * running in-kernel with scheduling-clock interrupts disabled. + * So hit them over the head with the resched_cpu() hammer! */ - if (time_after(jiffies, rcu_state.jiffies_resched)) { + if (tick_nohz_full_cpu(rdp->cpu) && + time_after(jiffies, + READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) { resched_cpu(rdp->cpu); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); + } + + /* + * If more than halfway to RCU CPU stall-warning time, invoke + * resched_cpu() more frequently to try to loosen things up a bit. + * Also check to see if the CPU is getting hammered with interrupts, + * but only once per grace period, just to keep the IPIs down to + * a dull roar. + */ + if (time_after(jiffies, rcu_state.jiffies_resched)) { + if (time_after(jiffies, + READ_ONCE(rdp->last_fqs_resched) + jtsq)) { + resched_cpu(rdp->cpu); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); + } if (IS_ENABLED(CONFIG_IRQ_WORK) && !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && (rnp->ffmask & rdp->grpmask)) { diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4866fa44ab0b..8f053bb1eec8 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -260,6 +260,7 @@ struct rcu_data { short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */ unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */ short rcu_onl_gp_flags; /* ->gp_flags at last online. */ + unsigned long last_fqs_resched; /* Time of last rcu_resched(). */ int cpu; }; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 7ec366268e2e..1e80a0da7924 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1850,6 +1850,7 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) { rdp->ticks_this_gp = 0; rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); } #ifdef CONFIG_RCU_NOCB_CPU -- cgit v1.2.3 From fced9c8cfe6bc8a26dbbf785927aa673c83a7a35 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 26 Jul 2018 13:44:00 -0700 Subject: rcu: Avoid resched_cpu() when rescheduling the current CPU The resched_cpu() interface is quite handy, but it does acquire the specified CPU's runqueue lock, which does not come for free. 
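[ Editorial note: in sketch form, the distinction the change described below relies on. rcu_force_resched() is a hypothetical helper name used only for illustration. ]

	/* Caller runs with preemption disabled. */
	static void rcu_force_resched(int cpu)
	{
		if (cpu == smp_processor_id()) {
			/* Local CPU: no runqueue lock needed. */
			set_tsk_need_resched(current);
			set_preempt_need_resched();
		} else {
			/* Remote CPU: resched_cpu() takes that CPU's rq lock. */
			resched_cpu(cpu);
		}
	}

Only the current-CPU case changes in this commit; remote CPUs still require resched_cpu().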
This commit therefore substitutes the following when directing resched_cpu() at the current CPU: set_tsk_need_resched(current); set_preempt_need_resched(); Signed-off-by: Paul E. McKenney Cc: Peter Zijlstra --- kernel/rcu/tree.c | 11 +++++++---- kernel/rcu/tree_exp.h | 17 ++++++++++------- kernel/rcu/tree_plugin.h | 6 ++++-- 3 files changed, 21 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 96731f62594a..92346ab8077d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1354,7 +1354,8 @@ static void print_cpu_stall(void) * progress and it could be we're stuck in kernel space without context * switches for an entirely unreasonable amount of time. */ - resched_cpu(smp_processor_id()); + set_tsk_need_resched(current); + set_preempt_need_resched(); } static void check_cpu_stall(struct rcu_data *rdp) @@ -2675,10 +2676,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused WARN_ON_ONCE(!rdp->beenonline); /* Report any deferred quiescent states if preemption enabled. */ - if (!(preempt_count() & PREEMPT_MASK)) + if (!(preempt_count() & PREEMPT_MASK)) { rcu_preempt_deferred_qs(current); - else if (rcu_preempt_need_deferred_qs(current)) - resched_cpu(rdp->cpu); /* Provoke future context switch. */ + } else if (rcu_preempt_need_deferred_qs(current)) { + set_tsk_need_resched(current); + set_preempt_need_resched(); + } /* Update RCU state based on any recent quiescent states. */ rcu_check_quiescent_state(rdp); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 78553a8fa3c6..030df96e0d3c 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused) rcu_report_exp_rdp(rdp); } else { rdp->deferred_qs = true; - resched_cpu(rdp->cpu); + set_tsk_need_resched(t); + set_preempt_need_resched(); } return; } @@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused) * because we are in an interrupt handler, which will cause that * function to take an early exit without doing anything. * - * Otherwise, use resched_cpu() to force a context switch after - * the CPU enables everything. + * Otherwise, force a context switch after the CPU enables everything. */ rdp->deferred_qs = true; if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || - WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) + WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) { rcu_preempt_deferred_qs(t); - else - resched_cpu(rdp->cpu); + } else { + set_tsk_need_resched(t); + set_preempt_need_resched(); + } } /* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */ @@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused) __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true); /* Store .exp before .rcu_urgent_qs. */ smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true); - resched_cpu(smp_processor_id()); + set_tsk_need_resched(current); + set_preempt_need_resched(); } /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1e80a0da7924..978ce3539809 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user) if (t->rcu_read_lock_nesting > 0 || (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) { /* No QS, force context switch if deferred. 
*/ - if (rcu_preempt_need_deferred_qs(t)) - resched_cpu(smp_processor_id()); + if (rcu_preempt_need_deferred_qs(t)) { + set_tsk_need_resched(t); + set_preempt_need_resched(); + } } else if (rcu_preempt_need_deferred_qs(t)) { rcu_preempt_deferred_qs(t); /* Report deferred QS. */ return; -- cgit v1.2.3 From df63fa5bc11aadf81126d4a1785080c800e2ece3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 31 Jul 2018 09:49:20 -0700 Subject: rcu: Convert "1UL << x" to "BIT(x)" This commit saves a few characters by converting "1UL << x" to "BIT(x)". Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 92346ab8077d..e778fd5546d1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3582,7 +3582,7 @@ static void __init rcu_init_one(void) rnp->parent = NULL; } else { rnp->grpnum = j % levelspread[i - 1]; - rnp->grpmask = 1UL << rnp->grpnum; + rnp->grpmask = BIT(rnp->grpnum); rnp->parent = rcu_state.level[i - 1] + j / levelspread[i - 1]; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 8f053bb1eec8..5e561f1339d4 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -168,7 +168,7 @@ struct rcu_node { * are indexed relative to this interval rather than the global CPU ID space. * This generates the bit for a CPU in node-local masks. */ -#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo)) +#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo)) /* * Union to allow "aggregate OR" operation on the need for a quiescent -- cgit v1.2.3 From cc72046cc3cce8438778f306d59442febf4b7683 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 19:31:39 -0700 Subject: rcu: Merge rcu_dynticks structure into rcu_data structure Now that there is only ever one rcu_data structure per CPU, there is no need for a separate rcu_dynticks structure. This commit therefore adds the rcu_dynticks fields into the rcu_data structure in preparation for removing the rcu_dynticks structure entirely. Note that the ->dynticks field will be handled specially because there is a field by that name in both structures. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 5e561f1339d4..d35cd9677b08 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -212,6 +212,23 @@ struct rcu_data { /* 3) dynticks interface. */ struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ int dynticks_snap; /* Per-GP tracking for dynticks. */ + long dynticks_nesting; /* Track process nesting level. */ + long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ + // atomic_t dynticks; /* Even value for idle, else odd. */ + bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */ + bool rcu_urgent_qs; /* GP old need light quiescent state. */ +#ifdef CONFIG_RCU_FAST_NO_HZ + bool all_lazy; /* Are all CPU's CBs lazy? */ + unsigned long nonlazy_posted; + /* # times non-lazy CBs posted to CPU. */ + unsigned long nonlazy_posted_snap; + /* idle-period nonlazy_posted snapshot. */ + unsigned long last_accelerate; + /* Last jiffy CBs were accelerated. */ + unsigned long last_advance_all; + /* Last jiffy CBs were all advanced. */ + int tick_nohz_enabled_snap; /* Previously seen value from sysfs. 
*/ +#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ -- cgit v1.2.3 From 0fd79e7521bc944522c3c97f40f3d25619e329f4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch ->tick_nohz_enabled_snap to rcu_data structure This commit removes ->tick_nohz_enabled_snap from the rcu_dynticks structure and updates the code to access it from the rcu_data structure. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 1 - kernel/rcu/tree_plugin.h | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index d35cd9677b08..5d447ceba769 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -53,7 +53,6 @@ struct rcu_dynticks { /* Last jiffy CBs were accelerated. */ unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ - int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ }; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 978ce3539809..6511032371c1 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1681,7 +1681,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) static void rcu_prepare_for_idle(void) { bool needwake; - struct rcu_data *rdp; + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); struct rcu_node *rnp; int tne; @@ -1692,10 +1692,10 @@ static void rcu_prepare_for_idle(void) /* Handle nohz enablement switches conservatively. */ tne = READ_ONCE(tick_nohz_active); - if (tne != rdtp->tick_nohz_enabled_snap) { + if (tne != rdp->tick_nohz_enabled_snap) { if (rcu_cpu_has_callbacks(NULL)) invoke_rcu_core(); /* force nohz to see update. */ - rdtp->tick_nohz_enabled_snap = tne; + rdp->tick_nohz_enabled_snap = tne; return; } if (!tne) @@ -1721,7 +1721,6 @@ static void rcu_prepare_for_idle(void) if (rdtp->last_accelerate == jiffies) return; rdtp->last_accelerate = jiffies; - rdp = this_cpu_ptr(&rcu_data); if (rcu_segcblist_pend_cbs(&rdp->cblist)) { rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ @@ -1765,6 +1764,7 @@ static void rcu_idle_count_callbacks_posted(void) static void print_cpu_stall_fast_no_hz(char *cp, int cpu) { + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; @@ -1772,7 +1772,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu) rdtp->last_accelerate & 0xffff, jiffies & 0xffff, ulong2long(nlpd), rdtp->all_lazy ? 'L' : '.', - rdtp->tick_nohz_enabled_snap ? '.' : 'D'); + rdp->tick_nohz_enabled_snap ? '.' : 'D'); } #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ -- cgit v1.2.3 From 5998a75adbf4f85e63b06fa7723633cc84d7129b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch last accelerate/advance to rcu_data structure This commit removes ->last_accelerate and ->last_advance_all from the rcu_dynticks structure and updates the code to access them from the rcu_data structure. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.h | 4 ---- kernel/rcu/tree_plugin.h | 17 ++++++++--------- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 5d447ceba769..69bd6bec05bb 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -49,10 +49,6 @@ struct rcu_dynticks { /* # times non-lazy CBs posted to CPU. */ unsigned long nonlazy_posted_snap; /* idle-period nonlazy_posted snapshot. */ - unsigned long last_accelerate; - /* Last jiffy CBs were accelerated. */ - unsigned long last_advance_all; - /* Last jiffy CBs were all advanced. */ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ }; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6511032371c1..45708164ddf9 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1597,16 +1597,14 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644); static bool __maybe_unused rcu_try_advance_all_cbs(void) { bool cbs_ready = false; - struct rcu_data *rdp; - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp; /* Exit early if we advanced recently. */ - if (jiffies == rdtp->last_advance_all) + if (jiffies == rdp->last_advance_all) return false; - rdtp->last_advance_all = jiffies; + rdp->last_advance_all = jiffies; - rdp = this_cpu_ptr(&rcu_data); rnp = rdp->mynode; /* @@ -1635,6 +1633,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) */ int rcu_needs_cpu(u64 basemono, u64 *nextevt) { + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); unsigned long dj; @@ -1655,7 +1654,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) invoke_rcu_core(); return 1; } - rdtp->last_accelerate = jiffies; + rdp->last_accelerate = jiffies; /* Request timer delay depending on laziness, and round. */ if (!rdtp->all_lazy) { @@ -1718,9 +1717,9 @@ static void rcu_prepare_for_idle(void) * If we have not yet accelerated this jiffy, accelerate all * callbacks on this CPU. */ - if (rdtp->last_accelerate == jiffies) + if (rdp->last_accelerate == jiffies) return; - rdtp->last_accelerate = jiffies; + rdp->last_accelerate = jiffies; if (rcu_segcblist_pend_cbs(&rdp->cblist)) { rnp = rdp->mynode; raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ @@ -1769,7 +1768,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu) unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", - rdtp->last_accelerate & 0xffff, jiffies & 0xffff, + rdp->last_accelerate & 0xffff, jiffies & 0xffff, ulong2long(nlpd), rdtp->all_lazy ? 'L' : '.', rdp->tick_nohz_enabled_snap ? '.' : 'D'); -- cgit v1.2.3 From c458a89e964dbf3c56b23eca2018bd0e2380969d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch lazy counts to rcu_data structure This commit removes ->all_lazy, ->nonlazy_posted and ->nonlazy_posted_snap from the rcu_dynticks structure and updates the code to access them from the rcu_data structure. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 7 ------- kernel/rcu/tree_plugin.h | 23 ++++++++++------------- 2 files changed, 10 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 69bd6bec05bb..36a47c7bd882 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -43,13 +43,6 @@ struct rcu_dynticks { atomic_t dynticks; /* Even value for idle, else odd. 
*/ bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */ bool rcu_urgent_qs; /* GP old need light quiescent state. */ -#ifdef CONFIG_RCU_FAST_NO_HZ - bool all_lazy; /* Are all CPU's CBs lazy? */ - unsigned long nonlazy_posted; - /* # times non-lazy CBs posted to CPU. */ - unsigned long nonlazy_posted_snap; - /* idle-period nonlazy_posted snapshot. */ -#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ }; /* Communicate arguments to a workqueue handler. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 45708164ddf9..b5aeb2fe4cfe 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1634,16 +1634,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) int rcu_needs_cpu(u64 basemono, u64 *nextevt) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); unsigned long dj; lockdep_assert_irqs_disabled(); /* Snapshot to detect later posting of non-lazy callback. */ - rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; + rdp->nonlazy_posted_snap = rdp->nonlazy_posted; /* If no callbacks, RCU doesn't need the CPU. */ - if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) { + if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) { *nextevt = KTIME_MAX; return 0; } @@ -1657,7 +1656,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) rdp->last_accelerate = jiffies; /* Request timer delay depending on laziness, and round. */ - if (!rdtp->all_lazy) { + if (!rdp->all_lazy) { dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; } else { @@ -1681,7 +1680,6 @@ static void rcu_prepare_for_idle(void) { bool needwake; struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); struct rcu_node *rnp; int tne; @@ -1705,10 +1703,10 @@ static void rcu_prepare_for_idle(void) * callbacks, invoke RCU core for the side-effect of recalculating * idle duration on re-entry to idle. */ - if (rdtp->all_lazy && - rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { - rdtp->all_lazy = false; - rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; + if (rdp->all_lazy && + rdp->nonlazy_posted != rdp->nonlazy_posted_snap) { + rdp->all_lazy = false; + rdp->nonlazy_posted_snap = rdp->nonlazy_posted; invoke_rcu_core(); return; } @@ -1754,7 +1752,7 @@ static void rcu_cleanup_after_idle(void) */ static void rcu_idle_count_callbacks_posted(void) { - __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); + __this_cpu_add(rcu_data.nonlazy_posted, 1); } #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ @@ -1764,13 +1762,12 @@ static void rcu_idle_count_callbacks_posted(void) static void print_cpu_stall_fast_no_hz(char *cp, int cpu) { struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); - unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap; + unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap; sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c", rdp->last_accelerate & 0xffff, jiffies & 0xffff, ulong2long(nlpd), - rdtp->all_lazy ? 'L' : '.', + rdp->all_lazy ? 'L' : '.', rdp->tick_nohz_enabled_snap ? '.' : 'D'); } -- cgit v1.2.3 From 2dba13f0b6c2b26ff371b8927ac58d20a7d94713 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch urgent quiescent-state requests to rcu_data structure This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the rcu_dynticks structure and updates the code to access them from the rcu_data structure. 
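In abridged form, the accessor change repeated throughout the diff below is just a change of the per-CPU variable that the flags hang off of; for example (lines excerpted from the hunks that follow):

	/* Before: the per-CPU urgency flags live in the rcu_dynticks structure. */
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);

	/* After: the same flags are ordinary fields of the rcu_data structure. */
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
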
Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 12 ++++++------ kernel/rcu/tree.h | 2 -- kernel/rcu/tree_exp.h | 2 +- kernel/rcu/tree_plugin.h | 14 +++++++------- 4 files changed, 14 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e778fd5546d1..7ec0ba885273 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -362,7 +362,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void) struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int special; - raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false); + raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); /* It is illegal to call this from idle state. */ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); @@ -928,7 +928,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t) cpu = task_cpu(t); if (!task_curr(t)) return; /* This task is not running on that CPU. */ - smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true); + smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); } #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) @@ -1081,8 +1081,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * is set way high. */ jtsq = READ_ONCE(jiffies_to_sched_qs); - ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu); - rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu); + ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); + rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); if (!READ_ONCE(*rnhqp) && (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || time_after(jiffies, rcu_state.jiffies_resched))) { @@ -2499,13 +2499,13 @@ void rcu_check_callbacks(int user) trace_rcu_utilization(TPS("Start scheduler-tick")); raw_cpu_inc(rcu_data.ticks_this_gp); /* The load-acquire pairs with the store-release setting to true. */ - if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { + if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { /* Idle and userspace execution already are quiescent states. */ if (!rcu_is_cpu_rrupt_from_idle() && !user) { set_tsk_need_resched(current); set_preempt_need_resched(); } - __this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); + __this_cpu_write(rcu_data.rcu_urgent_qs, false); } rcu_flavor_check_callbacks(user); if (rcu_pending()) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 36a47c7bd882..4c31066ddb94 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -41,8 +41,6 @@ struct rcu_dynticks { long dynticks_nesting; /* Track process nesting level. */ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ atomic_t dynticks; /* Even value for idle, else odd. */ - bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */ - bool rcu_urgent_qs; /* GP old need light quiescent state. */ }; /* Communicate arguments to a workqueue handler. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 030df96e0d3c..11387fcd4d85 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -780,7 +780,7 @@ static void sync_sched_exp_handler(void *unused) } __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true); /* Store .exp before .rcu_urgent_qs. 
*/ - smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true); + smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true); set_tsk_need_resched(current); set_preempt_need_resched(); } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index b5aeb2fe4cfe..161760957a07 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -967,17 +967,17 @@ void rcu_all_qs(void) { unsigned long flags; - if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs)) + if (!raw_cpu_read(rcu_data.rcu_urgent_qs)) return; preempt_disable(); /* Load rcu_urgent_qs before other flags. */ - if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) { + if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { preempt_enable(); return; } - this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); + this_cpu_write(rcu_data.rcu_urgent_qs, false); barrier(); /* Avoid RCU read-side critical sections leaking down. */ - if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) { + if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) { local_irq_save(flags); rcu_momentary_dyntick_idle(); local_irq_restore(flags); @@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt) trace_rcu_utilization(TPS("Start context switch")); rcu_qs(); /* Load rcu_urgent_qs before other flags. */ - if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) + if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) goto out; - this_cpu_write(rcu_dynticks.rcu_urgent_qs, false); - if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) + this_cpu_write(rcu_data.rcu_urgent_qs, false); + if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) rcu_momentary_dyntick_idle(); if (!preempt) rcu_tasks_qs(current); -- cgit v1.2.3 From 4c5273bf2b5ed9b585e470dda19c09c875a9fbbd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch dyntick nesting counters to rcu_data structure This commit removes ->dynticks_nesting and ->dynticks_nmi_nesting from the rcu_dynticks structure and updates the code to access them from the rcu_data structure. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 79 +++++++++++++++++++++++++----------------------- kernel/rcu/tree.h | 2 -- kernel/rcu/tree_plugin.h | 2 +- 3 files changed, 43 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7ec0ba885273..bfa264a6f3fc 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -73,7 +73,10 @@ /* Data structures. */ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { + .dynticks_nesting = 1, + .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, +}; struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, .gp_state = RCU_GP_IDLE, @@ -210,8 +213,6 @@ void rcu_softirq_qs(void) #endif static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { - .dynticks_nesting = 1, - .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), }; @@ -378,8 +379,8 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void) */ static int rcu_is_cpu_rrupt_from_idle(void) { - return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 && - __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1; + return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 && + __this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1; } #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. 
*/ @@ -571,27 +572,27 @@ static struct rcu_node *rcu_get_root(void) */ static void rcu_eqs_enter(bool user) { - struct rcu_data *rdp; + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_dynticks *rdtp; rdtp = this_cpu_ptr(&rcu_dynticks); - WARN_ON_ONCE(rdtp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); - WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); + WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); + WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && - rdtp->dynticks_nesting == 0); - if (rdtp->dynticks_nesting != 1) { - rdtp->dynticks_nesting--; + rdp->dynticks_nesting == 0); + if (rdp->dynticks_nesting != 1) { + rdp->dynticks_nesting--; return; } lockdep_assert_irqs_disabled(); - trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks); + trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); rdp = this_cpu_ptr(&rcu_data); do_nocb_deferred_wakeup(rdp); rcu_prepare_for_idle(); rcu_preempt_deferred_qs(current); - WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ + WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ rcu_dynticks_eqs_enter(); rcu_dynticks_task_enter(); } @@ -634,7 +635,7 @@ void rcu_user_enter(void) /* * If we are returning from the outermost NMI handler that interrupted an - * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting + * RCU-idle period, update rdtp->dynticks and rdp->dynticks_nmi_nesting * to let the RCU grace-period handling know that the CPU is back to * being RCU-idle. * @@ -643,30 +644,31 @@ void rcu_user_enter(void) */ static __always_inline void rcu_nmi_exit_common(bool irq) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks); /* * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. * (We are exiting an NMI handler, so RCU better be paying attention * to us!) */ - WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0); + WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()); /* * If the nesting level is not 1, the CPU wasn't RCU-idle, so * leave it in non-RCU-idle state. */ - if (rdtp->dynticks_nmi_nesting != 1) { - trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks); - WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */ - rdtp->dynticks_nmi_nesting - 2); + if (rdp->dynticks_nmi_nesting != 1) { + trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdtp->dynticks); + WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ + rdp->dynticks_nmi_nesting - 2); return; } /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ - trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks); - WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ + trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdtp->dynticks); + WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. 
*/ if (irq) rcu_prepare_for_idle(); @@ -739,25 +741,27 @@ void rcu_irq_exit_irqson(void) */ static void rcu_eqs_exit(bool user) { + struct rcu_data *rdp; struct rcu_dynticks *rdtp; long oldval; lockdep_assert_irqs_disabled(); rdtp = this_cpu_ptr(&rcu_dynticks); - oldval = rdtp->dynticks_nesting; + rdp = this_cpu_ptr(&rcu_data); + oldval = rdp->dynticks_nesting; WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); if (oldval) { - rdtp->dynticks_nesting++; + rdp->dynticks_nesting++; return; } rcu_dynticks_task_exit(); rcu_dynticks_eqs_exit(); rcu_cleanup_after_idle(); - trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks); + trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdtp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); - WRITE_ONCE(rdtp->dynticks_nesting, 1); - WARN_ON_ONCE(rdtp->dynticks_nmi_nesting); - WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); + WRITE_ONCE(rdp->dynticks_nesting, 1); + WARN_ON_ONCE(rdp->dynticks_nmi_nesting); + WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); } /** @@ -799,7 +803,7 @@ void rcu_user_exit(void) * @irq: Is this call from rcu_irq_enter? * * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and - * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know + * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know * that the CPU is active. This implementation permits nested NMIs, as * long as the nesting level does not overflow an int. (You will probably * run out of stack space first.) @@ -809,11 +813,12 @@ void rcu_user_exit(void) */ static __always_inline void rcu_nmi_enter_common(bool irq) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks); long incby = 2; /* Complain about underflow. */ - WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0); + WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); /* * If idle from RCU viewpoint, atomically increment ->dynticks @@ -836,10 +841,10 @@ static __always_inline void rcu_nmi_enter_common(bool irq) incby = 1; } trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), - rdtp->dynticks_nmi_nesting, - rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks); - WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */ - rdtp->dynticks_nmi_nesting + incby); + rdp->dynticks_nmi_nesting, + rdp->dynticks_nmi_nesting + incby, rdtp->dynticks); + WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ + rdp->dynticks_nmi_nesting + incby); barrier(); } @@ -3194,7 +3199,7 @@ rcu_boot_init_percpu_data(int cpu) /* Set up local state, ensuring consistent view of global state. */ rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); rdp->dynticks = &per_cpu(rcu_dynticks, cpu); - WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1); + WARN_ON_ONCE(rdp->dynticks_nesting != 1); WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks))); rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; @@ -3227,7 +3232,7 @@ int rcutree_prepare_cpu(unsigned int cpu) if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ !init_nocb_callback_list(rdp)) rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ - rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */ + rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ rcu_dynticks_eqs_online(); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4c31066ddb94..2e5eec48a94a 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -38,8 +38,6 @@ * Dynticks per-CPU state. */ struct rcu_dynticks { - long dynticks_nesting; /* Track process nesting level. */ - long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ atomic_t dynticks; /* Even value for idle, else odd. */ }; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 161760957a07..7087ee3e1ea5 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1831,7 +1831,7 @@ static void print_cpu_stall_info(int cpu) "!."[!delta], ticks_value, ticks_title, rcu_dynticks_snap(rdtp) & 0xfff, - rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, + rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, fast_no_hz); -- cgit v1.2.3 From dc5a4f2932f18568bb9d8cdbe2139a8ddbc28bb8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 3 Aug 2018 21:00:38 -0700 Subject: rcu: Switch ->dynticks to rcu_data structure, remove rcu_dynticks This commit move ->dynticks from the rcu_dynticks structure to the rcu_data structure, replacing the field of the same name. It also updates the code to access ->dynticks from the rcu_data structure and to use the rcu_data structure rather than following to now-gone ->dynticks field to the now-gone rcu_dynticks structure. While in the area, this commit also fixes up comments. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 92 +++++++++++++++++++++--------------------------- kernel/rcu/tree.h | 35 +++++++----------- kernel/rcu/tree_exp.h | 6 ++-- kernel/rcu/tree_plugin.h | 3 +- 4 files changed, 56 insertions(+), 80 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index bfa264a6f3fc..32f500fb24d3 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -73,9 +73,20 @@ /* Data structures. */ +/* + * Steal a bit from the bottom of ->dynticks for idle entry/exit + * control. Initially this is for TLB flushing. + */ +#define RCU_DYNTICK_CTRL_MASK 0x1 +#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) +#ifndef rcu_eqs_special_exit +#define rcu_eqs_special_exit() do { } while (0) +#endif + static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { .dynticks_nesting = 1, .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, + .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), }; struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, @@ -202,27 +213,13 @@ void rcu_softirq_qs(void) rcu_preempt_deferred_qs(current); } -/* - * Steal a bit from the bottom of ->dynticks for idle entry/exit - * control. Initially this is for TLB flushing. - */ -#define RCU_DYNTICK_CTRL_MASK 0x1 -#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) -#ifndef rcu_eqs_special_exit -#define rcu_eqs_special_exit() do { } while (0) -#endif - -static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { - .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), -}; - /* * Record entry into an extended quiescent state. This is only to be * called when not already in an extended quiescent state. */ static void rcu_dynticks_eqs_enter(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); int seq; /* @@ -230,7 +227,7 @@ static void rcu_dynticks_eqs_enter(void) * critical sections, and we also must force ordering with the * next idle sojourn. 
*/ - seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); /* Better be in an extended quiescent state! */ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICK_CTRL_CTR)); @@ -245,7 +242,7 @@ static void rcu_dynticks_eqs_enter(void) */ static void rcu_dynticks_eqs_exit(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); int seq; /* @@ -253,11 +250,11 @@ static void rcu_dynticks_eqs_exit(void) * and we also must force ordering with the next RCU read-side * critical section. */ - seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICK_CTRL_CTR)); if (seq & RCU_DYNTICK_CTRL_MASK) { - atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks); + atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); smp_mb__after_atomic(); /* _exit after clearing mask. */ /* Prefer duplicate flushes to losing a flush. */ rcu_eqs_special_exit(); @@ -276,11 +273,11 @@ static void rcu_dynticks_eqs_exit(void) */ static void rcu_dynticks_eqs_online(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR) + if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) return; - atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); } /* @@ -290,18 +287,18 @@ static void rcu_dynticks_eqs_online(void) */ bool rcu_dynticks_curr_cpu_in_eqs(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR); + return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); } /* * Snapshot the ->dynticks counter with full ordering so as to allow * stable comparison of this counter with past and future snapshots. */ -int rcu_dynticks_snap(struct rcu_dynticks *rdtp) +int rcu_dynticks_snap(struct rcu_data *rdp) { - int snap = atomic_add_return(0, &rdtp->dynticks); + int snap = atomic_add_return(0, &rdp->dynticks); return snap & ~RCU_DYNTICK_CTRL_MASK; } @@ -316,13 +313,13 @@ static bool rcu_dynticks_in_eqs(int snap) } /* - * Return true if the CPU corresponding to the specified rcu_dynticks + * Return true if the CPU corresponding to the specified rcu_data * structure has spent some time in an extended quiescent state since * rcu_dynticks_snap() returned the specified snapshot. 
*/ -static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap) +static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) { - return snap != rcu_dynticks_snap(rdtp); + return snap != rcu_dynticks_snap(rdp); } /* @@ -336,14 +333,14 @@ bool rcu_eqs_special_set(int cpu) { int old; int new; - struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); do { - old = atomic_read(&rdtp->dynticks); + old = atomic_read(&rdp->dynticks); if (old & RCU_DYNTICK_CTRL_CTR) return false; new = old | RCU_DYNTICK_CTRL_MASK; - } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old); + } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old); return true; } @@ -360,11 +357,11 @@ bool rcu_eqs_special_set(int cpu) */ static void __maybe_unused rcu_momentary_dyntick_idle(void) { - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); int special; raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); - special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks); + special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, + &this_cpu_ptr(&rcu_data)->dynticks); /* It is illegal to call this from idle state. */ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); rcu_preempt_deferred_qs(current); @@ -573,9 +570,7 @@ static struct rcu_node *rcu_get_root(void) static void rcu_eqs_enter(bool user) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_dynticks *rdtp; - rdtp = this_cpu_ptr(&rcu_dynticks); WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && @@ -586,7 +581,7 @@ static void rcu_eqs_enter(bool user) } lockdep_assert_irqs_disabled(); - trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdtp->dynticks); + trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); rdp = this_cpu_ptr(&rcu_data); do_nocb_deferred_wakeup(rdp); @@ -635,7 +630,7 @@ void rcu_user_enter(void) /* * If we are returning from the outermost NMI handler that interrupted an - * RCU-idle period, update rdtp->dynticks and rdp->dynticks_nmi_nesting + * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting * to let the RCU grace-period handling know that the CPU is back to * being RCU-idle. * @@ -645,7 +640,6 @@ void rcu_user_enter(void) static __always_inline void rcu_nmi_exit_common(bool irq) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks); /* * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. @@ -660,14 +654,14 @@ static __always_inline void rcu_nmi_exit_common(bool irq) * leave it in non-RCU-idle state. */ if (rdp->dynticks_nmi_nesting != 1) { - trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdtp->dynticks); + trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks); WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ rdp->dynticks_nmi_nesting - 2); return; } /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ - trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdtp->dynticks); + trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. 
*/ if (irq) @@ -742,11 +736,9 @@ void rcu_irq_exit_irqson(void) static void rcu_eqs_exit(bool user) { struct rcu_data *rdp; - struct rcu_dynticks *rdtp; long oldval; lockdep_assert_irqs_disabled(); - rdtp = this_cpu_ptr(&rcu_dynticks); rdp = this_cpu_ptr(&rcu_data); oldval = rdp->dynticks_nesting; WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); @@ -757,7 +749,7 @@ static void rcu_eqs_exit(bool user) rcu_dynticks_task_exit(); rcu_dynticks_eqs_exit(); rcu_cleanup_after_idle(); - trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdtp->dynticks); + trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); WRITE_ONCE(rdp->dynticks_nesting, 1); WARN_ON_ONCE(rdp->dynticks_nmi_nesting); @@ -802,7 +794,7 @@ void rcu_user_exit(void) * rcu_nmi_enter_common - inform RCU of entry to NMI context * @irq: Is this call from rcu_irq_enter? * - * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and + * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know * that the CPU is active. This implementation permits nested NMIs, as * long as the nesting level does not overflow an int. (You will probably @@ -814,7 +806,6 @@ void rcu_user_exit(void) static __always_inline void rcu_nmi_enter_common(bool irq) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks); long incby = 2; /* Complain about underflow. */ @@ -842,7 +833,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq) } trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), rdp->dynticks_nmi_nesting, - rdp->dynticks_nmi_nesting + incby, rdtp->dynticks); + rdp->dynticks_nmi_nesting + incby, rdp->dynticks); WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ rdp->dynticks_nmi_nesting + incby); barrier(); @@ -995,7 +986,7 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) */ static int dyntick_save_progress_counter(struct rcu_data *rdp) { - rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks); + rdp->dynticks_snap = rcu_dynticks_snap(rdp); if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rcu_gpnum_ovf(rdp->mynode, rdp); @@ -1046,7 +1037,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * read-side critical section that started before the beginning * of the current RCU grace period. */ - if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) { + if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rdp->dynticks_fqs++; rcu_gpnum_ovf(rnp, rdp); @@ -3198,9 +3189,8 @@ rcu_boot_init_percpu_data(int cpu) /* Set up local state, ensuring consistent view of global state. */ rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); - rdp->dynticks = &per_cpu(rcu_dynticks, cpu); WARN_ON_ONCE(rdp->dynticks_nesting != 1); - WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks))); + WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; rdp->rcu_onl_gp_seq = rcu_state.gp_seq; diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 2e5eec48a94a..af8681fec23b 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -34,13 +34,6 @@ #include "rcu_segcblist.h" -/* - * Dynticks per-CPU state. 
- */ -struct rcu_dynticks { - atomic_t dynticks; /* Even value for idle, else odd. */ -}; - /* Communicate arguments to a workqueue handler. */ struct rcu_exp_work { smp_call_func_t rew_func; @@ -194,24 +187,20 @@ struct rcu_data { long blimit; /* Upper limit on a processed batch */ /* 3) dynticks interface. */ - struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ int dynticks_snap; /* Per-GP tracking for dynticks. */ - long dynticks_nesting; /* Track process nesting level. */ - long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ - // atomic_t dynticks; /* Even value for idle, else odd. */ - bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */ - bool rcu_urgent_qs; /* GP old need light quiescent state. */ + long dynticks_nesting; /* Track process nesting level. */ + long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ + atomic_t dynticks; /* Even value for idle, else odd. */ + bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */ + bool rcu_urgent_qs; /* GP old need light quiescent state. */ #ifdef CONFIG_RCU_FAST_NO_HZ - bool all_lazy; /* Are all CPU's CBs lazy? */ - unsigned long nonlazy_posted; - /* # times non-lazy CBs posted to CPU. */ + bool all_lazy; /* Are all CPU's CBs lazy? */ + unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */ unsigned long nonlazy_posted_snap; - /* idle-period nonlazy_posted snapshot. */ - unsigned long last_accelerate; - /* Last jiffy CBs were accelerated. */ - unsigned long last_advance_all; - /* Last jiffy CBs were all advanced. */ - int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ + /* Nonlazy_posted snapshot. */ + unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */ + unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ + int tick_nohz_enabled_snap; /* Previously seen value from sysfs. 
*/ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ @@ -426,7 +415,7 @@ extern struct rcu_state rcu_bh_state; extern struct rcu_state rcu_preempt_state; #endif /* #ifdef CONFIG_PREEMPT_RCU */ -int rcu_dynticks_snap(struct rcu_dynticks *rdtp); +int rcu_dynticks_snap(struct rcu_data *rdp); #ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 11387fcd4d85..8d18c1014e2b 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -360,14 +360,13 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { unsigned long mask = leaf_node_cpu_bit(rnp, cpu); struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu); int snap; if (raw_smp_processor_id() == cpu || !(rnp->qsmaskinitnext & mask)) { mask_ofl_test |= mask; } else { - snap = rcu_dynticks_snap(rdtp); + snap = rcu_dynticks_snap(rdp); if (rcu_dynticks_in_eqs(snap)) mask_ofl_test |= mask; else @@ -393,8 +392,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) if (!(mask_ofl_ipi & mask)) continue; retry_ipi: - if (rcu_dynticks_in_eqs_since(rdp->dynticks, - rdp->exp_dynticks_snap)) { + if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) { mask_ofl_test |= mask; continue; } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 7087ee3e1ea5..05915e536336 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1802,7 +1802,6 @@ static void print_cpu_stall_info(int cpu) unsigned long delta; char fast_no_hz[72]; struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - struct rcu_dynticks *rdtp = rdp->dynticks; char *ticks_title; unsigned long ticks_value; @@ -1830,7 +1829,7 @@ static void print_cpu_stall_info(int cpu) rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : "!."[!delta], ticks_value, ticks_title, - rcu_dynticks_snap(rdtp) & 0xfff, + rcu_dynticks_snap(rdp) & 0xfff, rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, fast_no_hz); -- cgit v1.2.3 From 8d8a9d0e7eda9feeee4af7be31932e14b512d3ad Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 4 Aug 2018 20:32:07 -0700 Subject: rcu: Remove obsolete ->dynticks_fqs and ->cond_resched_completed The rcu_data structure's ->dynticks_fqs is incremented but never accessed. Its ->cond_resched_completed field isn't used at all. This commit therefore removes both fields. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 - kernel/rcu/tree.h | 12 +++--------- 2 files changed, 3 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 32f500fb24d3..85c2c2dc4c4a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1039,7 +1039,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) */ if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); - rdp->dynticks_fqs++; rcu_gpnum_ovf(rnp, rdp); return 1; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index af8681fec23b..bfbf97a1c29d 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -203,17 +203,11 @@ struct rcu_data { int tick_nohz_enabled_snap; /* Previously seen value from sysfs. 
*/ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ - /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ - unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ - unsigned long cond_resched_completed; - /* Grace period that needs help */ - /* from cond_resched(). */ - - /* 5) rcu_barrier(), OOM callbacks, and expediting. */ + /* 4) rcu_barrier(), OOM callbacks, and expediting. */ struct rcu_head barrier_head; int exp_dynticks_snap; /* Double-check need for IPI. */ - /* 6) Callback offloading. */ + /* 5) Callback offloading. */ #ifdef CONFIG_RCU_NOCB_CPU struct rcu_head *nocb_head; /* CBs waiting for kthread. */ struct rcu_head **nocb_tail; @@ -240,7 +234,7 @@ struct rcu_data { /* Leader CPU takes GP-end wakeups. */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ - /* 7) Diagnostic data, including RCU CPU stall warnings. */ + /* 6) Diagnostic data, including RCU CPU stall warnings. */ unsigned int softirq_snap; /* Snapshot of softirq activity. */ /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ struct irq_work rcu_iw; /* Check for non-irq activity. */ -- cgit v1.2.3 From 894d45bbf7e7569ec2aa845155801fd503b5f1bf Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 15 Aug 2018 09:05:29 -0700 Subject: rcu: Convert rcu_state.ofl_lock to raw_spinlock_t 1e64b15a4b10 ("rcu: Fix grace-period hangs due to race with CPU offline") added spinlock_t ofl_lock to the rcu_state structure, then takes it with preemption disabled during CPU offline, which gives the -rt patchset's sleeping spinlock heartburn. This commit therefore converts ->ofl_lock to raw_spinlock_t. Signed-off-by: Mike Galbraith Signed-off-by: Paul E. McKenney Cc: Sebastian Andrzej Siewior --- kernel/rcu/tree.c | 12 ++++++------ kernel/rcu/tree.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 85c2c2dc4c4a..58aa6c2fd7fa 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -97,7 +97,7 @@ struct rcu_state rcu_state = { .abbr = RCU_ABBR, .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), - .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), + .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), }; /* Dump rcu_node combining tree at boot to verify correct setup. */ @@ -1776,13 +1776,13 @@ static bool rcu_gp_init(void) */ rcu_state.gp_state = RCU_GP_ONOFF; rcu_for_each_leaf_node(rnp) { - spin_lock(&rcu_state.ofl_lock); + raw_spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irq_rcu_node(rnp); if (rnp->qsmaskinit == rnp->qsmaskinitnext && !rnp->wait_blkd_tasks) { /* Nothing to do on this leaf rcu_node structure. */ raw_spin_unlock_irq_rcu_node(rnp); - spin_unlock(&rcu_state.ofl_lock); + raw_spin_unlock(&rcu_state.ofl_lock); continue; } @@ -1818,7 +1818,7 @@ static bool rcu_gp_init(void) } raw_spin_unlock_irq_rcu_node(rnp); - spin_unlock(&rcu_state.ofl_lock); + raw_spin_unlock(&rcu_state.ofl_lock); } rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ @@ -3377,7 +3377,7 @@ void rcu_report_dead(unsigned int cpu) /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ mask = rdp->grpmask; - spin_lock(&rcu_state.ofl_lock); + raw_spin_lock(&rcu_state.ofl_lock); raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. 
*/ rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); @@ -3388,7 +3388,7 @@ void rcu_report_dead(unsigned int cpu) } rnp->qsmaskinitnext &= ~mask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - spin_unlock(&rcu_state.ofl_lock); + raw_spin_unlock(&rcu_state.ofl_lock); per_cpu(rcu_cpu_started, cpu) = 0; } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index bfbf97a1c29d..703e19ff532d 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -343,7 +343,7 @@ struct rcu_state { const char *name; /* Name of structure. */ char abbr; /* Abbreviated name. */ - spinlock_t ofl_lock ____cacheline_internodealigned_in_smp; + raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp; /* Synchronize offline with */ /* GP pre-initialization. */ }; -- cgit v1.2.3 From e0fcba9ac02af5aeb1e1c3e842eab987f817c309 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 14 Aug 2018 08:45:54 -0700 Subject: srcu: Make call_srcu() available during very early boot Event tracing is moving to SRCU in order to take advantage of the fact that SRCU may be safely used from idle and even offline CPUs. However, event tracing can invoke call_srcu() very early in the boot process, even before workqueue_init_early() is invoked (let alone rcu_init()). Therefore, call_srcu()'s attempts to queue work fail miserably. This commit therefore detects this situation, and refrains from attempting to queue work before rcu_init() time, but does everything else that it would have done, and in addition, adds the srcu_struct to a global list. The rcu_init() function now invokes a new srcu_init() function, which is empty if CONFIG_SRCU=n. Otherwise, srcu_init() queues work for each srcu_struct on the list. This all happens early enough in boot that there is but a single CPU with interrupts disabled, which allows synchronization to be dispensed with. Of course, the queued work won't actually be invoked until after workqueue_init() is invoked, which happens shortly after the scheduler is up and running. This means that although call_srcu() may be invoked any time after per-CPU variables have been set up, there is still a very narrow window when synchronize_srcu() won't work, and this window extends from the time that the scheduler starts until the time that workqueue_init() returns. This can be fixed in a manner similar to the fix for synchronize_rcu_expedited() and friends, but until someone actually needs to use synchronize_srcu() during this window, this fix is added churn for no benefit. Finally, note that Tree SRCU's new srcu_init() function invokes queue_work() rather than the queue_delayed_work() function that is invoked post-boot. The reason is that queue_delayed_work() will (as you would expect) post a timer, and timers have not yet been initialized. So use of queue_work() avoids the complaints about use of uninitialized spinlocks that would otherwise result. Besides, some delay is already provided by the aforementioned fact that the queued work won't actually be invoked until after the scheduler is up and running. Requested-by: Steven Rostedt Signed-off-by: Paul E. 
McKenney Tested-by: Steven Rostedt (VMware) --- include/linux/srcutiny.h | 2 ++ include/linux/srcutree.h | 14 ++++++++------ kernel/rcu/rcu.h | 6 ++++++ kernel/rcu/srcutiny.c | 29 +++++++++++++++++++++++++++-- kernel/rcu/srcutree.c | 26 ++++++++++++++++++++++++-- kernel/rcu/tiny.c | 1 + kernel/rcu/tree.c | 1 + kernel/rcu/update.c | 9 +++++++++ 8 files changed, 78 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index f41d2fb09f87..2b5c0822e683 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -36,6 +36,7 @@ struct srcu_struct { struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */ struct rcu_head **srcu_cb_tail; /* Pending callbacks: Tail. */ struct work_struct srcu_work; /* For driving grace periods. */ + struct list_head srcu_boot_entry; /* Early-boot callbacks. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -48,6 +49,7 @@ void srcu_drive_gp(struct work_struct *wp); .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \ .srcu_cb_tail = &name.srcu_cb_head, \ .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \ + .srcu_boot_entry = LIST_HEAD_INIT(name.srcu_boot_entry), \ __SRCU_DEP_MAP_INIT(name) \ } diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 745d4ca4dd50..9cfa4610113a 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -94,6 +94,7 @@ struct srcu_struct { /* callback for the barrier */ /* operation. */ struct delayed_work work; + struct list_head srcu_boot_entry; /* Early-boot callbacks. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -105,12 +106,13 @@ struct srcu_struct { #define SRCU_STATE_SCAN2 2 #define __SRCU_STRUCT_INIT(name, pcpu_name) \ - { \ - .sda = &pcpu_name, \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .srcu_gp_seq_needed = 0 - 1, \ - __SRCU_DEP_MAP_INIT(name) \ - } +{ \ + .sda = &pcpu_name, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .srcu_gp_seq_needed = -1UL, \ + .srcu_boot_entry = LIST_HEAD_INIT(name.srcu_boot_entry), \ + __SRCU_DEP_MAP_INIT(name) \ +} /* * Define and initialize a srcu struct at build time. diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 4d04683c31b2..e1b5aec5ec1c 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -435,6 +435,12 @@ do { \ #endif /* #if defined(SRCU) || !defined(TINY_RCU) */ +#ifdef CONFIG_SRCU +void srcu_init(void); +#else /* #ifdef CONFIG_SRCU */ +static inline void srcu_init(void) { } +#endif /* #else #ifdef CONFIG_SRCU */ + #ifdef CONFIG_TINY_RCU /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. 
*/ static inline bool rcu_gp_is_normal(void) { return true; } diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 04fc2ed71af8..d233f0c63f6f 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -34,6 +34,8 @@ #include "rcu.h" int rcu_scheduler_active __read_mostly; +static LIST_HEAD(srcu_boot_list); +static bool srcu_init_done; static int init_srcu_struct_fields(struct srcu_struct *sp) { @@ -46,6 +48,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) sp->srcu_gp_waiting = false; sp->srcu_idx = 0; INIT_WORK(&sp->srcu_work, srcu_drive_gp); + INIT_LIST_HEAD(&sp->srcu_boot_entry); return 0; } @@ -179,8 +182,12 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, *sp->srcu_cb_tail = rhp; sp->srcu_cb_tail = &rhp->next; local_irq_restore(flags); - if (!READ_ONCE(sp->srcu_gp_running)) - schedule_work(&sp->srcu_work); + if (!READ_ONCE(sp->srcu_gp_running)) { + if (likely(srcu_init_done)) + schedule_work(&sp->srcu_work); + else if (list_empty(&sp->srcu_boot_entry)) + list_add(&sp->srcu_boot_entry, &srcu_boot_list); + } } EXPORT_SYMBOL_GPL(call_srcu); @@ -204,3 +211,21 @@ void __init rcu_scheduler_starting(void) { rcu_scheduler_active = RCU_SCHEDULER_RUNNING; } + +/* + * Queue work for srcu_struct structures with early boot callbacks. + * The work won't actually execute until the workqueue initialization + * phase that takes place after the scheduler starts. + */ +void __init srcu_init(void) +{ + struct srcu_struct *sp; + + srcu_init_done = true; + while (!list_empty(&srcu_boot_list)) { + sp = list_first_entry(&srcu_boot_list, + struct srcu_struct, srcu_boot_entry); + list_del_init(&sp->srcu_boot_entry); + schedule_work(&sp->srcu_work); + } +} diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 6c9866a854b1..2e7f6b460150 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -51,6 +51,10 @@ module_param(exp_holdoff, ulong, 0444); static ulong counter_wrap_check = (ULONG_MAX >> 2); module_param(counter_wrap_check, ulong, 0444); +/* Early-boot callback-management, so early that no lock is required! */ +static LIST_HEAD(srcu_boot_list); +static bool __read_mostly srcu_init_done; + static void srcu_invoke_callbacks(struct work_struct *work); static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); static void process_srcu(struct work_struct *work); @@ -182,6 +186,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) mutex_init(&sp->srcu_barrier_mutex); atomic_set(&sp->srcu_barrier_cpu_cnt, 0); INIT_DELAYED_WORK(&sp->work, process_srcu); + INIT_LIST_HEAD(&sp->srcu_boot_entry); if (!is_static) sp->sda = alloc_percpu(struct srcu_data); init_srcu_struct_nodes(sp, is_static); @@ -235,7 +240,6 @@ static void check_init_srcu_struct(struct srcu_struct *sp) { unsigned long flags; - WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); /* The smp_load_acquire() pairs with the smp_store_release(). */ if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ return; /* Already initialized. 
*/ @@ -701,7 +705,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); srcu_gp_start(sp); - queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp)); + if (likely(srcu_init_done)) + queue_delayed_work(rcu_gp_wq, &sp->work, + srcu_get_delay(sp)); + else if (list_empty(&sp->srcu_boot_entry)) + list_add(&sp->srcu_boot_entry, &srcu_boot_list); } spin_unlock_irqrestore_rcu_node(sp, flags); } @@ -1308,3 +1316,17 @@ static int __init srcu_bootup_announce(void) return 0; } early_initcall(srcu_bootup_announce); + +void __init srcu_init(void) +{ + struct srcu_struct *sp; + + srcu_init_done = true; + while (!list_empty(&srcu_boot_list)) { + sp = list_first_entry(&srcu_boot_list, + struct srcu_struct, srcu_boot_entry); + check_init_srcu_struct(sp); + list_del_init(&sp->srcu_boot_entry); + queue_work(rcu_gp_wq, &sp->work.work); + } +} diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index befc9321a89c..101ed5bb836c 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -236,4 +236,5 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); rcu_early_boot_tests(); + srcu_init(); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0b760c1369f7..43c806291208 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4164,6 +4164,7 @@ void __init rcu_init(void) WARN_ON(!rcu_gp_wq); rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); WARN_ON(!rcu_par_gp_wq); + srcu_init(); } #include "tree_exp.h" diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 39cb23d22109..7d057d0aaec4 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -888,11 +888,16 @@ static void test_callback(struct rcu_head *r) pr_info("RCU test callback executed %d\n", rcu_self_test_counter); } +DEFINE_STATIC_SRCU(early_srcu); + static void early_boot_test_call_rcu(void) { static struct rcu_head head; + static struct rcu_head shead; call_rcu(&head, test_callback); + if (IS_ENABLED(CONFIG_SRCU)) + call_srcu(&early_srcu, &shead, test_callback); } static void early_boot_test_call_rcu_bh(void) @@ -930,6 +935,10 @@ static int rcu_verify_early_boot_tests(void) if (rcu_self_test) { early_boot_test_counter++; rcu_barrier(); + if (IS_ENABLED(CONFIG_SRCU)) { + early_boot_test_counter++; + srcu_barrier(&early_srcu); + } } if (rcu_self_test_bh) { early_boot_test_counter++; -- cgit v1.2.3 From 4e6ea4ef56f9425cd239ffdb6be45b3aeeb347fd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 14 Aug 2018 14:41:49 -0700 Subject: srcu: Make early-boot call_srcu() reuse workqueue lists Allocating a list_head structure that is almost never used, and, when used, is used only during early boot (rcu_init() and earlier), is a bit wasteful. This commit therefore eliminates that list_head in favor of the one in the work_struct structure. This is safe because the work_struct structure cannot be used until after rcu_init() returns. Reported-by: Steven Rostedt Signed-off-by: Paul E. 
McKenney Cc: Tejun Heo Cc: Lai Jiangshan Tested-by: Steven Rostedt (VMware) --- include/linux/srcutiny.h | 2 -- include/linux/srcutree.h | 3 +-- kernel/rcu/srcutiny.c | 10 +++++----- kernel/rcu/srcutree.c | 11 +++++------ 4 files changed, 11 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 2b5c0822e683..f41d2fb09f87 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -36,7 +36,6 @@ struct srcu_struct { struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */ struct rcu_head **srcu_cb_tail; /* Pending callbacks: Tail. */ struct work_struct srcu_work; /* For driving grace periods. */ - struct list_head srcu_boot_entry; /* Early-boot callbacks. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -49,7 +48,6 @@ void srcu_drive_gp(struct work_struct *wp); .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \ .srcu_cb_tail = &name.srcu_cb_head, \ .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \ - .srcu_boot_entry = LIST_HEAD_INIT(name.srcu_boot_entry), \ __SRCU_DEP_MAP_INIT(name) \ } diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 9cfa4610113a..0ae91b3a7406 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -94,7 +94,6 @@ struct srcu_struct { /* callback for the barrier */ /* operation. */ struct delayed_work work; - struct list_head srcu_boot_entry; /* Early-boot callbacks. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -110,7 +109,7 @@ struct srcu_struct { .sda = &pcpu_name, \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .srcu_gp_seq_needed = -1UL, \ - .srcu_boot_entry = LIST_HEAD_INIT(name.srcu_boot_entry), \ + .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \ __SRCU_DEP_MAP_INIT(name) \ } diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index d233f0c63f6f..b46e6683f8c9 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -48,7 +48,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp) sp->srcu_gp_waiting = false; sp->srcu_idx = 0; INIT_WORK(&sp->srcu_work, srcu_drive_gp); - INIT_LIST_HEAD(&sp->srcu_boot_entry); + INIT_LIST_HEAD(&sp->srcu_work.entry); return 0; } @@ -185,8 +185,8 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, if (!READ_ONCE(sp->srcu_gp_running)) { if (likely(srcu_init_done)) schedule_work(&sp->srcu_work); - else if (list_empty(&sp->srcu_boot_entry)) - list_add(&sp->srcu_boot_entry, &srcu_boot_list); + else if (list_empty(&sp->srcu_work.entry)) + list_add(&sp->srcu_work.entry, &srcu_boot_list); } } EXPORT_SYMBOL_GPL(call_srcu); @@ -224,8 +224,8 @@ void __init srcu_init(void) srcu_init_done = true; while (!list_empty(&srcu_boot_list)) { sp = list_first_entry(&srcu_boot_list, - struct srcu_struct, srcu_boot_entry); - list_del_init(&sp->srcu_boot_entry); + struct srcu_struct, srcu_work.entry); + list_del_init(&sp->srcu_work.entry); schedule_work(&sp->srcu_work); } } diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 2e7f6b460150..86c7fd0a1bfe 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -186,7 +186,6 @@ static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) mutex_init(&sp->srcu_barrier_mutex); atomic_set(&sp->srcu_barrier_cpu_cnt, 0); INIT_DELAYED_WORK(&sp->work, process_srcu); - INIT_LIST_HEAD(&sp->srcu_boot_entry); if (!is_static) sp->sda = alloc_percpu(struct srcu_data); 
init_srcu_struct_nodes(sp, is_static); @@ -708,8 +707,8 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, if (likely(srcu_init_done)) queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp)); - else if (list_empty(&sp->srcu_boot_entry)) - list_add(&sp->srcu_boot_entry, &srcu_boot_list); + else if (list_empty(&sp->work.work.entry)) + list_add(&sp->work.work.entry, &srcu_boot_list); } spin_unlock_irqrestore_rcu_node(sp, flags); } @@ -1323,10 +1322,10 @@ void __init srcu_init(void) srcu_init_done = true; while (!list_empty(&srcu_boot_list)) { - sp = list_first_entry(&srcu_boot_list, - struct srcu_struct, srcu_boot_entry); + sp = list_first_entry(&srcu_boot_list, struct srcu_struct, + work.work.entry); check_init_srcu_struct(sp); - list_del_init(&sp->srcu_boot_entry); + list_del_init(&sp->work.work.entry); queue_work(rcu_gp_wq, &sp->work.work); } } -- cgit v1.2.3 From c1d0af1a1d5dfde880f588eceb4c00710e0f60ff Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 08:07:52 +0200 Subject: kernel/dma/direct: take DMA offset into account in dma_direct_supported When a device has a DMA offset the dma capable result will change due to the difference between the physical and DMA address. Take that into account. Signed-off-by: Christoph Hellwig Reviewed-by: Benjamin Herrenschmidt Reviewed-by: Robin Murphy --- kernel/dma/direct.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 1c35b7b945d0..de87b0282e74 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, int dma_direct_supported(struct device *dev, u64 mask) { #ifdef CONFIG_ZONE_DMA - if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))) return 0; #else /* @@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask) * memory, or by providing a ZONE_DMA32. If neither is the case, the * architecture needs to use an IOMMU instead of the direct mapping. */ - if (mask < DMA_BIT_MASK(32)) + if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) return 0; #endif /* -- cgit v1.2.3 From 597222f72a94118f593e4f32bf58ae7e049a0df1 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 30 Aug 2018 21:25:02 -0700 Subject: bpf: avoid misuse of psock when TCP_ULP_BPF collides with another ULP Currently we check sk_user_data is non NULL to determine if the sk exists in a map. However, this is not sufficient to ensure the psock or the ULP ops are not in use by another user, such as kcm or TLS. To avoid this when adding a sock to a map also verify it is of the correct ULP type. Additionally, when releasing a psock verify that it is the TCP_ULP_BPF type before releasing the ULP. The error case where we abort an update due to ULP collision can cause this error path. For example, __sock_map_ctx_update_elem() [...] err = tcp_set_ulp_id(sock, TCP_ULP_BPF) <- collides with TLS if (err) <- so err out here goto out_free [...] out_free: smap_release_sock() <- calling tcp_cleanup_ulp releases the TLS ULP incorrectly. 
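The fix below boils down to a single ownership check: before the sockmap code tears down or reuses a ULP, it verifies that the socket's installed ULP ops really are the TCP_ULP_BPF ops. A minimal sketch of that check, using only names that appear in the diff and assuming the surrounding sockmap plumbing:

static bool psock_is_smap_sk(struct sock *sk)
{
        /* Only true when sockmap, not TLS or kcm, owns this socket's ULP. */
        return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
}

smap_release_sock() then calls tcp_cleanup_ulp() only when this returns true, and __sock_map_ctx_update_elem() fails the update with -EBUSY when an existing psock does not belong to sockmap.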
Fixes: 2f857d04601a ("bpf: sockmap, remove STRPARSER map_flags and add multi-map support") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index ce63e5801746..488ef9663c01 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -1462,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu) schedule_work(&psock->gc_work); } +static bool psock_is_smap_sk(struct sock *sk) +{ + return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops; +} + static void smap_release_sock(struct smap_psock *psock, struct sock *sock) { if (refcount_dec_and_test(&psock->refcnt)) { - tcp_cleanup_ulp(sock); + if (psock_is_smap_sk(sock)) + tcp_cleanup_ulp(sock); write_lock_bh(&sock->sk_callback_lock); smap_stop_sock(psock, sock); write_unlock_bh(&sock->sk_callback_lock); @@ -1892,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, * doesn't update user data. */ if (psock) { + if (!psock_is_smap_sk(sock)) { + err = -EBUSY; + goto out_progs; + } if (READ_ONCE(psock->bpf_parse) && parse) { err = -EBUSY; goto out_progs; -- cgit v1.2.3 From afaef01c001537fa97a25092d7f54d764dc7d8c1 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 17 Aug 2018 01:16:58 +0300 Subject: x86/entry: Add STACKLEAK erasing the kernel stack at the end of syscalls The STACKLEAK feature (initially developed by PaX Team) has the following benefits: 1. Reduces the information that can be revealed through kernel stack leak bugs. The idea of erasing the thread stack at the end of syscalls is similar to CONFIG_PAGE_POISONING and memzero_explicit() in kernel crypto, which all comply with FDP_RIP.2 (Full Residual Information Protection) of the Common Criteria standard. 2. Blocks some uninitialized stack variable attacks (e.g. CVE-2017-17712, CVE-2010-2963). That kind of bugs should be killed by improving C compilers in future, which might take a long time. This commit introduces the code filling the used part of the kernel stack with a poison value before returning to userspace. Full STACKLEAK feature also contains the gcc plugin which comes in a separate commit. The STACKLEAK feature is ported from grsecurity/PaX. More information at: https://grsecurity.net/ https://pax.grsecurity.net/ This code is modified from Brad Spengler/PaX Team's code in the last public patch of grsecurity/PaX based on our understanding of the code. Changes or omissions from the original code are ours and don't reflect the original grsecurity/PaX code. Performance impact: Hardware: Intel Core i7-4770, 16 GB RAM Test #1: building the Linux kernel on a single core 0.91% slowdown Test #2: hackbench -s 4096 -l 2000 -g 15 -f 25 -P 4.2% slowdown So the STACKLEAK description in Kconfig includes: "The tradeoff is the performance impact: on a single CPU system kernel compilation sees a 1% slowdown, other systems and workloads may vary and you are advised to test this feature on your expected workload before deploying it". 
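In outline, the erasing pass starts from the per-task low-water mark, walks toward the stack end until it finds memory that is still poisoned from the previous erase, and then refills everything from that point up to the current stack pointer. A simplified sketch of that idea follows; the function name here is invented, and unlike the real stackleak_erase() added in kernel/stackleak.c below it checks a single poison word instead of a STACKLEAK_SEARCH_DEPTH run and ignores the reserved word at the stack end and the trampoline-stack case:

static void stackleak_erase_sketch(void)
{
        unsigned long ptr = current->lowest_stack;
        unsigned long end = (unsigned long)end_of_stack(current);

        /* Find where the still-poisoned (untouched) region begins. */
        while (ptr > end && *(unsigned long *)ptr != STACKLEAK_POISON)
                ptr -= sizeof(unsigned long);

        /* Refill everything dirtied since the last return to userspace. */
        while (ptr < current_stack_pointer) {
                *(unsigned long *)ptr = STACKLEAK_POISON;
                ptr += sizeof(unsigned long);
        }

        /* Reset the low-water mark for the next syscall. */
        current->lowest_stack = current_top_of_stack() - THREAD_SIZE / 64;
}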
Signed-off-by: Alexander Popov Acked-by: Thomas Gleixner Reviewed-by: Dave Hansen Acked-by: Ingo Molnar Signed-off-by: Kees Cook --- Documentation/x86/x86_64/mm.txt | 2 ++ arch/Kconfig | 7 +++++ arch/x86/Kconfig | 1 + arch/x86/entry/calling.h | 14 +++++++++ arch/x86/entry/entry_32.S | 7 +++++ arch/x86/entry/entry_64.S | 3 ++ arch/x86/entry/entry_64_compat.S | 5 ++++ include/linux/sched.h | 4 +++ include/linux/stackleak.h | 26 +++++++++++++++++ kernel/Makefile | 4 +++ kernel/fork.c | 3 ++ kernel/stackleak.c | 62 ++++++++++++++++++++++++++++++++++++++++ scripts/gcc-plugins/Kconfig | 19 ++++++++++++ 13 files changed, 157 insertions(+) create mode 100644 include/linux/stackleak.h create mode 100644 kernel/stackleak.c (limited to 'kernel') diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 5432a96d31ff..600bc2afa27d 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -24,6 +24,7 @@ ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space [fixmap start] - ffffffffff5fffff kernel-internal fixmap range ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole +STACKLEAK_POISON value in this last hole: ffffffffffff4111 Virtual memory map with 5 level page tables: @@ -50,6 +51,7 @@ ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space [fixmap start] - ffffffffff5fffff kernel-internal fixmap range ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole +STACKLEAK_POISON value in this last hole: ffffffffffff4111 Architecture defines a 64-bit virtual address. Implementations can support less. Currently supported are 48- and 57-bit virtual addresses. Bits 63 diff --git a/arch/Kconfig b/arch/Kconfig index 6801123932a5..ee79ff56faab 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -419,6 +419,13 @@ config SECCOMP_FILTER See Documentation/userspace-api/seccomp_filter.rst for details. +config HAVE_ARCH_STACKLEAK + bool + help + An architecture should select this if it has the code which + fills the used part of the kernel stack with the STACKLEAK_POISON + value before returning from system calls. + config HAVE_STACKPROTECTOR bool help diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1a0be022f91d..662cb2cc9630 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -127,6 +127,7 @@ config X86 select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_THREAD_STRUCT_WHITELIST + select HAVE_ARCH_STACKLEAK select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 352e70cd33e8..20d0885b00fb 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -329,8 +329,22 @@ For 32-bit we have the following conventions - kernel is built with #endif +.macro STACKLEAK_ERASE_NOCLOBBER +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + PUSH_AND_CLEAR_REGS + call stackleak_erase + POP_REGS +#endif +.endm + #endif /* CONFIG_X86_64 */ +.macro STACKLEAK_ERASE +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + call stackleak_erase +#endif +.endm + /* * This does 'call enter_from_user_mode' unless we can avoid it based on * kernel config or using the static jump infrastructure. 
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 2767c625a52c..dfb975b4c981 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -46,6 +46,8 @@ #include #include +#include "calling.h" + .section .entry.text, "ax" /* @@ -711,6 +713,7 @@ ENTRY(ret_from_fork) /* When we fork, we trace the syscall return in the child, too. */ movl %esp, %eax call syscall_return_slowpath + STACKLEAK_ERASE jmp restore_all /* kernel thread */ @@ -885,6 +888,8 @@ ENTRY(entry_SYSENTER_32) ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV + STACKLEAK_ERASE + /* Opportunistic SYSEXIT */ TRACE_IRQS_ON /* User mode traces as IRQs on. */ @@ -996,6 +1001,8 @@ ENTRY(entry_INT80_32) call do_int80_syscall_32 .Lsyscall_32_done: + STACKLEAK_ERASE + restore_all: TRACE_IRQS_IRET SWITCH_TO_ENTRY_STACK diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 957dfb693ecc..a5dd28093020 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -329,6 +329,8 @@ syscall_return_via_sysret: * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ + STACKLEAK_ERASE_NOCLOBBER + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi @@ -688,6 +690,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ + STACKLEAK_ERASE_NOCLOBBER SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 7d0df78db727..8eaf8952c408 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -261,6 +261,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) /* Opportunistic SYSRET */ sysret32_from_system_call: + /* + * We are not going to return to userspace from the trampoline + * stack. So let's erase the thread stack right now. + */ + STACKLEAK_ERASE TRACE_IRQS_ON /* User mode traces as IRQs on. */ movq RBX(%rsp), %rbx /* pt_regs->rbx */ movq RBP(%rsp), %rbp /* pt_regs->rbp */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 977cb57d7bc9..c1a23acd24e7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1192,6 +1192,10 @@ struct task_struct { void *security; #endif +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK + unsigned long lowest_stack; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h new file mode 100644 index 000000000000..628c2b947b89 --- /dev/null +++ b/include/linux/stackleak.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_STACKLEAK_H +#define _LINUX_STACKLEAK_H + +#include +#include + +/* + * Check that the poison value points to the unused hole in the + * virtual memory map for your platform. 
+ */ +#define STACKLEAK_POISON -0xBEEF +#define STACKLEAK_SEARCH_DEPTH 128 + +#ifdef CONFIG_GCC_PLUGIN_STACKLEAK +#include + +static inline void stackleak_task_init(struct task_struct *t) +{ + t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long); +} +#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ +static inline void stackleak_task_init(struct task_struct *t) { } +#endif + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 7a63d567fdb5..7343b3a9bff0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -117,6 +117,10 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o obj-$(CONFIG_ZONE_DEVICE) += memremap.o obj-$(CONFIG_RSEQ) += rseq.o +obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o +KASAN_SANITIZE_stackleak.o := n +KCOV_INSTRUMENT_stackleak.o := n + $(obj)/configs.o: $(obj)/config_data.h targets += config_data.gz diff --git a/kernel/fork.c b/kernel/fork.c index d896e9ca38b0..47911e49c2b1 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include @@ -1880,6 +1881,8 @@ static __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cleanup_io; + stackleak_task_init(p); + if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children); if (IS_ERR(pid)) { diff --git a/kernel/stackleak.c b/kernel/stackleak.c new file mode 100644 index 000000000000..deba0d8992f9 --- /dev/null +++ b/kernel/stackleak.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code fills the used part of the kernel stack with a poison value + * before returning to userspace. It's part of the STACKLEAK feature + * ported from grsecurity/PaX. + * + * Author: Alexander Popov + * + * STACKLEAK reduces the information which kernel stack leak bugs can + * reveal and blocks some uninitialized stack variable attacks. + */ + +#include + +asmlinkage void stackleak_erase(void) +{ + /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ + unsigned long kstack_ptr = current->lowest_stack; + unsigned long boundary = (unsigned long)end_of_stack(current); + unsigned int poison_count = 0; + const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + + /* Check that 'lowest_stack' value is sane */ + if (unlikely(kstack_ptr - boundary >= THREAD_SIZE)) + kstack_ptr = boundary; + + /* Search for the poison value in the kernel stack */ + while (kstack_ptr > boundary && poison_count <= depth) { + if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON) + poison_count++; + else + poison_count = 0; + + kstack_ptr -= sizeof(unsigned long); + } + + /* + * One 'long int' at the bottom of the thread stack is reserved and + * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y). + */ + if (kstack_ptr == boundary) + kstack_ptr += sizeof(unsigned long); + + /* + * Now write the poison value to the kernel stack. Start from + * 'kstack_ptr' and move up till the new 'boundary'. We assume that + * the stack pointer doesn't change when we write poison. 
+ */ + if (on_thread_stack()) + boundary = current_stack_pointer; + else + boundary = current_top_of_stack(); + + while (kstack_ptr < boundary) { + *(unsigned long *)kstack_ptr = STACKLEAK_POISON; + kstack_ptr += sizeof(unsigned long); + } + + /* Reset the 'lowest_stack' value for the next syscall */ + current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; +} + diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index cb0c889e13aa..977b84e69787 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -139,4 +139,23 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE in structures. This reduces the performance hit of RANDSTRUCT at the cost of weakened randomization. +config GCC_PLUGIN_STACKLEAK + bool "Erase the kernel stack before returning from syscalls" + depends on GCC_PLUGINS + depends on HAVE_ARCH_STACKLEAK + help + This option makes the kernel erase the kernel stack before + returning from system calls. That reduces the information which + kernel stack leak bugs can reveal and blocks some uninitialized + stack variable attacks. + + The tradeoff is the performance impact: on a single CPU system kernel + compilation sees a 1% slowdown, other systems and workloads may vary + and you are advised to test this feature on your expected workload + before deploying it. + + This plugin was ported from grsecurity/PaX. More information at: + * https://grsecurity.net/ + * https://pax.grsecurity.net/ + endif -- cgit v1.2.3 From 10e9ae9fabaf96c8e5227c1cd4827d58b3aa406d Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 17 Aug 2018 01:16:59 +0300 Subject: gcc-plugins: Add STACKLEAK plugin for tracking the kernel stack The STACKLEAK feature erases the kernel stack before returning from syscalls. That reduces the information which kernel stack leak bugs can reveal and blocks some uninitialized stack variable attacks. This commit introduces the STACKLEAK gcc plugin. It is needed for tracking the lowest border of the kernel stack, which is important for the code erasing the used part of the kernel stack at the end of syscalls (comes in a separate commit). The STACKLEAK feature is ported from grsecurity/PaX. More information at: https://grsecurity.net/ https://pax.grsecurity.net/ This code is modified from Brad Spengler/PaX Team's code in the last public patch of grsecurity/PaX based on our understanding of the code. Changes or omissions from the original code are ours and don't reflect the original grsecurity/PaX code. Signed-off-by: Alexander Popov Tested-by: Laura Abbott Signed-off-by: Kees Cook --- kernel/stackleak.c | 28 +++ scripts/Makefile.gcc-plugins | 10 + scripts/gcc-plugins/Kconfig | 12 + scripts/gcc-plugins/stackleak_plugin.c | 427 +++++++++++++++++++++++++++++++++ 4 files changed, 477 insertions(+) create mode 100644 scripts/gcc-plugins/stackleak_plugin.c (limited to 'kernel') diff --git a/kernel/stackleak.c b/kernel/stackleak.c index deba0d8992f9..628485db37ba 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -60,3 +60,31 @@ asmlinkage void stackleak_erase(void) current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; } +void __used stackleak_track_stack(void) +{ + /* + * N.B. stackleak_erase() fills the kernel stack with the poison value, + * which has the register width. That code assumes that the value + * of 'lowest_stack' is aligned on the register width boundary. 
+ * + * That is true for x86 and x86_64 because of the kernel stack + * alignment on these platforms (for details, see 'cc_stack_align' in + * arch/x86/Makefile). Take care of that when you port STACKLEAK to + * new platforms. + */ + unsigned long sp = (unsigned long)&sp; + + /* + * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than + * STACKLEAK_SEARCH_DEPTH makes the poison search in + * stackleak_erase() unreliable. Let's prevent that. + */ + BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH); + + if (sp < current->lowest_stack && + sp >= (unsigned long)task_stack_page(current) + + sizeof(unsigned long)) { + current->lowest_stack = sp; + } +} +EXPORT_SYMBOL(stackleak_track_stack); diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins index 0a482f341576..46c5c6809806 100644 --- a/scripts/Makefile.gcc-plugins +++ b/scripts/Makefile.gcc-plugins @@ -26,6 +26,16 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT) \ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE) \ += -fplugin-arg-randomize_layout_plugin-performance-mode +gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak_plugin.so +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -DSTACKLEAK_PLUGIN +gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \ + += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE) +ifdef CONFIG_GCC_PLUGIN_STACKLEAK + DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable +endif +export DISABLE_STACKLEAK_PLUGIN + # All the plugin CFLAGS are collected here in case a build target needs to # filter them out of the KBUILD_CFLAGS. GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index 977b84e69787..c65fdd823591 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -158,4 +158,16 @@ config GCC_PLUGIN_STACKLEAK * https://grsecurity.net/ * https://pax.grsecurity.net/ +config STACKLEAK_TRACK_MIN_SIZE + int "Minimum stack frame size of functions tracked by STACKLEAK" + default 100 + range 0 4096 + depends on GCC_PLUGIN_STACKLEAK + help + The STACKLEAK gcc plugin instruments the kernel code for tracking + the lowest border of the kernel stack (and for some other purposes). + It inserts the stackleak_track_stack() call for the functions with + a stack frame size greater than or equal to this parameter. + If unsure, leave the default value 100. + endif diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c new file mode 100644 index 000000000000..2f48da98b5d4 --- /dev/null +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -0,0 +1,427 @@ +/* + * Copyright 2011-2017 by the PaX Team + * Modified by Alexander Popov + * Licensed under the GPL v2 + * + * Note: the choice of the license means that the compilation process is + * NOT 'eligible' as defined by gcc's library exception to the GPL v3, + * but for the kernel it doesn't matter since it doesn't link against + * any of the gcc libraries + * + * This gcc plugin is needed for tracking the lowest border of the kernel stack. + * It instruments the kernel code inserting stackleak_track_stack() calls: + * - after alloca(); + * - for the functions with a stack frame size greater than or equal + * to the "track-min-size" plugin parameter. + * + * This plugin is ported from grsecurity/PaX. 
For more information see: + * https://grsecurity.net/ + * https://pax.grsecurity.net/ + * + * Debugging: + * - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(), + * print_rtl() and print_simple_rtl(); + * - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in + * Makefile.gcc-plugins to see the verbose dumps of the gcc passes; + * - use gcc -E to understand the preprocessing shenanigans; + * - use gcc with enabled CFG/GIMPLE/SSA verification (--enable-checking). + */ + +#include "gcc-common.h" + +__visible int plugin_is_GPL_compatible; + +static int track_frame_size = -1; +static const char track_function[] = "stackleak_track_stack"; + +/* + * Mark these global variables (roots) for gcc garbage collector since + * they point to the garbage-collected memory. + */ +static GTY(()) tree track_function_decl; + +static struct plugin_info stackleak_plugin_info = { + .version = "201707101337", + .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n" + "disable\t\tdo not activate the plugin\n" +}; + +static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after) +{ + gimple stmt; + gcall *stackleak_track_stack; + cgraph_node_ptr node; + int frequency; + basic_block bb; + + /* Insert call to void stackleak_track_stack(void) */ + stmt = gimple_build_call(track_function_decl, 0); + stackleak_track_stack = as_a_gcall(stmt); + if (after) { + gsi_insert_after(gsi, stackleak_track_stack, + GSI_CONTINUE_LINKING); + } else { + gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT); + } + + /* Update the cgraph */ + bb = gimple_bb(stackleak_track_stack); + node = cgraph_get_create_node(track_function_decl); + gcc_assert(node); + frequency = compute_call_stmt_bb_frequency(current_function_decl, bb); + cgraph_create_edge(cgraph_get_node(current_function_decl), node, + stackleak_track_stack, bb->count, frequency); +} + +static bool is_alloca(gimple stmt) +{ + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) + return true; + +#if BUILDING_GCC_VERSION >= 4007 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) + return true; +#endif + + return false; +} + +/* + * Work with the GIMPLE representation of the code. Insert the + * stackleak_track_stack() call after alloca() and into the beginning + * of the function if it is not instrumented. + */ +static unsigned int stackleak_instrument_execute(void) +{ + basic_block bb, entry_bb; + bool prologue_instrumented = false, is_leaf = true; + gimple_stmt_iterator gsi; + + /* + * ENTRY_BLOCK_PTR is a basic block which represents possible entry + * point of a function. This block does not contain any code and + * has a CFG edge to its successor. + */ + gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + entry_bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); + + /* + * Loop through the GIMPLE statements in each of cfun basic blocks. + * cfun is a global variable which represents the function that is + * currently processed. + */ + FOR_EACH_BB_FN(bb, cfun) { + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { + gimple stmt; + + stmt = gsi_stmt(gsi); + + /* Leaf function is a function which makes no calls */ + if (is_gimple_call(stmt)) + is_leaf = false; + + if (!is_alloca(stmt)) + continue; + + /* Insert stackleak_track_stack() call after alloca() */ + stackleak_add_track_stack(&gsi, true); + if (bb == entry_bb) + prologue_instrumented = true; + } + } + + if (prologue_instrumented) + return 0; + + /* + * Special cases to skip the instrumentation. 
+ * + * Taking the address of static inline functions materializes them, + * but we mustn't instrument some of them as the resulting stack + * alignment required by the function call ABI will break other + * assumptions regarding the expected (but not otherwise enforced) + * register clobbering ABI. + * + * Case in point: native_save_fl on amd64 when optimized for size + * clobbers rdx if it were instrumented here. + * + * TODO: any more special cases? + */ + if (is_leaf && + !TREE_PUBLIC(current_function_decl) && + DECL_DECLARED_INLINE_P(current_function_decl)) { + return 0; + } + + if (is_leaf && + !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), + "_paravirt_", 10)) { + return 0; + } + + /* Insert stackleak_track_stack() call at the function beginning */ + bb = entry_bb; + if (!single_pred_p(bb)) { + /* gcc_assert(bb_loop_depth(bb) || + (bb->flags & BB_IRREDUCIBLE_LOOP)); */ + split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun))); + bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun)); + } + gsi = gsi_after_labels(bb); + stackleak_add_track_stack(&gsi, false); + + return 0; +} + +static bool large_stack_frame(void) +{ +#if BUILDING_GCC_VERSION >= 8000 + return maybe_ge(get_frame_size(), track_frame_size); +#else + return (get_frame_size() >= track_frame_size); +#endif +} + +/* + * Work with the RTL representation of the code. + * Remove the unneeded stackleak_track_stack() calls from the functions + * which don't call alloca() and don't have a large enough stack frame size. + */ +static unsigned int stackleak_cleanup_execute(void) +{ + rtx_insn *insn, *next; + + if (cfun->calls_alloca) + return 0; + + if (large_stack_frame()) + return 0; + + /* + * Find stackleak_track_stack() calls. Loop through the chain of insns, + * which is an RTL representation of the code for a function. + * + * The example of a matching insn: + * (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack") + * [flags 0x41] ) + * [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list + * (symbol_ref ("stackleak_track_stack") [flags 0x41] ) (expr_list (0) (nil))) (nil)) + */ + for (insn = get_insns(); insn; insn = next) { + rtx body; + + next = NEXT_INSN(insn); + + /* Check the expression code of the insn */ + if (!CALL_P(insn)) + continue; + + /* + * Check the expression code of the insn body, which is an RTL + * Expression (RTX) describing the side effect performed by + * that insn. + */ + body = PATTERN(insn); + + if (GET_CODE(body) == PARALLEL) + body = XVECEXP(body, 0, 0); + + if (GET_CODE(body) != CALL) + continue; + + /* + * Check the first operand of the call expression. It should + * be a mem RTX describing the needed subroutine with a + * symbol_ref RTX. 
+ */ + body = XEXP(body, 0); + if (GET_CODE(body) != MEM) + continue; + + body = XEXP(body, 0); + if (GET_CODE(body) != SYMBOL_REF) + continue; + + if (SYMBOL_REF_DECL(body) != track_function_decl) + continue; + + /* Delete the stackleak_track_stack() call */ + delete_insn_and_edges(insn); +#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION < 8000 + if (GET_CODE(next) == NOTE && + NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) { + insn = next; + next = NEXT_INSN(insn); + delete_insn_and_edges(insn); + } +#endif + } + + return 0; +} + +static bool stackleak_gate(void) +{ + tree section; + + section = lookup_attribute("section", + DECL_ATTRIBUTES(current_function_decl)); + if (section && TREE_VALUE(section)) { + section = TREE_VALUE(TREE_VALUE(section)); + + if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13)) + return false; + if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13)) + return false; + } + + return track_frame_size >= 0; +} + +/* Build the function declaration for stackleak_track_stack() */ +static void stackleak_start_unit(void *gcc_data __unused, + void *user_data __unused) +{ + tree fntype; + + /* void stackleak_track_stack(void) */ + fntype = build_function_type_list(void_type_node, NULL_TREE); + track_function_decl = build_fn_decl(track_function, fntype); + DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */ + TREE_PUBLIC(track_function_decl) = 1; + TREE_USED(track_function_decl) = 1; + DECL_EXTERNAL(track_function_decl) = 1; + DECL_ARTIFICIAL(track_function_decl) = 1; + DECL_PRESERVE_P(track_function_decl) = 1; +} + +/* + * Pass gate function is a predicate function that gets executed before the + * corresponding pass. If the return value is 'true' the pass gets executed, + * otherwise, it is skipped. + */ +static bool stackleak_instrument_gate(void) +{ + return stackleak_gate(); +} + +#define PASS_NAME stackleak_instrument +#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg +#define TODO_FLAGS_START TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts +#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \ + | TODO_update_ssa | TODO_rebuild_cgraph_edges +#include "gcc-generate-gimple-pass.h" + +static bool stackleak_cleanup_gate(void) +{ + return stackleak_gate(); +} + +#define PASS_NAME stackleak_cleanup +#define TODO_FLAGS_FINISH TODO_dump_func +#include "gcc-generate-rtl-pass.h" + +/* + * Every gcc plugin exports a plugin_init() function that is called right + * after the plugin is loaded. This function is responsible for registering + * the plugin callbacks and doing other required initialization. 
+ */ +__visible int plugin_init(struct plugin_name_args *plugin_info, + struct plugin_gcc_version *version) +{ + const char * const plugin_name = plugin_info->base_name; + const int argc = plugin_info->argc; + const struct plugin_argument * const argv = plugin_info->argv; + int i = 0; + + /* Extra GGC root tables describing our GTY-ed data */ + static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = { + { + .base = &track_function_decl, + .nelt = 1, + .stride = sizeof(track_function_decl), + .cb = >_ggc_mx_tree_node, + .pchw = >_pch_nx_tree_node + }, + LAST_GGC_ROOT_TAB + }; + + /* + * The stackleak_instrument pass should be executed before the + * "optimized" pass, which is the control flow graph cleanup that is + * performed just before expanding gcc trees to the RTL. In former + * versions of the plugin this new pass was inserted before the + * "tree_profile" pass, which is currently called "profile". + */ + PASS_INFO(stackleak_instrument, "optimized", 1, + PASS_POS_INSERT_BEFORE); + + /* + * The stackleak_cleanup pass should be executed after the + * "reload" pass, when the stack frame size is final. + */ + PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER); + + if (!plugin_default_version_check(version, &gcc_version)) { + error(G_("incompatible gcc/plugin versions")); + return 1; + } + + /* Parse the plugin arguments */ + for (i = 0; i < argc; i++) { + if (!strcmp(argv[i].key, "disable")) + return 0; + + if (!strcmp(argv[i].key, "track-min-size")) { + if (!argv[i].value) { + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + + track_frame_size = atoi(argv[i].value); + if (track_frame_size < 0) { + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), + plugin_name, argv[i].key, argv[i].value); + return 1; + } + } else { + error(G_("unknown option '-fplugin-arg-%s-%s'"), + plugin_name, argv[i].key); + return 1; + } + } + + /* Give the information about the plugin */ + register_callback(plugin_name, PLUGIN_INFO, NULL, + &stackleak_plugin_info); + + /* Register to be called before processing a translation unit */ + register_callback(plugin_name, PLUGIN_START_UNIT, + &stackleak_start_unit, NULL); + + /* Register an extra GCC garbage collector (GGC) root table */ + register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, + (void *)>_ggc_r_gt_stackleak); + + /* + * Hook into the Pass Manager to register new gcc passes. + * + * The stack frame size info is available only at the last RTL pass, + * when it's too late to insert complex code like a function call. + * So we register two gcc passes to instrument every function at first + * and remove the unneeded instrumentation later. + */ + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, + &stackleak_instrument_pass_info); + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, + &stackleak_cleanup_pass_info); + + return 0; +} -- cgit v1.2.3 From c8d126275a5fa59394fe17109bdb9812fed296b8 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 17 Aug 2018 01:17:01 +0300 Subject: fs/proc: Show STACKLEAK metrics in the /proc file system Introduce CONFIG_STACKLEAK_METRICS providing STACKLEAK information about tasks via the /proc file system. In particular, /proc//stack_depth shows the maximum kernel stack consumption for the current and previous syscalls. Although this information is not precise, it can be useful for estimating the STACKLEAK performance impact for your workloads. 
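Reading the new file needs nothing special; a minimal userspace sketch for sampling the metric, assuming a kernel built with CONFIG_GCC_PLUGIN_STACKLEAK and CONFIG_STACKLEAK_METRICS:

#include <stdio.h>

int main(void)
{
        char line[128];
        FILE *f = fopen("/proc/self/stack_depth", "r");

        if (!f) {
                perror("stack_depth");  /* kernel built without the option? */
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* "previous stack depth: ..." lines */
        fclose(f);
        return 0;
}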
Suggested-by: Ingo Molnar Signed-off-by: Alexander Popov Tested-by: Laura Abbott Signed-off-by: Kees Cook --- fs/proc/base.c | 18 ++++++++++++++++++ include/linux/sched.h | 1 + include/linux/stackleak.h | 3 +++ kernel/stackleak.c | 4 ++++ scripts/gcc-plugins/Kconfig | 12 ++++++++++++ 5 files changed, 38 insertions(+) (limited to 'kernel') diff --git a/fs/proc/base.c b/fs/proc/base.c index ccf86f16d9f0..2a238d68610e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2891,6 +2891,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns, } #endif /* CONFIG_LIVEPATCH */ +#ifdef CONFIG_STACKLEAK_METRICS +static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + unsigned long prev_depth = THREAD_SIZE - + (task->prev_lowest_stack & (THREAD_SIZE - 1)); + unsigned long depth = THREAD_SIZE - + (task->lowest_stack & (THREAD_SIZE - 1)); + + seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n", + prev_depth, depth); + return 0; +} +#endif /* CONFIG_STACKLEAK_METRICS */ + /* * Thread groups */ @@ -2992,6 +3007,9 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_LIVEPATCH ONE("patch_state", S_IRUSR, proc_pid_patch_state), #endif +#ifdef CONFIG_STACKLEAK_METRICS + ONE("stack_depth", S_IRUGO, proc_stack_depth), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/include/linux/sched.h b/include/linux/sched.h index c1a23acd24e7..ae9d10e14b82 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1194,6 +1194,7 @@ struct task_struct { #ifdef CONFIG_GCC_PLUGIN_STACKLEAK unsigned long lowest_stack; + unsigned long prev_lowest_stack; #endif /* diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index 628c2b947b89..b911b973d328 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -18,6 +18,9 @@ static inline void stackleak_task_init(struct task_struct *t) { t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long); +# ifdef CONFIG_STACKLEAK_METRICS + t->prev_lowest_stack = t->lowest_stack; +# endif } #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } diff --git a/kernel/stackleak.c b/kernel/stackleak.c index 628485db37ba..f66239572c89 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -41,6 +41,10 @@ asmlinkage void stackleak_erase(void) if (kstack_ptr == boundary) kstack_ptr += sizeof(unsigned long); +#ifdef CONFIG_STACKLEAK_METRICS + current->prev_lowest_stack = kstack_ptr; +#endif + /* * Now write the poison value to the kernel stack. Start from * 'kstack_ptr' and move up till the new 'boundary'. We assume that diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index c65fdd823591..b0a015ef5268 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -170,4 +170,16 @@ config STACKLEAK_TRACK_MIN_SIZE a stack frame size greater than or equal to this parameter. If unsure, leave the default value 100. +config STACKLEAK_METRICS + bool "Show STACKLEAK metrics in the /proc file system" + depends on GCC_PLUGIN_STACKLEAK + depends on PROC_FS + help + If this is set, STACKLEAK metrics for every task are available in + the /proc file system. In particular, /proc//stack_depth + shows the maximum kernel stack consumption for the current and + previous syscalls. Although this information is not precise, it + can be useful for estimating the STACKLEAK performance impact for + your workloads. 
+ endif -- cgit v1.2.3 From 964c9dff0091893a9a74a88edf984c6da0b779f7 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Fri, 17 Aug 2018 01:17:03 +0300 Subject: stackleak: Allow runtime disabling of kernel stack erasing Introduce CONFIG_STACKLEAK_RUNTIME_DISABLE option, which provides 'stack_erasing' sysctl. It can be used in runtime to control kernel stack erasing for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK. Suggested-by: Ingo Molnar Signed-off-by: Alexander Popov Tested-by: Laura Abbott Signed-off-by: Kees Cook --- Documentation/sysctl/kernel.txt | 18 ++++++++++++++++++ include/linux/stackleak.h | 6 ++++++ kernel/stackleak.c | 38 ++++++++++++++++++++++++++++++++++++++ kernel/sysctl.c | 15 ++++++++++++++- scripts/gcc-plugins/Kconfig | 8 ++++++++ 5 files changed, 84 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 37a679501ddc..1b8775298cf7 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -89,6 +89,7 @@ show up in /proc/sys/kernel: - shmmni - softlockup_all_cpu_backtrace - soft_watchdog +- stack_erasing - stop-a [ SPARC only ] - sysrq ==> Documentation/admin-guide/sysrq.rst - sysctl_writes_strict @@ -987,6 +988,23 @@ detect a hard lockup condition. ============================================================== +stack_erasing + +This parameter can be used to control kernel stack erasing at the end +of syscalls for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK. + +That erasing reduces the information which kernel stack leak bugs +can reveal and blocks some uninitialized stack variable attacks. +The tradeoff is the performance impact: on a single CPU system kernel +compilation sees a 1% slowdown, other systems and workloads may vary. + + 0: kernel stack erasing is disabled, STACKLEAK_METRICS are not updated. + + 1: kernel stack erasing is enabled (default), it is performed before + returning to the userspace at the end of syscalls. + +============================================================== + tainted: Non-zero if the kernel has been tainted. 
Numeric values, which can be diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index b911b973d328..3d5c3271a9a8 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -22,6 +22,12 @@ static inline void stackleak_task_init(struct task_struct *t) t->prev_lowest_stack = t->lowest_stack; # endif } + +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +int stack_erasing_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ static inline void stackleak_task_init(struct task_struct *t) { } #endif diff --git a/kernel/stackleak.c b/kernel/stackleak.c index f66239572c89..e42892926244 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -12,6 +12,41 @@ #include +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +#include +#include + +static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass); + +int stack_erasing_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret = 0; + int state = !static_branch_unlikely(&stack_erasing_bypass); + int prev_state = state; + + table->data = &state; + table->maxlen = sizeof(int); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + state = !!state; + if (ret || !write || state == prev_state) + return ret; + + if (state) + static_branch_disable(&stack_erasing_bypass); + else + static_branch_enable(&stack_erasing_bypass); + + pr_warn("stackleak: kernel stack erasing is %s\n", + state ? "enabled" : "disabled"); + return ret; +} + +#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass) +#else +#define skip_erasing() false +#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ + asmlinkage void stackleak_erase(void) { /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ @@ -20,6 +55,9 @@ asmlinkage void stackleak_erase(void) unsigned int poison_count = 0; const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + if (skip_erasing()) + return; + /* Check that 'lowest_stack' value is sane */ if (unlikely(kstack_ptr - boundary >= THREAD_SIZE)) kstack_ptr = boundary; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index cc02050fd0c4..3ae223f7b5df 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -91,7 +91,9 @@ #ifdef CONFIG_CHR_DEV_SG #include #endif - +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE +#include +#endif #ifdef CONFIG_LOCKUP_DETECTOR #include #endif @@ -1232,6 +1234,17 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, +#endif +#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE + { + .procname = "stack_erasing", + .data = NULL, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = stack_erasing_sysctl, + .extra1 = &zero, + .extra2 = &one, + }, #endif { } }; diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig index b0a015ef5268..0d5c799688f0 100644 --- a/scripts/gcc-plugins/Kconfig +++ b/scripts/gcc-plugins/Kconfig @@ -182,4 +182,12 @@ config STACKLEAK_METRICS can be useful for estimating the STACKLEAK performance impact for your workloads. +config STACKLEAK_RUNTIME_DISABLE + bool "Allow runtime disabling of kernel stack erasing" + depends on GCC_PLUGIN_STACKLEAK + help + This option provides 'stack_erasing' sysctl, which can be used in + runtime to control kernel stack erasing for kernels built with + CONFIG_GCC_PLUGIN_STACKLEAK. 
+ endif -- cgit v1.2.3 From 1ed0cc5a01a4d868d9907ce96468c4b4c6709556 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Tue, 4 Sep 2018 15:45:41 -0700 Subject: mm: respect arch_dup_mmap() return value Commit d70f2a14b72a ("include/linux/sched/mm.h: uninline mmdrop_async(), etc") ignored the return value of arch_dup_mmap(). As a result, on x86, a failure to duplicate the LDT (e.g. due to memory allocation error) would leave the duplicated memory mapping in an inconsistent state. Fix by using the return value, as it was before the change. Link: http://lkml.kernel.org/r/20180823051229.211856-1-namit@vmware.com Fixes: d70f2a14b72a4 ("include/linux/sched/mm.h: uninline mmdrop_async(), etc") Signed-off-by: Nadav Amit Acked-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index d896e9ca38b0..f0b58479534f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; } /* a new mm has just been created */ - arch_dup_mmap(oldmm, mm); - retval = 0; + retval = arch_dup_mmap(oldmm, mm); out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); -- cgit v1.2.3 From a9c676bc8fc58d00eea9836fb14ee43c0346416a Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 4 Sep 2018 19:13:44 -0700 Subject: bpf/verifier: fix verifier instability Edward Cree says: In check_mem_access(), for the PTR_TO_CTX case, after check_ctx_access() has supplied a reg_type, the other members of the register state are set appropriately. Previously reg.range was set to 0, but as it is in a union with reg.map_ptr, which is larger, upper bytes of the latter were left in place. This then caused the memcmp() in regsafe() to fail, preventing some branches from being pruned (and occasionally causing the same program to take a varying number of processed insns on repeated verifier runs). Fix the instability by clearing bpf_reg_state in __mark_reg_[un]known() Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Debugged-by: Edward Cree Acked-by: Edward Cree Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f4ff0c569e54..6ff1bac1795d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -570,7 +570,9 @@ static void __mark_reg_not_init(struct bpf_reg_state *reg); */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { - reg->id = 0; + /* Clear id, off, and union(map_ptr, range) */ + memset(((u8 *)reg) + sizeof(reg->type), 0, + offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; @@ -589,7 +591,6 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg) static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); - reg->off = 0; reg->type = SCALAR_VALUE; } @@ -700,9 +701,12 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) /* Mark a register as having a completely unknown (scalar) value. 
*/ static void __mark_reg_unknown(struct bpf_reg_state *reg) { + /* + * Clear type, id, off, and union(map_ptr, range) and + * padding between 'type' and union + */ + memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); reg->type = SCALAR_VALUE; - reg->id = 0; - reg->off = 0; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); @@ -1640,9 +1644,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn else mark_reg_known_zero(env, regs, value_regno); - regs[value_regno].id = 0; - regs[value_regno].off = 0; - regs[value_regno].range = 0; regs[value_regno].type = reg_type; } @@ -2495,7 +2496,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() -- cgit v1.2.3 From f8b7530aa0a1def79c93101216b5b17cf408a70a Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Wed, 5 Sep 2018 11:22:07 +0530 Subject: cpu/hotplug: Adjust misplaced smb() in cpuhp_thread_fun() The smp_mb() in cpuhp_thread_fun() is misplaced. It needs to be after the load of st->should_run to prevent reordering of the later load/stores w.r.t. the load of st->should_run. Fixes: 4dddfb5faa61 ("smp/hotplug: Rewrite AP state machine core") Signed-off-by: Neeraj Upadhyay Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: josh@joshtriplett.org Cc: peterz@infradead.org Cc: jiangshanlai@gmail.com Cc: dzickus@redhat.com Cc: brendan.jackman@arm.com Cc: malat@debian.org Cc: mojha@codeaurora.org Cc: sramana@codeaurora.org Cc: linux-arm-msm@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1536126727-11629-1-git-send-email-neeraju@codeaurora.org --- kernel/cpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index aa7fe85ad62e..eb4041f78073 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu) bool bringup = st->bringup; enum cpuhp_state state; + if (WARN_ON_ONCE(!st->should_run)) + return; + /* * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures * that if we see ->should_run we also see the rest of the state. */ smp_mb(); - if (WARN_ON_ONCE(!st->should_run)) - return; - cpuhp_lock_acquire(bringup); if (st->single) { -- cgit v1.2.3 From 69fa6eb7d6a64801ea261025cce9723d9442d773 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 6 Sep 2018 15:21:38 +0200 Subject: cpu/hotplug: Prevent state corruption on error rollback When a teardown callback fails, the CPU hotplug code brings the CPU back to the previous state. The previous state becomes the new target state. The rollback happens in undo_cpu_down() which increments the state unconditionally even if the state is already the same as the target. As a consequence the next CPU hotplug operation will start at the wrong state. This is easily to observe when __cpu_disable() fails. Prevent the unconditional undo by checking the state vs. target before incrementing state and fix up the consequently wrong conditional in the unplug code which handles the failure of the final CPU take down on the control CPU side. 
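A toy userspace model of the rollback problem described above; the two-field struct and the step numbers are invented for illustration and are not the real cpuhp code:

  #include <stdio.h>

  struct toy { int state, target; };

  int main(void)
  {
          struct toy st = { .state = 50, .target = 20 };
          int prev_state = st.state;

          /* The very first teardown callback fails, so state never moved. */
          st.target = prev_state;

          /*
           * Buggy rollback: an unconditional st.state++ leaves 51 while the
           * target is 50, so the next hotplug operation starts at the wrong
           * step. Fixed rollback: only undo if we actually descended.
           */
          if (st.state < prev_state)
                  st.state++;

          printf("state=%d target=%d\n", st.state, st.target);
          return 0;
  }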
Fixes: 4dddfb5faa61 ("smp/hotplug: Rewrite AP state machine core") Reported-by: Neeraj Upadhyay Signed-off-by: Thomas Gleixner Tested-by: Geert Uytterhoeven Tested-by: Sudeep Holla Tested-by: Neeraj Upadhyay Cc: josh@joshtriplett.org Cc: peterz@infradead.org Cc: jiangshanlai@gmail.com Cc: dzickus@redhat.com Cc: brendan.jackman@arm.com Cc: malat@debian.org Cc: sramana@codeaurora.org Cc: linux-arm-msm@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1809051419580.1416@nanos.tec.linutronix.de ---- --- kernel/cpu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index eb4041f78073..0097acec1c71 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); if (ret) { st->target = prev_state; - undo_cpu_down(cpu, st); + if (st->state < prev_state) + undo_cpu_down(cpu, st); break; } } @@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, * to do the further cleanups. */ ret = cpuhp_down_callbacks(cpu, st, target); - if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { + if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { cpuhp_reset_state(st, prev_state); __cpuhp_kick_ap(st); } -- cgit v1.2.3 From d1c392c9e2a301f38998a353f467f76414e38725 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 5 Sep 2018 16:29:49 -0400 Subject: printk/tracing: Do not trace printk_nmi_enter() I hit the following splat in my tests: ------------[ cut here ]------------ IRQs not enabled as expected WARNING: CPU: 3 PID: 0 at kernel/time/tick-sched.c:982 tick_nohz_idle_enter+0x44/0x8c Modules linked in: ip6t_REJECT nf_reject_ipv6 ip6table_filter ip6_tables ipv6 CPU: 3 PID: 0 Comm: swapper/3 Not tainted 4.19.0-rc2-test+ #2 Hardware name: MSI MS-7823/CSM-H87M-G43 (MS-7823), BIOS V1.6 02/22/2014 EIP: tick_nohz_idle_enter+0x44/0x8c Code: ec 05 00 00 00 75 26 83 b8 c0 05 00 00 00 75 1d 80 3d d0 36 3e c1 00 75 14 68 94 63 12 c1 c6 05 d0 36 3e c1 01 e8 04 ee f8 ff <0f> 0b 58 fa bb a0 e5 66 c1 e8 25 0f 04 00 64 03 1d 28 31 52 c1 8b EAX: 0000001c EBX: f26e7f8c ECX: 00000006 EDX: 00000007 ESI: f26dd1c0 EDI: 00000000 EBP: f26e7f40 ESP: f26e7f38 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010296 CR0: 80050033 CR2: 0813c6b0 CR3: 2f342000 CR4: 001406f0 Call Trace: do_idle+0x33/0x202 cpu_startup_entry+0x61/0x63 start_secondary+0x18e/0x1ed startup_32_smp+0x164/0x168 irq event stamp: 18773830 hardirqs last enabled at (18773829): [] trace_hardirqs_on_thunk+0xc/0x10 hardirqs last disabled at (18773830): [] trace_hardirqs_off_thunk+0xc/0x10 softirqs last enabled at (18773824): [] __do_softirq+0x25f/0x2bf softirqs last disabled at (18773767): [] call_on_stack+0x45/0x4b ---[ end trace b7c64aa79e17954a ]--- After a bit of debugging, I found what was happening. This would trigger when performing "perf" with a high NMI interrupt rate, while enabling and disabling function tracer. Ftrace uses breakpoints to convert the nops at the start of functions to calls to the function trampolines. The breakpoint traps disable interrupts and this makes calls into lockdep via the trace_hardirqs_off_thunk in the entry.S code. What happens is the following: do_idle { [interrupts enabled] [interrupts disabled] TRACE_IRQS_OFF [lockdep says irqs off] [...] 
TRACE_IRQS_IRET test if pt_regs say return to interrupts enabled [yes] TRACE_IRQS_ON [lockdep says irqs are on] nmi_enter() { printk_nmi_enter() [traced by ftrace] [ hit ftrace breakpoint ] TRACE_IRQS_OFF [lockdep says irqs off] [...] TRACE_IRQS_IRET [return from breakpoint] test if pt_regs say interrupts enabled [no] [iret back to interrupt] [iret back to code] tick_nohz_idle_enter() { lockdep_assert_irqs_enabled() [lockdep say no!] Although interrupts are indeed enabled, lockdep thinks it is not, and since we now do asserts via lockdep, it gives a false warning. The issue here is that printk_nmi_enter() is called before lockdep_off(), which disables lockdep (for this reason) in NMIs. By simply not allowing ftrace to see printk_nmi_enter() (via notrace annotation) we keep lockdep from getting confused. Cc: stable@vger.kernel.org Fixes: 42a0bb3f71383 ("printk/nmi: generic solution for safe printk in NMI") Acked-by: Sergey Senozhatsky Acked-by: Petr Mladek Signed-off-by: Steven Rostedt (VMware) --- kernel/printk/printk_safe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index a0a74c533e4b..0913b4d385de 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) return printk_safe_log_store(s, fmt, args); } -void printk_nmi_enter(void) +void notrace printk_nmi_enter(void) { this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); } -void printk_nmi_exit(void) +void notrace printk_nmi_exit(void) { this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); } -- cgit v1.2.3 From e2c631ba75a7e727e8db0a9d30a06bfd434adb3a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 5 Sep 2018 10:41:58 +0200 Subject: clocksource: Revert "Remove kthread" I turns out that the silly spawn kthread from worker was actually needed. clocksource_watchdog_kthread() cannot be called directly from clocksource_watchdog_work(), because clocksource_select() calls timekeeping_notify() which uses stop_machine(). One cannot use stop_machine() from a workqueue() due lock inversions wrt CPU hotplug. Revert the patch but add a comment that explain why we jump through such apparently silly hoops. 
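The reinstated pattern, reduced to a minimal sketch (names are illustrative, not the clocksource code): a work item that does nothing but spawn a short-lived kthread, so the code that eventually reaches stop_machine() never runs in workqueue context.

  #include <linux/kthread.h>
  #include <linux/workqueue.h>

  static int my_slow_path_thread(void *data)
  {
          /* safe to end up in stop_machine() here: plain kthread context */
          return 0;
  }

  static void my_work_fn(struct work_struct *work)
  {
          /* if kthread_run() fails, a later watchdog scan simply retries */
          kthread_run(my_slow_path_thread, NULL, "my_slow_path");
  }

  static DECLARE_WORK(my_work, my_work_fn);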
Fixes: 7197e77abcb6 ("clocksource: Remove kthread") Reported-by: Siegfried Metz Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Tested-by: Niklas Cassel Tested-by: Kevin Shanahan Tested-by: viktor_jaegerskuepper@freenet.de Tested-by: Siegfried Metz Cc: rafael.j.wysocki@intel.com Cc: len.brown@intel.com Cc: diego.viola@gmail.com Cc: rui.zhang@intel.com Cc: bjorn.andersson@linaro.org Link: https://lkml.kernel.org/r/20180905084158.GR24124@hirez.programming.kicks-ass.net --- kernel/time/clocksource.c | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f74fb00d8064..0e6e97a01942 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags) spin_unlock_irqrestore(&watchdog_lock, *flags); } +static int clocksource_watchdog_kthread(void *data); +static void __clocksource_change_rating(struct clocksource *cs, int rating); + /* * Interval: 0.5sec Threshold: 0.0625s */ #define WATCHDOG_INTERVAL (HZ >> 1) #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) +static void clocksource_watchdog_work(struct work_struct *work) +{ + /* + * We cannot directly run clocksource_watchdog_kthread() here, because + * clocksource_select() calls timekeeping_notify() which uses + * stop_machine(). One cannot use stop_machine() from a workqueue() due + * lock inversions wrt CPU hotplug. + * + * Also, we only ever run this work once or twice during the lifetime + * of the kernel, so there is no point in creating a more permanent + * kthread for this. + * + * If kthread_run fails the next watchdog scan over the + * watchdog_list will find the unstable clock again. + */ + kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); +} + static void __clocksource_unstable(struct clocksource *cs) { cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; /* - * If the clocksource is registered clocksource_watchdog_work() will + * If the clocksource is registered clocksource_watchdog_kthread() will * re-rate and re-select. */ if (list_empty(&cs->list)) { @@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs) if (cs->mark_unstable) cs->mark_unstable(cs); - /* kick clocksource_watchdog_work() */ + /* kick clocksource_watchdog_kthread() */ if (finished_booting) schedule_work(&watchdog_work); } @@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs) * @cs: clocksource to be marked unstable * * This function is called by the x86 TSC code to mark clocksources as unstable; - * it defers demotion and re-selection to a work. + * it defers demotion and re-selection to a kthread. 
*/ void clocksource_mark_unstable(struct clocksource *cs) { @@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) } } -static void __clocksource_change_rating(struct clocksource *cs, int rating); - -static int __clocksource_watchdog_work(void) +static int __clocksource_watchdog_kthread(void) { struct clocksource *cs, *tmp; unsigned long flags; @@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void) return select; } -static void clocksource_watchdog_work(struct work_struct *work) +static int clocksource_watchdog_kthread(void *data) { mutex_lock(&clocksource_mutex); - if (__clocksource_watchdog_work()) + if (__clocksource_watchdog_kthread()) clocksource_select(); mutex_unlock(&clocksource_mutex); + return 0; } static bool clocksource_is_watchdog(struct clocksource *cs) @@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) static void clocksource_select_watchdog(bool fallback) { } static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } static inline void clocksource_resume_watchdog(void) { } -static inline int __clocksource_watchdog_work(void) { return 0; } +static inline int __clocksource_watchdog_kthread(void) { return 0; } static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } void clocksource_mark_unstable(struct clocksource *cs) { } @@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void) /* * Run the watchdog first to eliminate unstable clock sources */ - __clocksource_watchdog_work(); + __clocksource_watchdog_kthread(); clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; -- cgit v1.2.3 From ccf640f4c9988653ef884672381b03b9be247bec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 09:40:24 +0200 Subject: dma-mapping: remove dma_configure There is no good reason for this indirection given that the method always exists. 
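For context, the callback that the probe path now invokes directly is the one each bus registers for itself; a hedged sketch with a made-up bus ("mybus" is hypothetical):

  #include <linux/device.h>

  static int mybus_dma_configure(struct device *dev)
  {
          /* parse the firmware description, set up DMA masks/ops, ... */
          return 0;
  }

  struct bus_type mybus_bus_type = {
          .name          = "mybus",
          .dma_configure = mybus_dma_configure,
  };

The probe path then reduces to calling dev->bus->dma_configure(dev) when it is set, as the drivers/base/dd.c hunk below shows.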
Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- drivers/base/dd.c | 8 +++++--- include/linux/dma-mapping.h | 6 ------ kernel/dma/mapping.c | 10 ---------- 3 files changed, 5 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/drivers/base/dd.c b/drivers/base/dd.c index edfc9f0b1180..65128cf8427c 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -480,9 +480,11 @@ re_probe: if (ret) goto pinctrl_bind_failed; - ret = dma_configure(dev); - if (ret) - goto dma_failed; + if (dev->bus->dma_configure) { + ret = dev->bus->dma_configure(dev); + if (ret) + goto dma_failed; + } if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1db6a6b46d0d..1c6c7c09bcf2 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -754,14 +754,8 @@ dma_mark_declared_memory_occupied(struct device *dev, #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ #ifdef CONFIG_HAS_DMA -int dma_configure(struct device *dev); void dma_deconfigure(struct device *dev); #else -static inline int dma_configure(struct device *dev) -{ - return 0; -} - static inline void dma_deconfigure(struct device *dev) {} #endif diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index d2a92ddaac4d..25607ceb4a50 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -328,16 +328,6 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) } #endif -/* - * enables DMA API use for a device - */ -int dma_configure(struct device *dev) -{ - if (dev->bus->dma_configure) - return dev->bus->dma_configure(dev); - return 0; -} - void dma_deconfigure(struct device *dev) { of_dma_deconfigure(dev); -- cgit v1.2.3 From dc3c05504d38849f77149cb962caeaedd1efa127 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 10:28:18 +0200 Subject: dma-mapping: remove dma_deconfigure This goes through a lot of hooks just to call arch_teardown_dma_ops. Replace it with a direct call instead. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- drivers/acpi/arm64/iort.c | 2 +- drivers/acpi/scan.c | 10 ---------- drivers/base/dd.c | 4 ++-- drivers/of/device.c | 12 ------------ include/acpi/acpi_bus.h | 1 - include/linux/acpi.h | 2 -- include/linux/dma-mapping.h | 6 ------ include/linux/of_device.h | 3 --- kernel/dma/mapping.c | 6 ------ 9 files changed, 3 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 08f26db2da7e..2a361e22d38d 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1428,7 +1428,7 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, return 0; dma_deconfigure: - acpi_dma_deconfigure(&pdev->dev); + arch_teardown_dma_ops(&pdev->dev); dev_put: platform_device_put(pdev); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e1b6231cfa1c..56676a56b3e3 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1469,16 +1469,6 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) } EXPORT_SYMBOL_GPL(acpi_dma_configure); -/** - * acpi_dma_deconfigure - Tear-down DMA configuration for the device. 
- * @dev: The pointer to the device - */ -void acpi_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} -EXPORT_SYMBOL_GPL(acpi_dma_deconfigure); - static void acpi_init_coherency(struct acpi_device *adev) { unsigned long long cca = 0; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 65128cf8427c..169412ee4ae8 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -539,7 +539,7 @@ re_probe: goto done; probe_failed: - dma_deconfigure(dev); + arch_teardown_dma_ops(dev); dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, @@ -968,7 +968,7 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); - dma_deconfigure(dev); + arch_teardown_dma_ops(dev); devres_release_all(dev); dev->driver = NULL; diff --git a/drivers/of/device.c b/drivers/of/device.c index 5957cd4fa262..c7fa5a9697c9 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -170,18 +170,6 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) } EXPORT_SYMBOL_GPL(of_dma_configure); -/** - * of_dma_deconfigure - Clean up DMA configuration - * @dev: Device for which to clean up DMA configuration - * - * Clean up all configuration performed by of_dma_configure_ops() and free all - * resources that have been allocated. - */ -void of_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} - int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ba4dd54f2c82..53600f527a70 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -595,7 +595,6 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); -void acpi_dma_deconfigure(struct device *dev); struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index de8d3d3fa651..af4628979d13 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev, return 0; } -static inline void acpi_dma_deconfigure(struct device *dev) { } - #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1c6c7c09bcf2..1423b69f3cc9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -753,12 +753,6 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -#ifdef CONFIG_HAS_DMA -void dma_deconfigure(struct device *dev); -#else -static inline void dma_deconfigure(struct device *dev) {} -#endif - /* * Managed DMA API */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 165fd302b442..8d31e39dd564 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma); -void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev, { return 0; } -static inline void 
of_dma_deconfigure(struct device *dev) -{} #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 25607ceb4a50..3540cb399bd2 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -327,9 +327,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif - -void dma_deconfigure(struct device *dev) -{ - of_dma_deconfigure(dev); - acpi_dma_deconfigure(dev); -} -- cgit v1.2.3 From 34e12b864e617ca7e4f6a3108460aa143bf20056 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 9 Sep 2018 13:42:52 +0200 Subject: jump_label: Use static_key_linked() accessor ... instead of open-coding it, in static_key_mod(). No functional changes. Signed-off-by: Borislav Petkov Cc: Andrew Morton Cc: Jason Baron Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Steven Rostedt (VMware) Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180909114252.17575-1-bp@alien8.de Signed-off-by: Ingo Molnar --- kernel/jump_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 01ebdf1f9f40..61d1c0178731 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -455,7 +455,7 @@ struct static_key_mod { static inline struct static_key_mod *static_key_mod(struct static_key *key) { - WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED)); + WARN_ON_ONCE(!static_key_linked(key)); return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); } -- cgit v1.2.3 From e13e2366d8415e029fe96a62502955083e272cef Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 3 Sep 2018 16:07:08 +0200 Subject: locking/mutex: Fix mutex debug call and ww_mutex documentation The following commit: 08295b3b5bee ("Implement an algorithm choice for Wound-Wait mutexes") introduced a reference in the documentation to a function that was removed in an earlier commit. It also forgot to remove a call to debug_mutex_add_waiter() which is now unconditionally called by __mutex_add_waiter(). Fix those bugs. Signed-off-by: Thomas Hellstrom Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dri-devel@lists.freedesktop.org Fixes: 08295b3b5bee ("Implement an algorithm choice for Wound-Wait mutexes") Link: http://lkml.kernel.org/r/20180903140708.2401-1-thellstrom@vmware.com Signed-off-by: Ingo Molnar --- kernel/locking/mutex.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 1a81a1257b3f..3f8a35104285 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock, /* * wake_up_process() paired with set_current_state() * inserts sufficient barriers to make sure @owner either sees - * it's wounded in __ww_mutex_lock_check_stamp() or has a + * it's wounded in __ww_mutex_check_kill() or has a * wakeup pending to re-read the wounded state. 
*/ if (owner != current) @@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } debug_mutex_lock_common(lock, &waiter); - debug_mutex_add_waiter(lock, &waiter, current); lock_contended(&lock->dep_map, ip); -- cgit v1.2.3 From e73e81975f2447e6f556100cada64a18ec631cbb Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Tue, 31 Jul 2018 21:12:22 +0900 Subject: sched/debug: Fix potential deadlock when writing to sched_features The following lockdep report can be triggered by writing to /sys/kernel/debug/sched_features: ====================================================== WARNING: possible circular locking dependency detected 4.18.0-rc6-00152-gcd3f77d74ac3-dirty #18 Not tainted ------------------------------------------------------ sh/3358 is trying to acquire lock: 000000004ad3989d (cpu_hotplug_lock.rw_sem){++++}, at: static_key_enable+0x14/0x30 but task is already holding lock: 00000000c1b31a88 (&sb->s_type->i_mutex_key#3){+.+.}, at: sched_feat_write+0x160/0x428 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #3 (&sb->s_type->i_mutex_key#3){+.+.}: lock_acquire+0xb8/0x148 down_write+0xac/0x140 start_creating+0x5c/0x168 debugfs_create_dir+0x18/0x220 opp_debug_register+0x8c/0x120 _add_opp_dev+0x104/0x1f8 dev_pm_opp_get_opp_table+0x174/0x340 _of_add_opp_table_v2+0x110/0x760 dev_pm_opp_of_add_table+0x5c/0x240 dev_pm_opp_of_cpumask_add_table+0x5c/0x100 cpufreq_init+0x160/0x430 cpufreq_online+0x1cc/0xe30 cpufreq_add_dev+0x78/0x198 subsys_interface_register+0x168/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #2 (opp_table_lock){+.+.}: lock_acquire+0xb8/0x148 __mutex_lock+0x104/0xf50 mutex_lock_nested+0x1c/0x28 _of_add_opp_table_v2+0xb4/0x760 dev_pm_opp_of_add_table+0x5c/0x240 dev_pm_opp_of_cpumask_add_table+0x5c/0x100 cpufreq_init+0x160/0x430 cpufreq_online+0x1cc/0xe30 cpufreq_add_dev+0x78/0x198 subsys_interface_register+0x168/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #1 (subsys mutex#6){+.+.}: lock_acquire+0xb8/0x148 __mutex_lock+0x104/0xf50 mutex_lock_nested+0x1c/0x28 subsys_interface_register+0xd8/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c 
kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #0 (cpu_hotplug_lock.rw_sem){++++}: __lock_acquire+0x203c/0x21d0 lock_acquire+0xb8/0x148 cpus_read_lock+0x58/0x1c8 static_key_enable+0x14/0x30 sched_feat_write+0x314/0x428 full_proxy_write+0xa0/0x138 __vfs_write+0xd8/0x388 vfs_write+0xdc/0x318 ksys_write+0xb4/0x138 sys_write+0xc/0x18 __sys_trace_return+0x0/0x4 other info that might help us debug this: Chain exists of: cpu_hotplug_lock.rw_sem --> opp_table_lock --> &sb->s_type->i_mutex_key#3 Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sb->s_type->i_mutex_key#3); lock(opp_table_lock); lock(&sb->s_type->i_mutex_key#3); lock(cpu_hotplug_lock.rw_sem); *** DEADLOCK *** 2 locks held by sh/3358: #0: 00000000a8c4b363 (sb_writers#10){.+.+}, at: vfs_write+0x238/0x318 #1: 00000000c1b31a88 (&sb->s_type->i_mutex_key#3){+.+.}, at: sched_feat_write+0x160/0x428 stack backtrace: CPU: 5 PID: 3358 Comm: sh Not tainted 4.18.0-rc6-00152-gcd3f77d74ac3-dirty #18 Hardware name: Renesas H3ULCB Kingfisher board based on r8a7795 ES2.0+ (DT) Call trace: dump_backtrace+0x0/0x288 show_stack+0x14/0x20 dump_stack+0x13c/0x1ac print_circular_bug.isra.10+0x270/0x438 check_prev_add.constprop.16+0x4dc/0xb98 __lock_acquire+0x203c/0x21d0 lock_acquire+0xb8/0x148 cpus_read_lock+0x58/0x1c8 static_key_enable+0x14/0x30 sched_feat_write+0x314/0x428 full_proxy_write+0xa0/0x138 __vfs_write+0xd8/0x388 vfs_write+0xdc/0x318 ksys_write+0xb4/0x138 sys_write+0xc/0x18 __sys_trace_return+0x0/0x4 This is because when loading the cpufreq_dt module we first acquire cpu_hotplug_lock.rw_sem lock, then in cpufreq_init(), we are taking the &sb->s_type->i_mutex_key lock. But when writing to /sys/kernel/debug/sched_features, the cpu_hotplug_lock.rw_sem lock depends on the &sb->s_type->i_mutex_key lock. To fix this bug, reverse the lock acquisition order when writing to sched_features, this way cpu_hotplug_lock.rw_sem no longer depends on &sb->s_type->i_mutex_key. Tested-by: Dietmar Eggemann Signed-off-by: Jiada Wang Signed-off-by: Peter Zijlstra (Intel) Cc: Eugeniu Rosca Cc: George G. Davis Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180731121222.26195-1-jiada_wang@mentor.com Signed-off-by: Ingo Molnar --- kernel/sched/debug.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 60caf1fb94e0..6383aa6a60ca 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { static void sched_feat_disable(int i) { - static_key_disable(&sched_feat_keys[i]); + static_key_disable_cpuslocked(&sched_feat_keys[i]); } static void sched_feat_enable(int i) { - static_key_enable(&sched_feat_keys[i]); + static_key_enable_cpuslocked(&sched_feat_keys[i]); } #else static void sched_feat_disable(int i) { }; @@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf, /* Ensure the static_key remains in a consistent state */ inode = file_inode(filp); + cpus_read_lock(); inode_lock(inode); ret = sched_feat_set(cmp); inode_unlock(inode); + cpus_read_unlock(); if (ret < 0) return ret; -- cgit v1.2.3 From e5e96fafd9028b1478b165db78c52d981c14f471 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 10 Aug 2018 22:30:18 +0530 Subject: sched/topology: Set correct NUMA topology type With the following commit: 051f3ca02e46 ("sched/topology: Introduce NUMA identity node sched domain") the scheduler introduced a new NUMA level. 
However this leads to the NUMA topology on 2 node systems to not be marked as NUMA_DIRECT anymore. After this commit, it gets reported as NUMA_BACKPLANE, because sched_domains_numa_level is now 2 on 2 node systems. Fix this by allowing setting systems that have up to 2 NUMA levels as NUMA_DIRECT. While here remove code that assumes that level can be 0. Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Andre Wild Cc: Heiko Carstens Cc: Linus Torvalds Cc: Mel Gorman Cc: Michael Ellerman Cc: Peter Zijlstra Cc: Rik van Riel Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: linuxppc-dev Fixes: 051f3ca02e46 "Introduce NUMA identity node sched domain" Link: http://lkml.kernel.org/r/1533920419-17410-1-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 56a0fed30c0a..505a41c42b96 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void) n = sched_max_numa_distance; - if (sched_domains_numa_levels <= 1) { + if (sched_domains_numa_levels <= 2) { sched_numa_topology_type = NUMA_DIRECT; return; } @@ -1380,9 +1380,6 @@ void sched_init_numa(void) break; } - if (!level) - return; - /* * 'level' contains the number of unique distances * -- cgit v1.2.3 From 12b04875d666e83d27511df25580de84505bc758 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 31 Aug 2018 17:22:55 +0200 Subject: sched/pelt: Fix update_blocked_averages() for RT and DL classes update_blocked_averages() is called to periodiccally decay the stalled load of idle CPUs and to sync all loads before running load balance. When cfs rq is idle, it trigs a load balance during pick_next_task_fair() in order to potentially pull tasks and to use this newly idle CPU. This load balance happens whereas prev task from another class has not been put and its utilization updated yet. This may lead to wrongly account running time as idle time for RT or DL classes. Test that no RT or DL task is running when updating their utilization in update_blocked_averages(). We still update RT and DL utilization instead of simply skipping them to make sure that all metrics are synced when used during load balance. 
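To see why the 'running' argument matters, a toy userspace model of a PELT-like signal; the decay and accrual constants are invented for illustration and are not the kernel's PELT math:

  #include <stdio.h>

  static double toy_pelt(double util, int running)
  {
          util *= 0.978;          /* rough per-period decay */
          if (running)
                  util += 0.022;  /* accrue only while running */
          return util;
  }

  int main(void)
  {
          double ok = 0.5, misreported = 0.5;
          int i;

          for (i = 0; i < 100; i++) {
                  ok = toy_pelt(ok, 1);                   /* RT task really running */
                  misreported = toy_pelt(misreported, 0); /* accounted as idle */
          }
          printf("correct=%.2f misreported=%.2f\n", ok, misreported);
          return 0;
  }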
Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking") Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking") Link: http://lkml.kernel.org/r/1535728975-22799-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b39fb596f6c1..8cff8d55ee95 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7263,6 +7263,7 @@ static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq, *pos; + const struct sched_class *curr_class; struct rq_flags rf; bool done = true; @@ -7299,8 +7300,10 @@ static void update_blocked_averages(int cpu) if (cfs_rq_has_blocked(cfs_rq)) done = false; } - update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); - update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); + + curr_class = rq->curr->sched_class; + update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); + update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); /* Don't need periodic decay once load/util_avg are null */ if (others_have_blocked(rq)) @@ -7365,13 +7368,16 @@ static inline void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq = &rq->cfs; + const struct sched_class *curr_class; struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); - update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); - update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); + + curr_class = rq->curr->sched_class; + update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); + update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; -- cgit v1.2.3 From d0cdb3ce8834332d918fc9c8ff74f8a169ec9abe Mon Sep 17 00:00:00 2001 From: Steve Muckle Date: Fri, 31 Aug 2018 15:42:17 -0700 Subject: sched/fair: Fix vruntime_normalized() for remote non-migration wakeup When a task which previously ran on a given CPU is remotely queued to wake up on that same CPU, there is a period where the task's state is TASK_WAKING and its vruntime is not normalized. This is not accounted for in vruntime_normalized() which will cause an error in the task's vruntime if it is switched from the fair class during this time. For example if it is boosted to RT priority via rt_mutex_setprio(), rq->min_vruntime will not be subtracted from the task's vruntime but it will be added again when the task returns to the fair class. The task's vruntime will have been erroneously doubled and the effective priority of the task will be reduced. Note this will also lead to inflation of all vruntimes since the doubled vruntime value will become the rq's min_vruntime when other tasks leave the rq. This leads to repeated doubling of the vruntime and priority penalty. Fix this by recognizing a WAKING task's vruntime as normalized only if sched_remote_wakeup is true. This indicates a migration, in which case the vruntime would have been normalized in migrate_task_rq_fair(). Based on a similar patch from John Dias . 
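A toy userspace model of the double-add described above; the values are invented and the bookkeeping is heavily simplified:

  #include <stdio.h>

  int main(void)
  {
          long min_vruntime = 1000000;
          long vruntime = min_vruntime + 500;   /* absolute, NOT normalized */

          /*
           * The class switch wrongly treats the value as normalized, so
           * nothing is subtracted; returning to CFS then adds min_vruntime
           * on top of an already absolute value:
           */
          long buggy = vruntime + min_vruntime;

          /* Correct: subtract on detach, add back on attach. */
          long fixed = (vruntime - min_vruntime) + min_vruntime;

          printf("buggy=%ld fixed=%ld\n", buggy, fixed);
          return 0;
  }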
Suggested-by: Peter Zijlstra Tested-by: Dietmar Eggemann Signed-off-by: Steve Muckle Signed-off-by: Peter Zijlstra (Intel) Cc: Chris Redpath Cc: John Dias Cc: Linus Torvalds Cc: Miguel de Dios Cc: Morten Rasmussen Cc: Patrick Bellasi Cc: Paul Turner Cc: Quentin Perret Cc: Thomas Gleixner Cc: Todd Kjos Cc: kernel-team@android.com Fixes: b5179ac70de8 ("sched/fair: Prepare to fix fairness problems on migration") Link: http://lkml.kernel.org/r/20180831224217.169476-1-smuckle@google.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8cff8d55ee95..c6b7d6daab20 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9644,7 +9644,8 @@ static inline bool vruntime_normalized(struct task_struct *p) * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). */ - if (!se->sum_exec_runtime || p->state == TASK_WAKING) + if (!se->sum_exec_runtime || + (p->state == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; -- cgit v1.2.3 From 287cdaac5700c5b8970d739f73d742d863d3e2ca Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 4 Sep 2018 11:36:26 +0200 Subject: sched/fair: Fix scale_rt_capacity() for SMT Since commit: 523e979d3164 ("sched/core: Use PELT for scale_rt_capacity()") scale_rt_capacity() returns the remaining capacity and not a scale factor to apply on cpu_capacity_orig. arch_scale_cpu() is directly called by scale_rt_capacity() so we must take the sched_domain argument. Reported-by: Srikar Dronamraju Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Srikar Dronamraju Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 523e979d3164 ("sched/core: Use PELT for scale_rt_capacity()") Link: http://lkml.kernel.org/r/20180904093626.GA23936@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c6b7d6daab20..f12d004be6a1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7488,10 +7488,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd, return load_idx; } -static unsigned long scale_rt_capacity(int cpu) +static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); - unsigned long max = arch_scale_cpu_capacity(NULL, cpu); + unsigned long max = arch_scale_cpu_capacity(sd, cpu); unsigned long used, free; unsigned long irq; @@ -7513,7 +7513,7 @@ static unsigned long scale_rt_capacity(int cpu) static void update_cpu_capacity(struct sched_domain *sd, int cpu) { - unsigned long capacity = scale_rt_capacity(cpu); + unsigned long capacity = scale_rt_capacity(sd, cpu); struct sched_group *sdg = sd->groups; cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); -- cgit v1.2.3 From bb3485c8ace6475c269b1aa2da674490f455f412 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 7 Sep 2018 09:51:04 +0200 Subject: sched/fair: Fix load_balance redo for !imbalance It can happen that load_balance() finds a busiest group and then a busiest rq but the calculated imbalance is in fact 0. In such situation, detach_tasks() returns immediately and lets the flag LBF_ALL_PINNED set. The busiest CPU is then wrongly assumed to have pinned tasks and removed from the load balance mask. 
then, we redo a load balance without the busiest CPU. This creates wrong load balance situation and generates wrong task migration. If the calculated imbalance is 0, it's useless to try to find a busiest rq as no task will be migrated and we can return immediately. This situation can happen with heterogeneous system or smp system when RT tasks are decreasing the capacity of some CPUs. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: jhugo@codeaurora.org Link: http://lkml.kernel.org/r/1536306664-29827-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f12d004be6a1..fc9a484ef82b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8275,7 +8275,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) force_balance: /* Looks like there is an imbalance. Compute it */ calculate_imbalance(env, &sds); - return sds.busiest; + return env->imbalance ? sds.busiest : NULL; out_balanced: env->imbalance = 0; -- cgit v1.2.3 From da260fe12330be8b003c2ab07a112704163ea675 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 7 Sep 2018 12:35:21 +0200 Subject: jump_label: Fix typo in warning message There's no 'allocatote' - use the next best thing: 'allocate' :-) Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Jason Baron Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt (VMware) Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180907103521.31344-1-bp@alien8.de Signed-off-by: Ingo Molnar --- kernel/jump_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 01ebdf1f9f40..2e62503bea0d 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, case MODULE_STATE_COMING: ret = jump_label_add_module(mod); if (ret) { - WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n"); + WARN(1, "Failed to allocate memory: jump_label may not work properly.\n"); jump_label_del_module(mod); } break; -- cgit v1.2.3 From cb538267ea1e9e025ec692577c9ae75797261889 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 31 Jul 2018 14:35:32 +0200 Subject: jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations Weirdly we seem to have forgotten this... 
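The convention the new assertions enforce, as a minimal sketch (function names are illustrative):

  #include <linux/cpu.h>

  static void my_key_update_cpuslocked(void)
  {
          lockdep_assert_cpus_held();     /* caller owns the hotplug lock */
          /* ... modify state that must not race with CPU hotplug ... */
  }

  static void my_key_update(void)
  {
          cpus_read_lock();
          my_key_update_cpuslocked();
          cpus_read_unlock();
  }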
Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/jump_label.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index de97d8753b62..966a9e9c0f04 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -85,6 +85,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key) int v, v1; STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); /* * Careful if we get concurrent static_key_slow_inc() calls; @@ -130,6 +131,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); void static_key_enable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); @@ -160,6 +162,7 @@ EXPORT_SYMBOL_GPL(static_key_enable); void static_key_disable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); + lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); @@ -185,6 +188,8 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, unsigned long rate_limit, struct delayed_work *work) { + lockdep_assert_cpus_held(); + /* * The negative count check is valid even when a negative * key->enabled is in use by static_key_slow_inc(); a -- cgit v1.2.3 From 4b486b535c33ef354ecf02a2650919004fd7d2b0 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 24 Jul 2018 15:10:25 -0400 Subject: locking/rwsem: Exit read lock slowpath if queue empty & no writer It was discovered that a constant stream of readers with occassional writers pounding on a rwsem may cause many of the readers to enter the slowpath unnecessarily thus increasing latency and lowering performance. In the current code, a reader entering the slowpath critical section will unconditionally set the WAITING_BIAS, if not set yet, and clear its active count even if no one is in the wait queue and no writer is present. This causes some incoming readers to observe the presence of waiters in the wait queue and hence have to go into the slowpath themselves. With sufficient numbers of readers and a relatively short lock hold time, the WAITING_BIAS may be repeatedly turned on and off and a substantial portion of the readers will go into the slowpath sustaining a rather long queue in the wait queue spinlock and repeated WAITING_BIAS on/off cycle until the logjam is broken opportunistically. To avoid this situation from happening, an additional check is added to detect the special case that the reader in the critical section is the only one in the wait queue and no writer is present. When that happens, it can just exit the slowpath and return immediately as its active count has already been set in the lock. Other incoming readers won't observe the presence of waiters and so will not be forced into the slowpath. The issue was found in a customer site where they had an application that pounded on the pread64 syscalls heavily on an XFS filesystem. The application was run in a recent 4-socket boxes with a lot of CPUs. They saw significant spinlock contention in the rwsem_down_read_failed() call. With this patch applied, the system CPU usage went down from 85% to 57%, and the spinlock contention in the pread64 syscalls was gone. 
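Condensed from the rwsem-xadd.c hunk below, the early exit amounts to the check sketched here; the point is that RWSEM_WAITING_BIAS is never set, so later readers keep seeing a non-negative count and stay on the fast path (a writer or the waiting bias makes the count negative):

  raw_spin_lock_irq(&sem->wait_lock);
  if (list_empty(&sem->wait_list) &&
      atomic_long_read(&sem->count) >= 0) {
          /*
           * Our RWSEM_ACTIVE_READ_BIAS is already in the count, nobody
           * is waiting and no writer holds the lock: return with the
           * read lock held, without advertising a waiter.
           */
          raw_spin_unlock_irq(&sem->wait_lock);
          return sem;
  }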
Signed-off-by: Waiman Long Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Davidlohr Bueso Acked-by: Will Deacon Cc: Joe Mario Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1532459425-19204-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/rwsem-xadd.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 3064c50e181e..01fcb807598c 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -233,8 +233,19 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state) waiter.type = RWSEM_WAITING_FOR_READ; raw_spin_lock_irq(&sem->wait_lock); - if (list_empty(&sem->wait_list)) + if (list_empty(&sem->wait_list)) { + /* + * In case the wait queue is empty and the lock isn't owned + * by a writer, this reader can exit the slowpath and return + * immediately as its RWSEM_ACTIVE_READ_BIAS has already + * been set in the count. + */ + if (atomic_long_read(&sem->count) >= 0) { + raw_spin_unlock_irq(&sem->wait_lock); + return sem; + } adjustment += RWSEM_WAITING_BIAS; + } list_add_tail(&waiter.list, &sem->wait_list); /* we're now waiting on the lock, but no longer actively locking */ -- cgit v1.2.3 From 882a78a9f39f5535b209b4aa0a1741e35b8c67fb Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Sep 2018 12:53:17 -0700 Subject: sched/fair: Fix kernel-doc notation warning Fix kernel-doc warning for missing 'flags' parameter description: ../kernel/sched/fair.c:3371: warning: Function parameter or member 'flags' not described in 'attach_entity_load_avg' Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: ea14b57e8a18 ("sched/cpufreq: Provide migration hint") Link: http://lkml.kernel.org/r/cdda0d42-880d-4229-a9f7-5899c977a063@infradead.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fc9a484ef82b..f808ddf2a868 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) * attach_entity_load_avg - attach this entity to its cfs_rq load avg * @cfs_rq: cfs_rq to attach to * @se: sched_entity to attach + * @flags: migration hints * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. -- cgit v1.2.3 From 05484e0984487d42e97c417cbb0697fa9d16e7e9 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Fri, 20 Jul 2018 14:32:31 +0100 Subject: sched/topology: Add SD_ASYM_CPUCAPACITY flag detection The SD_ASYM_CPUCAPACITY sched_domain flag is supposed to mark the sched_domain in the hierarchy where all CPU capacities are visible for any CPU's point of view on asymmetric CPU capacity systems. The scheduler can then take to take capacity asymmetry into account when balancing at this level. It also serves as an indicator for how wide task placement heuristics have to search to consider all available CPU capacities as asymmetric systems might often appear symmetric at smallest level(s) of the sched_domain hierarchy. The flag has been around for while but so far only been set by out-of-tree code in Android kernels. One solution is to let each architecture provide the flag through a custom sched_domain topology array and associated mask and flag functions. 
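For illustration, that per-arch alternative would have looked roughly like the sketch below. set_sched_topology() and the topology-level layout are existing API; the table itself and my_die_flags() are hypothetical, and the MC line assumes CONFIG_SCHED_MC:

  #include <linux/sched/topology.h>
  #include <linux/topology.h>

  static int my_die_flags(void)
  {
          return SD_ASYM_CPUCAPACITY;     /* arch asserts asymmetric capacities */
  }

  static struct sched_domain_topology_level my_topology[] = {
          { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
          { cpu_cpu_mask, my_die_flags, SD_INIT_NAME(DIE) },
          { NULL, },
  };

  /* early arch init would then call: set_sched_topology(my_topology); */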
However, SD_ASYM_CPUCAPACITY is special in the sense that it depends on the capacity and presence of all CPUs in the system, i.e. when hotplugging all CPUs out except those with one particular CPU capacity the flag should disappear even if the sched_domains don't collapse. Similarly, the flag is affected by cpusets where load-balancing is turned off. Detecting when the flags should be set therefore depends not only on topology information but also the cpuset configuration and hotplug state. The arch code doesn't have easy access to the cpuset configuration. Instead, this patch implements the flag detection in generic code where cpusets and hotplug state is already taken care of. All the arch is responsible for is to implement arch_scale_cpu_capacity() and force a full rebuild of the sched_domain hierarchy if capacities are updated, e.g. later in the boot process when cpufreq has initialized. Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1532093554-30504-2-git-send-email-morten.rasmussen@arm.com [ Fixed 'CPU' capitalization. ] Signed-off-by: Ingo Molnar --- include/linux/sched/topology.h | 6 ++-- kernel/sched/topology.c | 81 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 78 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 26347741ba50..6b9976180c1e 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -23,10 +23,10 @@ #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */ #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 505a41c42b96..5c4d583d53ee 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1061,7 +1061,6 @@ static struct cpumask ***sched_domains_numa_masks; * SD_SHARE_PKG_RESOURCES - describes shared caches * SD_NUMA - describes NUMA topologies * SD_SHARE_POWERDOMAIN - describes shared power domain - * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies * * Odd one out, which beside describing the topology has a quirk also * prescribes the desired behaviour that goes along with it: @@ -1073,13 +1072,12 @@ static struct cpumask ***sched_domains_numa_masks; SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING | \ - SD_ASYM_CPUCAPACITY | \ SD_SHARE_POWERDOMAIN) static struct sched_domain * sd_init(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, - struct sched_domain *child, int cpu) + 
struct sched_domain *child, int dflags, int cpu) { struct sd_data *sdd = &tl->data; struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); @@ -1100,6 +1098,9 @@ sd_init(struct sched_domain_topology_level *tl, "wrong sd_flags in topology description\n")) sd_flags &= ~TOPOLOGY_SD_FLAGS; + /* Apply detected topology flags */ + sd_flags |= dflags; + *sd = (struct sched_domain){ .min_interval = sd_weight, .max_interval = 2*sd_weight, @@ -1604,9 +1605,9 @@ static void __sdt_free(const struct cpumask *cpu_map) static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, - struct sched_domain *child, int cpu) + struct sched_domain *child, int dflags, int cpu) { - struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu); + struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu); if (child) { sd->level = child->level + 1; @@ -1632,6 +1633,65 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve return sd; } +/* + * Find the sched_domain_topology_level where all CPU capacities are visible + * for all CPUs. + */ +static struct sched_domain_topology_level +*asym_cpu_capacity_level(const struct cpumask *cpu_map) +{ + int i, j, asym_level = 0; + bool asym = false; + struct sched_domain_topology_level *tl, *asym_tl = NULL; + unsigned long cap; + + /* Is there any asymmetry? */ + cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map)); + + for_each_cpu(i, cpu_map) { + if (arch_scale_cpu_capacity(NULL, i) != cap) { + asym = true; + break; + } + } + + if (!asym) + return NULL; + + /* + * Examine topology from all CPU's point of views to detect the lowest + * sched_domain_topology_level where a highest capacity CPU is visible + * to everyone. + */ + for_each_cpu(i, cpu_map) { + unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i); + int tl_id = 0; + + for_each_sd_topology(tl) { + if (tl_id < asym_level) + goto next_level; + + for_each_cpu_and(j, tl->mask(i), cpu_map) { + unsigned long capacity; + + capacity = arch_scale_cpu_capacity(NULL, j); + + if (capacity <= max_capacity) + continue; + + max_capacity = capacity; + asym_level = tl_id; + asym_tl = tl; + } +next_level: + tl_id++; + } + } + + return asym_tl; +} + + /* * Build sched domains for a given set of CPUs and attach the sched domains * to the individual CPUs @@ -1644,18 +1704,27 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct s_data d; struct rq *rq = NULL; int i, ret = -ENOMEM; + struct sched_domain_topology_level *tl_asym; alloc_state = __visit_domain_allocation_hell(&d, cpu_map); if (alloc_state != sa_rootdomain) goto error; + tl_asym = asym_cpu_capacity_level(cpu_map); + /* Set up domains for CPUs specified by the cpu_map: */ for_each_cpu(i, cpu_map) { struct sched_domain_topology_level *tl; sd = NULL; for_each_sd_topology(tl) { - sd = build_sched_domain(tl, cpu_map, attr, sd, i); + int dflags = 0; + + if (tl == tl_asym) + dflags |= SD_ASYM_CPUCAPACITY; + + sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); + if (tl == sched_domain_topology) *per_cpu_ptr(d.sd, i) = sd; if (tl->flags & SDTL_OVERLAP) -- cgit v1.2.3 From df054e8445a4011e3d693c2268129c0456108663 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 4 Jul 2018 11:17:39 +0100 Subject: sched/topology: Add static_key for asymmetric CPU capacity optimizations The existing asymmetric CPU capacity code should cause minimal overhead for others. 
Putting it behind a static_key, it has been done for SMT optimizations, would make it easier to extend and improve without causing harm to others moving forward. Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-2-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 3 +++ kernel/sched/sched.h | 1 + kernel/sched/topology.c | 9 ++++++++- 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f808ddf2a868..3e5071aeb117 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6188,6 +6188,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) { long min_cap, max_cap; + if (!static_branch_unlikely(&sched_asym_cpucapacity)) + return 0; + min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu)); max_cap = cpu_rq(cpu)->rd->max_cpu_capacity; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4a2e8cae63c4..0f36adc31ba5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1185,6 +1185,7 @@ DECLARE_PER_CPU(int, sd_llc_id); DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); DECLARE_PER_CPU(struct sched_domain *, sd_numa); DECLARE_PER_CPU(struct sched_domain *, sd_asym); +extern struct static_key_false sched_asym_cpucapacity; struct sched_group_capacity { atomic_t ref; diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 5c4d583d53ee..b0cdf5e95bda 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -398,6 +398,7 @@ DEFINE_PER_CPU(int, sd_llc_id); DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain *, sd_numa); DEFINE_PER_CPU(struct sched_domain *, sd_asym); +DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); static void update_top_cache_domain(int cpu) { @@ -1705,6 +1706,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct rq *rq = NULL; int i, ret = -ENOMEM; struct sched_domain_topology_level *tl_asym; + bool has_asym = false; alloc_state = __visit_domain_allocation_hell(&d, cpu_map); if (alloc_state != sa_rootdomain) @@ -1720,8 +1722,10 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att for_each_sd_topology(tl) { int dflags = 0; - if (tl == tl_asym) + if (tl == tl_asym) { dflags |= SD_ASYM_CPUCAPACITY; + has_asym = true; + } sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); @@ -1773,6 +1777,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } rcu_read_unlock(); + if (has_asym) + static_branch_enable_cpuslocked(&sched_asym_cpucapacity); + if (rq && sched_debug_enabled) { pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); -- cgit v1.2.3 From 3b1baa6496e6b7ad016342a9d256bdfb072ce902 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 4 Jul 2018 11:17:40 +0100 Subject: sched/fair: Add 'group_misfit_task' load-balance type To maximize throughput in systems with asymmetric CPU capacities (e.g. ARM big.LITTLE) load-balancing has to consider task and CPU utilization as well as per-CPU compute capacity when load-balancing in addition to the current average load based load-balancing policy. 
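Concretely, the fitness test this patch adds (task_fits_capacity(), in the hunk below) boils down to a margin comparison; 1280 is the capacity_margin value in this kernel's fair.c, roughly a 20% margin. A toy userspace illustration with invented utilization and capacity numbers:

  #include <stdio.h>

  static int fits(long util, long capacity)
  {
          return capacity * 1024 > util * 1280;   /* capacity_margin */
  }

  int main(void)
  {
          printf("util 350 on a capacity-400 CPU:  %s\n",
                 fits(350, 400) ? "fits" : "misfit");
          printf("util 350 on a capacity-1024 CPU: %s\n",
                 fits(350, 1024) ? "fits" : "misfit");
          return 0;
  }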
Tasks with high utilization that are scheduled on a lower capacity CPU need to be identified and migrated to a higher capacity CPU if possible to maximize throughput. To implement this additional policy an additional group_type (load-balance scenario) is added: 'group_misfit_task'. This represents scenarios where a sched_group has one or more tasks that are not suitable for its per-CPU capacity. 'group_misfit_task' is only considered if the system is not overloaded or imbalanced ('group_imbalanced' or 'group_overloaded'). Identifying misfit tasks requires the rq lock to be held. To avoid taking remote rq locks to examine source sched_groups for misfit tasks, each CPU is responsible for tracking misfit tasks themselves and update the rq->misfit_task flag. This means checking task utilization when tasks are scheduled and on sched_tick. Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-3-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 54 ++++++++++++++++++++++++++++++++++++++++++++-------- kernel/sched/sched.h | 2 ++ 2 files changed, 48 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3e5071aeb117..6e04bea5b11a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -693,6 +693,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); static unsigned long task_h_load(struct task_struct *p); +static unsigned long capacity_of(int cpu); /* Give new sched_entity start runnable values to heavy its load in infant time */ void init_entity_runnable_average(struct sched_entity *se) @@ -1446,7 +1447,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, static unsigned long weighted_cpuload(struct rq *rq); static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); -static unsigned long capacity_of(int cpu); /* Cached statistics for all CPUs within a node */ struct numa_stats { @@ -3647,6 +3647,29 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) WRITE_ONCE(p->se.avg.util_est, ue); } +static inline int task_fits_capacity(struct task_struct *p, long capacity) +{ + return capacity * 1024 > task_util_est(p) * capacity_margin; +} + +static inline void update_misfit_status(struct task_struct *p, struct rq *rq) +{ + if (!static_branch_unlikely(&sched_asym_cpucapacity)) + return; + + if (!p) { + rq->misfit_task_load = 0; + return; + } + + if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { + rq->misfit_task_load = 0; + return; + } + + rq->misfit_task_load = task_h_load(p); +} + #else /* CONFIG_SMP */ #define UPDATE_TG 0x0 @@ -3676,6 +3699,7 @@ util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) {} +static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} #endif /* CONFIG_SMP */ @@ -6201,7 +6225,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) /* Bring task utilization in sync with prev_cpu */ sync_entity_load_avg(&p->se); - return min_cap * 1024 < task_util(p) * capacity_margin; + return 
!task_fits_capacity(p, min_cap); } /* @@ -6618,9 +6642,12 @@ done: __maybe_unused; if (hrtick_enabled(rq)) hrtick_start_fair(rq, p); + update_misfit_status(p, rq); + return p; idle: + update_misfit_status(NULL, rq); new_tasks = idle_balance(rq, rf); /* @@ -6826,6 +6853,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10; enum fbq_type { regular, remote, all }; +enum group_type { + group_other = 0, + group_misfit_task, + group_imbalanced, + group_overloaded, +}; + #define LBF_ALL_PINNED 0x01 #define LBF_NEED_BREAK 0x02 #define LBF_DST_PINNED 0x04 @@ -7399,12 +7433,6 @@ static unsigned long task_h_load(struct task_struct *p) /********** Helpers for find_busiest_group ************************/ -enum group_type { - group_other = 0, - group_imbalanced, - group_overloaded, -}; - /* * sg_lb_stats - stats of a sched_group required for load_balancing */ @@ -7420,6 +7448,7 @@ struct sg_lb_stats { unsigned int group_weight; enum group_type group_type; int group_no_capacity; + unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ #ifdef CONFIG_NUMA_BALANCING unsigned int nr_numa_running; unsigned int nr_preferred_running; @@ -7712,6 +7741,9 @@ group_type group_classify(struct sched_group *group, if (sg_imbalanced(group)) return group_imbalanced; + if (sgs->group_misfit_task_load) + return group_misfit_task; + return group_other; } @@ -7786,6 +7818,10 @@ static inline void update_sg_lb_stats(struct lb_env *env, */ if (!nr_running && idle_cpu(i)) sgs->idle_cpus++; + + if (env->sd->flags & SD_ASYM_CPUCAPACITY && + sgs->group_misfit_task_load < rq->misfit_task_load) + sgs->group_misfit_task_load = rq->misfit_task_load; } /* Adjust by relative CPU capacity of the group */ @@ -9567,6 +9603,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) if (static_branch_unlikely(&sched_numa_balancing)) task_tick_numa(rq, curr); + + update_misfit_status(curr, rq); } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0f36adc31ba5..7dbf67d147a2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -842,6 +842,8 @@ struct rq { unsigned char idle_balance; + unsigned long misfit_task_load; + /* For active balancing */ int active_balance; int push_cpu; -- cgit v1.2.3 From e3d6d0cb66f2351cbfd09fbae04eb9804afe9577 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 4 Jul 2018 11:17:41 +0100 Subject: sched/fair: Add sched_group per-CPU max capacity The current sg->min_capacity tracks the lowest per-CPU compute capacity available in the sched_group when rt/irq pressure is taken into account. Minimum capacity isn't the ideal metric for tracking if a sched_group needs offloading to another sched_group for some scenarios, e.g. a sched_group with multiple CPUs if only one is under heavy pressure. Tracking maximum capacity isn't perfect either but a better choice for some situations as it indicates that the sched_group definitely compute capacity constrained either due to rt/irq pressure on all CPUs or asymmetric CPU capacities (e.g. big.LITTLE). 
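To make the distinction concrete, here is a small standalone sketch (plain C, not kernel code; struct sg_cap, CAPACITY_MARGIN and the numeric capacities are illustrative stand-ins for sched_group_capacity and capacity_margin). A min_capacity-based test would flag both example groups as needing offloading, while the max_capacity-based test only flags the group whose best CPU is genuinely small:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for sched_group_capacity and capacity_margin. */
struct sg_cap {
    unsigned long min_capacity;   /* lowest per-CPU capacity in the group */
    unsigned long max_capacity;   /* highest per-CPU capacity in the group */
};

#define CAPACITY_MARGIN 1280      /* stand-in for the scheduler's capacity_margin */

/* True only if every CPU in sg is notably weaker than the best CPU in ref. */
static bool group_weaker_than(const struct sg_cap *sg, const struct sg_cap *ref)
{
    return sg->max_capacity * CAPACITY_MARGIN < ref->max_capacity * 1024;
}

int main(void)
{
    /* LITTLE-only group vs. big-only group on a big.LITTLE system. */
    struct sg_cap little = { .min_capacity = 446,  .max_capacity = 446 };
    struct sg_cap big    = { .min_capacity = 1024, .max_capacity = 1024 };
    /* Mixed group where a single CPU is under heavy rt/irq pressure. */
    struct sg_cap mixed  = { .min_capacity = 300,  .max_capacity = 1024 };

    printf("little needs offloading to big: %d\n", group_weaker_than(&little, &big)); /* 1 */
    printf("mixed needs offloading to big:  %d\n", group_weaker_than(&mixed, &big));  /* 0 */
    return 0;
}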
Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-4-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 24 ++++++++++++++++++++---- kernel/sched/sched.h | 1 + kernel/sched/topology.c | 2 ++ 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6e04bea5b11a..fe04315d57b3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7557,13 +7557,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) cpu_rq(cpu)->cpu_capacity = capacity; sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = capacity; + sdg->sgc->max_capacity = capacity; } void update_group_capacity(struct sched_domain *sd, int cpu) { struct sched_domain *child = sd->child; struct sched_group *group, *sdg = sd->groups; - unsigned long capacity, min_capacity; + unsigned long capacity, min_capacity, max_capacity; unsigned long interval; interval = msecs_to_jiffies(sd->balance_interval); @@ -7577,6 +7578,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) capacity = 0; min_capacity = ULONG_MAX; + max_capacity = 0; if (child->flags & SD_OVERLAP) { /* @@ -7607,6 +7609,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) } min_capacity = min(capacity, min_capacity); + max_capacity = max(capacity, max_capacity); } } else { /* @@ -7620,12 +7623,14 @@ void update_group_capacity(struct sched_domain *sd, int cpu) capacity += sgc->capacity; min_capacity = min(sgc->min_capacity, min_capacity); + max_capacity = max(sgc->max_capacity, max_capacity); group = group->next; } while (group != child->groups); } sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = min_capacity; + sdg->sgc->max_capacity = max_capacity; } /* @@ -7721,16 +7726,27 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) } /* - * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller + * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller * per-CPU capacity than sched_group ref. */ static inline bool -group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref) +group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref) { return sg->sgc->min_capacity * capacity_margin < ref->sgc->min_capacity * 1024; } +/* + * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller + * per-CPU capacity_orig than sched_group ref. + */ +static inline bool +group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref) +{ + return sg->sgc->max_capacity * capacity_margin < + ref->sgc->max_capacity * 1024; +} + static inline enum group_type group_classify(struct sched_group *group, struct sg_lb_stats *sgs) @@ -7876,7 +7892,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, * power/energy consequences are not considered. 
*/ if (sgs->sum_nr_running <= sgs->group_weight && - group_smaller_cpu_capacity(sds->local, sg)) + group_smaller_min_cpu_capacity(sds->local, sg)) return false; asym_packing: diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7dbf67d147a2..fe17e0be2d7b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1197,6 +1197,7 @@ struct sched_group_capacity { */ unsigned long capacity; unsigned long min_capacity; /* Min per-CPU capacity in group */ + unsigned long max_capacity; /* Max per-CPU capacity in group */ unsigned long next_update; int imbalance; /* XXX unrelated to capacity but shared group state */ diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index b0cdf5e95bda..2536e1b938f9 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -693,6 +693,7 @@ static void init_overlap_sched_group(struct sched_domain *sd, sg_span = sched_group_span(sg); sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; + sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; } static int @@ -852,6 +853,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd) sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; + sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; return sg; } -- cgit v1.2.3 From cad68e552e7774b68ae6a2c5fedb792936098b72 Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 4 Jul 2018 11:17:42 +0100 Subject: sched/fair: Consider misfit tasks when load-balancing On asymmetric CPU capacity systems load intensive tasks can end up on CPUs that don't suit their compute demand. In this scenarios 'misfit' tasks should be migrated to CPUs with higher compute capacity to ensure better throughput. group_misfit_task indicates this scenario, but tweaks to the load-balance code are needed to make the migrations happen. Misfit balancing only makes sense between a source group of lower per-CPU capacity and destination group of higher compute capacity. Otherwise, misfit balancing is ignored. group_misfit_task has lowest priority so any imbalance due to overload is dealt with first. The modifications are: 1. Only pick a group containing misfit tasks as the busiest group if the destination group has higher capacity and has spare capacity. 2. When the busiest group is a 'misfit' group, skip the usual average load and group capacity checks. 3. Set the imbalance for 'misfit' balancing sufficiently high for a task to be pulled ignoring average load. 4. Pick the CPU with the highest misfit load as the source CPU. 5. If the misfit task is alone on the source CPU, go for active balancing. 
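A rough standalone sketch of points 4 and 5 above (struct rq_stub and the helper names are invented for illustration; the real logic lives in find_busiest_queue() and need_active_balance() in the diff below):

#include <stdbool.h>
#include <stddef.h>

/* Minimal stand-in for the per-CPU runqueue fields this sketch needs. */
struct rq_stub {
    unsigned int nr_running;
    unsigned long misfit_task_load;
};

/* Point 4: pick the CPU carrying the biggest misfit load as the source. */
static struct rq_stub *pick_misfit_source(struct rq_stub *rqs, int nr_cpus)
{
    struct rq_stub *busiest = NULL;
    unsigned long busiest_load = 0;
    int i;

    for (i = 0; i < nr_cpus; i++) {
        if (rqs[i].misfit_task_load > busiest_load) {
            busiest_load = rqs[i].misfit_task_load;
            busiest = &rqs[i];
        }
    }
    return busiest;
}

/* Point 5: a lone running misfit task can only be moved by active balancing. */
static bool need_active_balance_for_misfit(const struct rq_stub *src)
{
    return src && src->nr_running == 1 && src->misfit_task_load;
}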
Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-5-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fe04315d57b3..24fe39e57bc3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6890,6 +6890,7 @@ struct lb_env { unsigned int loop_max; enum fbq_type fbq_type; + enum group_type src_grp_type; struct list_head tasks; }; @@ -7873,6 +7874,17 @@ static bool update_sd_pick_busiest(struct lb_env *env, { struct sg_lb_stats *busiest = &sds->busiest_stat; + /* + * Don't try to pull misfit tasks we can't help. + * We can use max_capacity here as reduction in capacity on some + * CPUs in the group should either be possible to resolve + * internally or be covered by avg_load imbalance (eventually). + */ + if (sgs->group_type == group_misfit_task && + (!group_smaller_max_cpu_capacity(sg, sds->local) || + !group_has_capacity(env, &sds->local_stat))) + return false; + if (sgs->group_type > busiest->group_type) return true; @@ -7895,6 +7907,13 @@ static bool update_sd_pick_busiest(struct lb_env *env, group_smaller_min_cpu_capacity(sds->local, sg)) return false; + /* + * If we have more than one misfit sg go with the biggest misfit. + */ + if (sgs->group_type == group_misfit_task && + sgs->group_misfit_task_load < busiest->group_misfit_task_load) + return false; + asym_packing: /* This is the busiest node in its class. */ if (!(env->sd->flags & SD_ASYM_PACKING)) @@ -8192,8 +8211,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * factors in sg capacity and sgs with smaller group_type are * skipped when updating the busiest sg: */ - if (busiest->avg_load <= sds->avg_load || - local->avg_load >= sds->avg_load) { + if (busiest->group_type != group_misfit_task && + (busiest->avg_load <= sds->avg_load || + local->avg_load >= sds->avg_load)) { env->imbalance = 0; return fix_small_imbalance(env, sds); } @@ -8227,6 +8247,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s (sds->avg_load - local->avg_load) * local->group_capacity ) / SCHED_CAPACITY_SCALE; + /* Boost imbalance to allow misfit task to be balanced. */ + if (busiest->group_type == group_misfit_task) { + env->imbalance = max_t(long, env->imbalance, + busiest->group_misfit_task_load); + } + /* * if *imbalance is less than the average load per runnable task * there is no guarantee that any tasks will be moved so we'll have @@ -8293,6 +8319,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env) busiest->group_no_capacity) goto force_balance; + /* Misfit tasks should be dealt with regardless of the avg load */ + if (busiest->group_type == group_misfit_task) + goto force_balance; + /* * If the local group is busier than the selected busiest group * don't try and pull any tasks. @@ -8330,6 +8360,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) force_balance: /* Looks like there is an imbalance. Compute it */ + env->src_grp_type = busiest->group_type; calculate_imbalance(env, &sds); return env->imbalance ? 
sds.busiest : NULL; @@ -8377,6 +8408,19 @@ static struct rq *find_busiest_queue(struct lb_env *env, if (rt > env->fbq_type) continue; + /* + * For ASYM_CPUCAPACITY domains with misfit tasks we simply + * seek the "biggest" misfit task. + */ + if (env->src_grp_type == group_misfit_task) { + if (rq->misfit_task_load > busiest_load) { + busiest_load = rq->misfit_task_load; + busiest = rq; + } + + continue; + } + capacity = capacity_of(i); wl = weighted_cpuload(rq); @@ -8446,6 +8490,9 @@ static int need_active_balance(struct lb_env *env) return 1; } + if (env->src_grp_type == group_misfit_task) + return 1; + return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); } -- cgit v1.2.3 From 5fbdfae5221a5208ed8e7653fc1c4b31de420f74 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Wed, 4 Jul 2018 11:17:43 +0100 Subject: sched/fair: Kick nohz balance if rq->misfit_task_load There already are a few conditions in nohz_kick_needed() to ensure a nohz kick is triggered, but they are not enough for some misfit task scenarios. Excluding asym packing, those are: - rq->nr_running >=2: Not relevant here because we are running a misfit task, it needs to be migrated regardless and potentially through active balance. - sds->nr_busy_cpus > 1: If there is only the misfit task being run on a group of low capacity CPUs, this will be evaluated to False. - rq->cfs.h_nr_running >=1 && check_cpu_capacity(): Not relevant here, misfit task needs to be migrated regardless of rt/IRQ pressure As such, this commit adds an rq->misfit_task_load condition to trigger a nohz kick. The idea to kick a nohz balance for misfit tasks originally came from Leo Yan , and a similar patch was submitted for the Android Common Kernel - see: https://lists.linaro.org/pipermail/eas-dev/2016-September/000551.html Signed-off-by: Valentin Schneider Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-6-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 24fe39e57bc3..e08287d3806f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9135,7 +9135,7 @@ static void nohz_balancer_kick(struct rq *rq) if (time_before(now, nohz.next_balance)) goto out; - if (rq->nr_running >= 2) { + if (rq->nr_running >= 2 || rq->misfit_task_load) { flags = NOHZ_KICK_MASK; goto out; } -- cgit v1.2.3 From dbbad719449e06d73db21598d6eee178f7a54b3b Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Wed, 4 Jul 2018 11:17:44 +0100 Subject: sched/fair: Change 'prefer_sibling' type to bool This variable is entirely local to update_sd_lb_stats, so we can safely change its type and slightly clean up its initialisation. 
Signed-off-by: Valentin Schneider Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-7-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e08287d3806f..23017939ecab 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7982,11 +7982,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd struct sched_group *sg = env->sd->groups; struct sg_lb_stats *local = &sds->local_stat; struct sg_lb_stats tmp_sgs; - int load_idx, prefer_sibling = 0; + int load_idx; bool overload = false; - - if (child && child->flags & SD_PREFER_SIBLING) - prefer_sibling = 1; + bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING; #ifdef CONFIG_NO_HZ_COMMON if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) -- cgit v1.2.3 From 575638d1047eb057a5cdf95cc0b3c084e1279508 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Wed, 4 Jul 2018 11:17:45 +0100 Subject: sched/core: Change root_domain->overload type to int sizeof(_Bool) is implementation defined, so let's just go with 'int' as is done for other structures e.g. sched_domain_shared->has_idle_cores. The local 'overload' variable used in update_sd_lb_stats can remain bool, as it won't impact any struct layout and can be assigned to the root_domain field. Signed-off-by: Valentin Schneider Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-8-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/sched.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fe17e0be2d7b..4d181478c5b8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -716,7 +716,7 @@ struct root_domain { cpumask_var_t online; /* Indicate more than one runnable task for any CPU */ - bool overload; + int overload; /* * The bit corresponding to a CPU gets set here if such CPU has more @@ -1698,7 +1698,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count) if (prev_nr < 2 && rq->nr_running >= 2) { #ifdef CONFIG_SMP if (!rq->rd->overload) - rq->rd->overload = true; + rq->rd->overload = 1; #endif } -- cgit v1.2.3 From e90c8fe15a3bf93a23088bcf1a56a0fa391d4e50 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Wed, 4 Jul 2018 11:17:46 +0100 Subject: sched/fair: Wrap rq->rd->overload accesses with READ/WRITE_ONCE() This variable can be read and set locklessly within update_sd_lb_stats(). As such, READ/WRITE_ONCE() are added to make sure nothing terribly wrong can happen because of the compiler. 
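For illustration, a minimal standalone model of the access pattern being hardened; the READ_ONCE()/WRITE_ONCE() definitions below are simplified volatile-cast stand-ins, not the kernel macros:

/* Roughly what the kernel's READ_ONCE()/WRITE_ONCE() do for a plain int. */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static int rd_overload;  /* stand-in for rq->rd->overload */

/* Writer side: only dirty the shared cacheline when the value changes. */
static void set_rd_overload(int overload)
{
    if (READ_ONCE(rd_overload) != overload)
        WRITE_ONCE(rd_overload, overload);
}

/* Reader side: one non-torn load the compiler may not re-read or omit. */
static int rd_is_overloaded(void)
{
    return READ_ONCE(rd_overload);
}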
Signed-off-by: Valentin Schneider Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-9-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 6 +++--- kernel/sched/sched.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 23017939ecab..d9c4e97bfebd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8058,8 +8058,8 @@ next_group: if (!env->sd->parent) { /* update overload indicator if we are at root domain */ - if (env->dst_rq->rd->overload != overload) - env->dst_rq->rd->overload = overload; + if (READ_ONCE(env->dst_rq->rd->overload) != overload) + WRITE_ONCE(env->dst_rq->rd->overload, overload); } } @@ -9502,7 +9502,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf) rq_unpin_lock(this_rq, rf); if (this_rq->avg_idle < sysctl_sched_migration_cost || - !this_rq->rd->overload) { + !READ_ONCE(this_rq->rd->overload)) { rcu_read_lock(); sd = rcu_dereference_check_sched_domain(this_rq->sd); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4d181478c5b8..938063639793 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1697,8 +1697,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count) if (prev_nr < 2 && rq->nr_running >= 2) { #ifdef CONFIG_SMP - if (!rq->rd->overload) - rq->rd->overload = 1; + if (!READ_ONCE(rq->rd->overload)) + WRITE_ONCE(rq->rd->overload, 1); #endif } -- cgit v1.2.3 From 757ffdd705ee942fc8150b17942d968601d2a15b Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Wed, 4 Jul 2018 11:17:47 +0100 Subject: sched/fair: Set rq->rd->overload when misfit Idle balance is a great opportunity to pull a misfit task. However, there are scenarios where misfit tasks are present but idle balance is prevented by the overload flag. A good example of this is a workload of n identical tasks. Let's suppose we have a 2+2 Arm big.LITTLE system. We then spawn 4 fairly CPU-intensive tasks - for the sake of simplicity let's say they are just CPU hogs, even when running on big CPUs. They are identical tasks, so on an SMP system they should all end at (roughly) the same time. However, in our case the LITTLE CPUs are less performing than the big CPUs, so tasks running on the LITTLEs will have a longer completion time. This means that the big CPUs will complete their work earlier, at which point they should pull the tasks from the LITTLEs. What we want to happen is summarized as follows: a,b,c,d are our CPU-hogging tasks _ signifies idling LITTLE_0 | a a a a _ _ LITTLE_1 | b b b b _ _ ---------|------------- big_0 | c c c c a a big_1 | d d d d b b ^ ^ Tasks end on the big CPUs, idle balance happens and the misfit tasks are pulled straight away This however won't happen, because currently the overload flag is only set when there is any CPU that has more than one runnable task - which may very well not be the case here if our CPU-hogging workload is all there is to run. As such, this commit sets the overload flag in update_sg_lb_stats when a group is flagged as having a misfit task. 
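A simplified standalone sketch of the widened condition (struct cpu_stats and cpu_marks_rd_overload() are illustrative stand-ins for the rq fields and the update_sg_lb_stats() logic in the diff below):

#include <stdbool.h>

/* Per-CPU view used while building group statistics (stand-in for struct rq). */
struct cpu_stats {
    unsigned int nr_running;           /* rq->nr_running */
    unsigned long misfit_task_load;    /* rq->misfit_task_load */
};

/*
 * Before this patch only the first test could raise rd->overload; now a lone
 * misfit task on an asymmetric-capacity domain raises it too, so the idle
 * big CPUs in the diagram above are allowed to pull tasks a and b.
 */
static bool cpu_marks_rd_overload(const struct cpu_stats *cs, bool asym_capacity)
{
    if (cs->nr_running > 1)
        return true;    /* classic case: more than one runnable task */
    if (asym_capacity && cs->misfit_task_load)
        return true;    /* new case: a single misfit task is still pullable */
    return false;
}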
Signed-off-by: Valentin Schneider Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-10-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 6 ++++-- kernel/sched/sched.h | 6 +++++- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d9c4e97bfebd..8b228c5b3eb4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7793,7 +7793,7 @@ static bool update_nohz_stats(struct rq *rq, bool force) * @load_idx: Load index of sched_domain of this_cpu for load calc. * @local_group: Does group contain this_cpu. * @sgs: variable to hold the statistics for this group. - * @overload: Indicate more than one runnable task for any CPU. + * @overload: Indicate pullable load (e.g. >1 runnable task). */ static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, int load_idx, @@ -7837,8 +7837,10 @@ static inline void update_sg_lb_stats(struct lb_env *env, sgs->idle_cpus++; if (env->sd->flags & SD_ASYM_CPUCAPACITY && - sgs->group_misfit_task_load < rq->misfit_task_load) + sgs->group_misfit_task_load < rq->misfit_task_load) { sgs->group_misfit_task_load = rq->misfit_task_load; + *overload = 1; + } } /* Adjust by relative CPU capacity of the group */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 938063639793..85b3a2bf6c2b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -715,7 +715,11 @@ struct root_domain { cpumask_var_t span; cpumask_var_t online; - /* Indicate more than one runnable task for any CPU */ + /* + * Indicate pullable load on at least one CPU, e.g: + * - More than one runnable task + * - Running task is misfit + */ int overload; /* -- cgit v1.2.3 From 4ad3831a9d4af5e36da5d44a3b9c6522d0353cee Mon Sep 17 00:00:00 2001 From: Chris Redpath Date: Wed, 4 Jul 2018 11:17:48 +0100 Subject: sched/fair: Don't move tasks to lower capacity CPUs unless necessary When lower capacity CPUs are load balancing and considering to pull something from a higher capacity group, we should not pull tasks from a CPU with only one task running as this is guaranteed to impede progress for that task. If there is more than one task running, load balance in the higher capacity group would have already made any possible moves to resolve imbalance and we should make better use of system compute capacity by moving a task if we still have more than one running. Signed-off-by: Chris Redpath Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-11-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8b228c5b3eb4..06ff75f4ac7b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8423,6 +8423,17 @@ static struct rq *find_busiest_queue(struct lb_env *env, capacity = capacity_of(i); + /* + * For ASYM_CPUCAPACITY domains, don't pick a CPU that could + * eventually lead to active_balancing high->low capacity. 
+ * Higher per-CPU capacity is considered better than balancing + * average load. + */ + if (env->sd->flags & SD_ASYM_CPUCAPACITY && + capacity_of(env->dst_cpu) < capacity && + rq->nr_running == 1) + continue; + wl = weighted_cpuload(rq); /* -- cgit v1.2.3 From 9c63e84db29bcf584040931ad97c2edd11e35f6c Mon Sep 17 00:00:00 2001 From: Morten Rasmussen Date: Wed, 4 Jul 2018 11:17:50 +0100 Subject: sched/core: Disable SD_PREFER_SIBLING on asymmetric CPU capacity domains The 'prefer sibling' sched_domain flag is intended to encourage spreading tasks to sibling sched_domain to take advantage of more caches and core for SMT systems. It has recently been changed to be on all non-NUMA topology level. However, spreading across domains with CPU capacity asymmetry isn't desirable, e.g. spreading from high capacity to low capacity CPUs even if high capacity CPUs aren't overutilized might give access to more cache but the CPU will be slower and possibly lead to worse overall throughput. To prevent this, we need to remove SD_PREFER_SIBLING on the sched_domain level immediately below SD_ASYM_CPUCAPACITY. Signed-off-by: Morten Rasmussen Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: gaku.inami.xh@renesas.com Cc: valentin.schneider@arm.com Cc: vincent.guittot@linaro.org Link: http://lkml.kernel.org/r/1530699470-29808-13-git-send-email-morten.rasmussen@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 2536e1b938f9..7ffad0d3a4eb 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1126,7 +1126,7 @@ sd_init(struct sched_domain_topology_level *tl, | 0*SD_SHARE_CPUCAPACITY | 0*SD_SHARE_PKG_RESOURCES | 0*SD_SERIALIZE - | 0*SD_PREFER_SIBLING + | 1*SD_PREFER_SIBLING | 0*SD_NUMA | sd_flags , @@ -1152,17 +1152,21 @@ sd_init(struct sched_domain_topology_level *tl, if (sd->flags & SD_ASYM_CPUCAPACITY) { struct sched_domain *t = sd; + /* + * Don't attempt to spread across CPUs of different capacities. + */ + if (sd->child) + sd->child->flags &= ~SD_PREFER_SIBLING; + for_each_lower_domain(t) t->flags |= SD_BALANCE_WAKE; } if (sd->flags & SD_SHARE_CPUCAPACITY) { - sd->flags |= SD_PREFER_SIBLING; sd->imbalance_pct = 110; sd->smt_gain = 1178; /* ~15% */ } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { - sd->flags |= SD_PREFER_SIBLING; sd->imbalance_pct = 117; sd->cache_nice_tries = 1; sd->busy_idx = 2; @@ -1173,6 +1177,7 @@ sd_init(struct sched_domain_topology_level *tl, sd->busy_idx = 3; sd->idle_idx = 2; + sd->flags &= ~SD_PREFER_SIBLING; sd->flags |= SD_SERIALIZE; if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { sd->flags &= ~(SD_BALANCE_EXEC | @@ -1182,7 +1187,6 @@ sd_init(struct sched_domain_topology_level *tl, #endif } else { - sd->flags |= SD_PREFER_SIBLING; sd->cache_nice_tries = 1; sd->busy_idx = 2; sd->idle_idx = 1; -- cgit v1.2.3 From 7e6f4c5d600c1c8e2a1d900e65cab319d9b6782e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 29 Aug 2018 11:45:21 +0200 Subject: sched/debug: Explicitly cast sched_feat() to bool LLVM has a warning that tags expressions like: if (foo && non-bool-const) This pattern triggers for CONFIG_SCHED_DEBUG=n where sched_feat() ends up being whatever bit we select. Avoid the warning with an explicit cast to bool. 
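A minimal reproduction of the pattern, assuming clang's -Wconstant-logical-operand is the warning in question (FEATURES and the two helper macros are stand-ins for sysctl_sched_features and sched_feat()):

/* With CONFIG_SCHED_DEBUG=n, sched_feat() degenerates into a bitmask test. */
#define FEATURES              0x04                 /* stand-in for sysctl_sched_features */
#define sched_feat_old(bit)   (FEATURES & (bit))   /* non-bool constant: 0 or 4 */
#define sched_feat_new(bit)   !!(FEATURES & (bit)) /* explicit bool: 0 or 1 */

int pick(int queued)
{
    if (queued && sched_feat_old(0x04))   /* clang: use of '&&' with constant operand */
        return 1;
    if (queued && sched_feat_new(0x04))   /* quiet: the operand is already 0/1 */
        return 2;
    return 0;
}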
Reported-by: Philipp Klocke Tested-by: Nick Desaulniers Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 85b3a2bf6c2b..3a4ef8f73f08 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1401,7 +1401,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features = 0; #undef SCHED_FEAT -#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) +#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */ -- cgit v1.2.3 From d90707ebebe03596e19de3abbf79b766e72a3465 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 29 Aug 2018 15:19:09 +0200 Subject: sched/numa: Remove unused code from update_numa_stats() With: commit 2d4056fafa19 ("sched/numa: Remove numa_has_capacity()") the local variables 'smt', 'cpus' and 'capacity' and their results are not used anymore in numa_has_capacity() Remove this unused code. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Srikar Dronamraju Acked-by: Mel Gorman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1535548752-4434-2-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 06ff75f4ac7b..b65596fae06b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1463,8 +1463,7 @@ struct numa_stats { */ static void update_numa_stats(struct numa_stats *ns, int nid) { - int smt, cpu, cpus = 0; - unsigned long capacity; + int cpu; memset(ns, 0, sizeof(*ns)); for_each_cpu(cpu, cpumask_of_node(nid)) { @@ -1473,26 +1472,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid) ns->nr_running += rq->nr_running; ns->load += weighted_cpuload(rq); ns->compute_capacity += capacity_of(cpu); - - cpus++; } - /* - * If we raced with hotplug and there are no CPUs left in our mask - * the @ns structure is NULL'ed and task_numa_compare() will - * not find this node attractive. - * - * We'll detect a huge imbalance and bail there. - */ - if (!cpus) - return; - - /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */ - smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity); - capacity = cpus / smt; /* cores */ - - capacity = min_t(unsigned, capacity, - DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE)); } struct task_numa_env { -- cgit v1.2.3 From 7477a3504e619768c9e972dafe2907e6b8ed9823 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 29 Aug 2018 15:19:10 +0200 Subject: sched/numa: Remove unused numa_stats::nr_running field nr_running in struct numa_stats is not used anywhere in the code. Remove it. 
Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Srikar Dronamraju Acked-by: Mel Gorman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1535548752-4434-3-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b65596fae06b..6bd142d19549 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1454,8 +1454,6 @@ struct numa_stats { /* Total compute capacity of CPUs on a node */ unsigned long compute_capacity; - - unsigned int nr_running; }; /* @@ -1469,7 +1467,6 @@ static void update_numa_stats(struct numa_stats *ns, int nid) for_each_cpu(cpu, cpumask_of_node(nid)) { struct rq *rq = cpu_rq(cpu); - ns->nr_running += rq->nr_running; ns->load += weighted_cpuload(rq); ns->compute_capacity += capacity_of(cpu); } -- cgit v1.2.3 From 925b9cd1b89a94b7124d128c80dfc48f78a63098 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 6 Sep 2018 16:18:34 -0400 Subject: locking/rwsem: Make owner store task pointer of last owning reader Currently, when a reader acquires a lock, it only sets the RWSEM_READER_OWNED bit in the owner field. The other bits are simply not used. When debugging hanging cases involving rwsems and readers, the owner value does not provide much useful information at all. This patch modifies the current behavior to always store the task_struct pointer of the last rwsem-acquiring reader in a reader-owned rwsem. This may be useful in debugging rwsem hanging cases especially if only one reader is involved. However, the task in the owner field may not the real owner or one of the real owners at all when the owner value is examined, for example, in a crash dump. So it is just an additional hint about the past history. If CONFIG_DEBUG_RWSEMS=y is enabled, the owner field will be checked at unlock time too to make sure the task pointer value is valid. That does have a slight performance cost and so is only enabled as part of that debug option. From the performance point of view, it is expected that the changes shouldn't have any noticeable performance impact. A rwsem microbenchmark (with 48 worker threads and 1:1 reader/writer ratio) was ran on a 2-socket 24-core 48-thread Haswell system. The locking rates on a 4.19-rc1 based kernel were as follows: 1) Unpatched kernel: 543.3 kops/s 2) Patched kernel: 549.2 kops/s 3) Patched kernel (CONFIG_DEBUG_RWSEMS on): 546.6 kops/s There was actually a slight increase in performance (1.1%) in this particular case. Maybe it was caused by the elimination of a branch or just a testing noise. Turning on the CONFIG_DEBUG_RWSEMS option also had less than the expected impact on performance. The least significant 2 bits of the owner value are now used to designate the rwsem is readers owned and the owners are anonymous. 
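Purely as an illustration of the encoding described above, a standalone sketch (struct task_stub stands in for task_struct; the bit values match the rwsem.h hunk below):

#include <stdio.h>

#define RWSEM_READER_OWNED        (1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED   (1UL << 1)

/*
 * task_struct is word-aligned, so its low two bits are free for flags;
 * the long member gives this stub the same property.
 */
struct task_stub {
    long pad;
    char comm[16];
};

/* What a reader now leaves behind in sem->owner: its task pointer + both bits. */
static unsigned long reader_owner_value(struct task_stub *tsk)
{
    return (unsigned long)tsk | RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED;
}

/* Recovering the hint later, e.g. while inspecting a crash dump. */
static struct task_stub *owner_hint(unsigned long owner)
{
    if (!(owner & RWSEM_READER_OWNED))
        return NULL;    /* free or writer-owned */
    return (struct task_stub *)(owner &
            ~(RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED));
}

int main(void)
{
    static struct task_stub t = { .comm = "last-reader" };
    unsigned long owner = reader_owner_value(&t);

    printf("reader-owned: %lu, hint: %s\n",
           owner & RWSEM_READER_OWNED, owner_hint(owner)->comm);
    return 0;
}

The stored pointer is only a hint: by the time it is examined the reader may have long since released the lock, which is exactly why the anonymous bit stays set.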
Signed-off-by: Waiman Long Acked-by: Peter Zijlstra Cc: Davidlohr Bueso Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1536265114-10842-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- include/linux/rwsem.h | 4 +- kernel/locking/rwsem-xadd.c | 2 +- kernel/locking/rwsem.c | 7 ++-- kernel/locking/rwsem.h | 95 +++++++++++++++++++++++++++++++++------------ 4 files changed, 78 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index ab93b6eae696..67dbb57508b1 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -45,10 +45,10 @@ struct rw_semaphore { }; /* - * Setting bit 0 of the owner field with other non-zero bits will indicate + * Setting bit 1 of the owner field but not bit 0 will indicate * that the rwsem is writer-owned with an unknown owner. */ -#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) +#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 01fcb807598c..09b180063ee1 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -180,7 +180,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, * but it gives the spinners an early indication that the * readers now have the lock. */ - rwsem_set_reader_owned(sem); + __rwsem_set_reader_owned(sem, waiter->task); } /* diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 776308d2fa9e..e586f0d03ad3 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -117,8 +117,9 @@ EXPORT_SYMBOL(down_write_trylock); void up_read(struct rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); - DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED); + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED)); + rwsem_clear_reader_owned(sem); __up_read(sem); } @@ -181,7 +182,7 @@ void down_read_non_owner(struct rw_semaphore *sem) might_sleep(); __down_read(sem); - rwsem_set_reader_owned(sem); + __rwsem_set_reader_owned(sem, NULL); } EXPORT_SYMBOL(down_read_non_owner); @@ -215,7 +216,7 @@ EXPORT_SYMBOL(down_write_killable_nested); void up_read_non_owner(struct rw_semaphore *sem) { - DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED); + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED)); __up_read(sem); } diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h index b9d0e72aa80f..bad2bca0268b 100644 --- a/kernel/locking/rwsem.h +++ b/kernel/locking/rwsem.h @@ -1,24 +1,30 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * The owner field of the rw_semaphore structure will be set to - * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear - * the owner field when it unlocks. A reader, on the other hand, will - * not touch the owner field when it unlocks. + * The least significant 2 bits of the owner value has the following + * meanings when set. + * - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers + * - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned, + * i.e. the owner(s) cannot be readily determined. It can be reader + * owned or the owning writer is indeterminate. 
* - * In essence, the owner field now has the following 4 states: - * 1) 0 - * - lock is free or the owner hasn't set the field yet - * 2) RWSEM_READER_OWNED - * - lock is currently or previously owned by readers (lock is free - * or not set by owner yet) - * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well - * - lock is owned by an anonymous writer, so spinning on the lock - * owner should be disabled. - * 4) Other non-zero value - * - a writer owns the lock and other writers can spin on the lock owner. + * When a writer acquires a rwsem, it puts its task_struct pointer + * into the owner field. It is cleared after an unlock. + * + * When a reader acquires a rwsem, it will also puts its task_struct + * pointer into the owner field with both the RWSEM_READER_OWNED and + * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will + * largely be left untouched. So for a free or reader-owned rwsem, + * the owner value may contain information about the last reader that + * acquires the rwsem. The anonymous bit is set because that particular + * reader may or may not still own the lock. + * + * That information may be helpful in debugging cases where the system + * seems to hang on a reader owned rwsem especially if only one reader + * is involved. Ideally we would like to track all the readers that own + * a rwsem, but the overhead is simply too big. */ -#define RWSEM_ANONYMOUSLY_OWNED (1UL << 0) -#define RWSEM_READER_OWNED ((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED) +#define RWSEM_READER_OWNED (1UL << 0) +#define RWSEM_ANONYMOUSLY_OWNED (1UL << 1) #ifdef CONFIG_DEBUG_RWSEMS # define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) @@ -44,15 +50,26 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem) WRITE_ONCE(sem->owner, NULL); } +/* + * The task_struct pointer of the last owning reader will be left in + * the owner field. + * + * Note that the owner value just indicates the task has owned the rwsem + * previously, it may not be the real owner or one of the real owners + * anymore when that field is examined, so take it with a grain of salt. + */ +static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, + struct task_struct *owner) +{ + unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED + | RWSEM_ANONYMOUSLY_OWNED; + + WRITE_ONCE(sem->owner, (struct task_struct *)val); +} + static inline void rwsem_set_reader_owned(struct rw_semaphore *sem) { - /* - * We check the owner value first to make sure that we will only - * do a write to the rwsem cacheline when it is really necessary - * to minimize cacheline contention. - */ - if (READ_ONCE(sem->owner) != RWSEM_READER_OWNED) - WRITE_ONCE(sem->owner, RWSEM_READER_OWNED); + __rwsem_set_reader_owned(sem, current); } /* @@ -72,6 +89,25 @@ static inline bool rwsem_has_anonymous_owner(struct task_struct *owner) { return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED; } + +#ifdef CONFIG_DEBUG_RWSEMS +/* + * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there + * is a task pointer in owner of a reader-owned rwsem, it will be the + * real owner or one of the real owners. The only exception is when the + * unlock is done by up_read_non_owner(). 
+ */ +#define rwsem_clear_reader_owned rwsem_clear_reader_owned +static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) +{ + unsigned long val = (unsigned long)current | RWSEM_READER_OWNED + | RWSEM_ANONYMOUSLY_OWNED; + if (READ_ONCE(sem->owner) == (struct task_struct *)val) + cmpxchg_relaxed((unsigned long *)&sem->owner, val, + RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED); +} +#endif + #else static inline void rwsem_set_owner(struct rw_semaphore *sem) { @@ -81,7 +117,18 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem) { } +static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem, + struct task_struct *owner) +{ +} + static inline void rwsem_set_reader_owned(struct rw_semaphore *sem) { } #endif + +#ifndef rwsem_clear_reader_owned +static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) +{ +} +#endif -- cgit v1.2.3 From 8412dbd642585107e29b20920876de98e7df5819 Mon Sep 17 00:00:00 2001 From: Todd Brandt Date: Wed, 22 Aug 2018 18:37:11 -0700 Subject: PM / sleep: Show freezing tasks that caused a suspend abort For debug purposes it would be nice to see which tasks caused a suspend abort, i.e. which tasks were still in the process of freezing when a wakeup event occurred. This patch adds the info to pm_debug_messages. Signed-off-by: Todd Brandt Signed-off-by: Rafael J. Wysocki --- kernel/power/process.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/process.c b/kernel/power/process.c index 7381d49a44db..4b6a54da7e65 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -96,7 +96,7 @@ static int try_to_freeze_tasks(bool user_only) if (wq_busy) show_workqueue_state(); - if (!wakeup) { + if (!wakeup || pm_debug_messages_on) { read_lock(&tasklist_lock); for_each_process_thread(g, p) { if (p != current && !freezer_should_skip(p) -- cgit v1.2.3 From dc5591a03f1d6dae6b11cdf1d74b023f7ac0fdbf Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 28 Aug 2018 21:33:15 +0100 Subject: locking/lockdep: Delete unnecessary #include Commit: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage") added the inclusion of . liblockdep doesn't have a stub version of that header so now fails to build. However, commit: bff1b208a5d1 ("tracing: Partial revert of "tracing: Centralize preemptirq tracepoints and unify their usage"") removed the use of functions declared in that header. So delete the #include. 
Signed-off-by: Ben Hutchings Cc: Joel Fernandes Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt Cc: Thomas Gleixner Cc: Will Deacon Fixes: bff1b208a5d1 ("tracing: Partial revert of "tracing: Centralize ...") Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints ...") Link: http://lkml.kernel.org/r/20180828203315.GD18030@decadent.org.uk Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e406c5fdb41e..dd13f865ad40 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -55,7 +55,6 @@ #include "lockdep_internals.h" -#include #define CREATE_TRACE_POINTS #include -- cgit v1.2.3 From 0b405c65ad459f5f4d3db1672246172bd19d946d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Aug 2018 12:22:35 +0100 Subject: locking/ww_mutex: Fix spelling mistake "cylic" -> "cyclic" Trivial fix to spelling mistake in pr_err() error message Signed-off-by: Colin Ian King Acked-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Link: http://lkml.kernel.org/r/20180824112235.8842-1-colin.king@canonical.com Signed-off-by: Ingo Molnar --- kernel/locking/test-ww_mutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 5b915b370d5a..0be047dbd897 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads) if (!cycle->result) continue; - pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n", + pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n", n, nthreads, cycle->result); ret = -EINVAL; break; -- cgit v1.2.3 From 02e184476eff848273826c1d6617bb37e5bcc7ad Mon Sep 17 00:00:00 2001 From: Yabin Cui Date: Thu, 23 Aug 2018 15:59:35 -0700 Subject: perf/core: Force USER_DS when recording user stack data Perf can record user stack data in response to a synchronous request, such as a tracepoint firing. If this happens under set_fs(KERNEL_DS), then we end up reading user stack data using __copy_from_user_inatomic() under set_fs(KERNEL_DS). I think this conflicts with the intention of using set_fs(KERNEL_DS). And it is explicitly forbidden by hardware on ARM64 when both CONFIG_ARM64_UAO and CONFIG_ARM64_PAN are used. So fix this by forcing USER_DS when recording user stack data. Signed-off-by: Yabin Cui Acked-by: Peter Zijlstra (Intel) Cc: Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 88b0193d9418 ("perf/callchain: Force USER_DS when invoking perf_callchain_user()") Link: http://lkml.kernel.org/r/20180823225935.27035-1-yabinc@google.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index abaed4f8bb7f..c80549bf82c6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5943,6 +5943,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, unsigned long sp; unsigned int rem; u64 dyn_size; + mm_segment_t fs; /* * We dump: @@ -5960,7 +5961,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. 
*/ sp = perf_user_stack_pointer(regs); + fs = get_fs(); + set_fs(USER_DS); rem = __output_copy_user(handle, (void *) sp, dump_size); + set_fs(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); -- cgit v1.2.3 From ace8031099f91480799b5929b4cccf2dcacc5136 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Fri, 3 Aug 2018 20:37:32 +0800 Subject: sched/topology: Make local variables static Fix the following warnings: kernel/sched/topology.c:10:15: warning: symbol 'sched_domains_tmpmask' was not declared. Should it be static? kernel/sched/topology.c:11:15: warning: symbol 'sched_domains_tmpmask2' was not declared. Should it be static? Signed-off-by: zhong jiang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1533299852-26941-1-git-send-email-zhongjiang@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 7ffad0d3a4eb..9d74371e4aad 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -7,8 +7,8 @@ DEFINE_MUTEX(sched_domains_mutex); /* Protected by sched_domains_mutex: */ -cpumask_var_t sched_domains_tmpmask; -cpumask_var_t sched_domains_tmpmask2; +static cpumask_var_t sched_domains_tmpmask; +static cpumask_var_t sched_domains_tmpmask2; #ifdef CONFIG_SCHED_DEBUG -- cgit v1.2.3 From 80ffbaa5b1bd98e80e3239a3b8cfda2da433009a Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 3 Sep 2018 06:09:34 -0600 Subject: kallsyms: reduce size a little on 64-bit Both kallsyms_num_syms and kallsyms_markers[] don't really need to use unsigned long as their (base) types; unsigned int fully suffices. Signed-off-by: Jan Beulich Signed-off-by: Masahiro Yamada --- kernel/kallsyms.c | 4 ++-- scripts/kallsyms.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 02a0b01380d8..f3a04994e063 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -37,7 +37,7 @@ extern const u8 kallsyms_names[] __weak; * Tell the compiler that the count isn't in the small data section if the arch * has one (eg: FRV). 
*/ -extern const unsigned long kallsyms_num_syms +extern const unsigned int kallsyms_num_syms __attribute__((weak, section(".rodata"))); extern const unsigned long kallsyms_relative_base @@ -46,7 +46,7 @@ __attribute__((weak, section(".rodata"))); extern const u8 kallsyms_token_table[] __weak; extern const u16 kallsyms_token_index[] __weak; -extern const unsigned long kallsyms_markers[] __weak; +extern const unsigned int kallsyms_markers[] __weak; /* * Expand a compressed symbol data into the resulting uncompressed string, diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index a9186a98a37d..085b6a584fe0 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -405,7 +405,7 @@ static void write_src(void) } output_label("kallsyms_num_syms"); - printf("\tPTR\t%u\n", table_cnt); + printf("\t.long\t%u\n", table_cnt); printf("\n"); /* table of offset markers, that give the offset in the compressed stream @@ -434,7 +434,7 @@ static void write_src(void) output_label("kallsyms_markers"); for (i = 0; i < ((table_cnt + 255) >> 8); i++) - printf("\tPTR\t%d\n", markers[i]); + printf("\t.long\t%u\n", markers[i]); printf("\n"); free(markers); -- cgit v1.2.3 From 84818af26350c245c8e1994493b7459c0822be5b Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 10 Sep 2018 21:46:38 -0400 Subject: locking/rtmutex: Fix the preprocessor logic with normal #ifdef #else #endif Merging v4.14.68 into v4.14-rt I tripped over a conflict in the rtmutex.c code. There I found that we had: #ifdef CONFIG_DEBUG_LOCK_ALLOC [..] #endif #ifndef CONFIG_DEBUG_LOCK_ALLOC [..] #endif Really this should be: #ifdef CONFIG_DEBUG_LOCK_ALLOC [..] #else [..] #endif This cleans up that logic. Signed-off-by: Steven Rostedt (VMware) Cc: Linus Torvalds Cc: Peter Rosin Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180910214638.55926030@vmware.local.home Signed-off-by: Ingo Molnar --- kernel/locking/rtmutex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2823d4163a37..581edcc63c26 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1485,9 +1485,9 @@ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) __rt_mutex_lock(lock, subclass); } EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); -#endif -#ifndef CONFIG_DEBUG_LOCK_ALLOC +#else /* !CONFIG_DEBUG_LOCK_ALLOC */ + /** * rt_mutex_lock - lock a rt_mutex * -- cgit v1.2.3 From a6ae928c25835ca18deb4a527079f169b68ed292 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Mon, 10 Sep 2018 15:52:06 +0200 Subject: Revert "printk: make sure to print log on console." This reverts commit 375899cddcbb26881b03cb3fbdcfd600e4e67f4a. The visibility of early messages did not longer take into account "quiet", "debug", and "loglevel" early parameters. It would be possible to invalidate and recompute LOG_NOCONS flag for the affected messages. But it would be hairy. Instead this patch just reverts the problematic commit. We could come up with a better solution for the original problem. For example, we could simplify the logic and just mark messages that should always be visible or always invisible on the console. Also this patch reverts the related build fix commit ffaa619af1b06 ("printk: Fix warning about unused suppress_message_printing"). Finally, this patch does not put back the unused LOG_NOCONS flag. Link: http://lkml.kernel.org/r/20180910145747.emvfzv4mzlk5dfqk@pathway.suse.cz Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H . 
Peter Anvin" Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: Steven Rostedt Cc: Maninder Singh Reported-by: Hans de Goede Acked-by: Hans de Goede Acked-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 9a63aeeaaf5d..e30e5023511b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -349,7 +349,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT; */ enum log_flags { - LOG_NOCONS = 1, /* suppress print, do not print to console */ LOG_NEWLINE = 2, /* text ended with a newline */ LOG_PREFIX = 4, /* text started with a prefix */ LOG_CONT = 8, /* text is a fragment of a continuation line */ @@ -1879,9 +1878,6 @@ int vprintk_store(int facility, int level, if (dict) lflags |= LOG_PREFIX|LOG_NEWLINE; - if (suppress_message_printing(level)) - lflags |= LOG_NOCONS; - return log_output(facility, level, lflags, dict, dictlen, text, text_len); } @@ -2030,6 +2026,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, const char *text, size_t len) {} static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size) { return 0; } +static bool suppress_message_printing(int level) { return false; } #endif /* CONFIG_PRINTK */ @@ -2365,10 +2362,11 @@ skip: break; msg = log_from_idx(console_idx); - if (msg->flags & LOG_NOCONS) { + if (suppress_message_printing(msg->level)) { /* - * Skip record if !ignore_loglevel, and - * record has level above the console loglevel. + * Skip record we have buffered and already printed + * directly to the console when we received it, and + * record that has level above the console loglevel. */ console_idx = log_next(console_idx); console_seq++; -- cgit v1.2.3 From 74f008f251bc1797109b7f027d3fdb1ad620385d Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Thu, 2 Aug 2018 12:34:22 +0200 Subject: kernel/reboot.c: export pm_power_off_prepare Export pm_power_off_prepare. It is needed to implement power off on Freescale/NXP iMX6 based boards with external power management integrated circuit (PMIC). Signed-off-by: Oleksij Rempel Signed-off-by: Mark Brown --- kernel/reboot.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/reboot.c b/kernel/reboot.c index 8fb44dec9ad7..e1b79b6a2735 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -49,6 +49,7 @@ int reboot_force; */ void (*pm_power_off_prepare)(void); +EXPORT_SYMBOL_GPL(pm_power_off_prepare); /** * emergency_restart - reboot the system -- cgit v1.2.3 From cb92173d1f0474784c6171a9d3fdbbca0ee53554 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 11 Sep 2018 11:51:27 +0200 Subject: locking/lockdep, cpu/hotplug: Annotate AP thread Anybody trying to assert the cpu_hotplug_lock is held (lockdep_assert_cpus_held()) from AP callbacks will fail, because the lock is held by the BP. Stick in an explicit annotation in cpuhp_thread_fun() to make this work. 
Reported-by: Ingo Molnar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-tip-commits@vger.kernel.org Fixes: cb538267ea1e ("jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations") Link: http://lkml.kernel.org/r/20180911095127.GT24082@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/cpu.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 0097acec1c71..be4859f07153 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -315,6 +315,16 @@ void lockdep_assert_cpus_held(void) percpu_rwsem_assert_held(&cpu_hotplug_lock); } +static void lockdep_acquire_cpus_lock(void) +{ + rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_); +} + +static void lockdep_release_cpus_lock(void) +{ + rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_); +} + /* * Wait for currently running CPU hotplug operations to complete (if any) and * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects @@ -344,6 +354,17 @@ void cpu_hotplug_enable(void) cpu_maps_update_done(); } EXPORT_SYMBOL_GPL(cpu_hotplug_enable); + +#else + +static void lockdep_acquire_cpus_lock(void) +{ +} + +static void lockdep_release_cpus_lock(void) +{ +} + #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_HOTPLUG_SMT @@ -616,6 +637,12 @@ static void cpuhp_thread_fun(unsigned int cpu) */ smp_mb(); + /* + * The BP holds the hotplug lock, but we're now running on the AP, + * ensure that anybody asserting the lock is held, will actually find + * it so. + */ + lockdep_acquire_cpus_lock(); cpuhp_lock_acquire(bringup); if (st->single) { @@ -661,6 +688,7 @@ static void cpuhp_thread_fun(unsigned int cpu) } cpuhp_lock_release(bringup); + lockdep_release_cpus_lock(); if (!st->should_run) complete_ap_thread(st, bringup); -- cgit v1.2.3 From 86989c41b5ea08776c450cb759592532314a4ed6 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Jul 2018 19:47:27 -0500 Subject: signal: Always ignore SIGKILL and SIGSTOP sent to the global init If the first process started (aka /sbin/init) receives a SIGKILL it will panic the system if it is delivered. Making the system unusable and undebugable. It isn't much better if the first process started receives SIGSTOP. So always ignore SIGSTOP and SIGKILL sent to init. This is done in a separate clause in sig_task_ignored as force_sig_info can clear SIG_UNKILLABLE and this protection should work even then. Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 5843c541fda9..b33264bb2064 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -78,6 +78,10 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force) handler = sig_handler(t, sig); + /* SIGKILL and SIGSTOP may not be sent to the global init */ + if (unlikely(is_global_init(t) && sig_kernel_only(sig))) + return true; + if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && handler == SIG_DFL && !(force && sig_kernel_only(sig))) return true; -- cgit v1.2.3 From 55a3235fc71bf34303e34a95eeee235b2d2a35dd Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 19 Jul 2018 20:33:53 -0500 Subject: signal: Properly deliver SIGILL from uprobes For userspace to tell the difference between a random signal and an exception, the exception must include siginfo information. 
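For illustration only (hypothetical userspace, not part of this patch): with SA_SIGINFO, a handler can tell a kernel-generated SIGILL apart from one injected with kill(2) by inspecting si_code, which is exactly the information that is lost when the signal is sent with SEND_SIG_FORCED.

#include <signal.h>

static volatile sig_atomic_t ill_was_exception;

static void sigill_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* SI_USER means kill()/raise(); SI_KERNEL or an ILL_* code means a real fault */
	ill_was_exception = (info->si_code != SI_USER);
}

static void install_sigill_handler(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigill_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGILL, &sa, NULL);
}
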
Using SEND_SIG_FORCED for SIGILL is thus wrong, and it will result in userspace seeing si_code == SI_USER (like a random signal) instead of si_code == SI_KERNEL or a more specific si_code as all exceptions deliver. Therefore replace force_sig_info(SIGILL, SEND_SIG_FORCE, current) with force_sig(SIG_ILL, current) which gets this right and is shorter and easier to type. Fixes: 014940bad8e4 ("uprobes/x86: Send SIGILL if arch_uprobe_post_xol() fails") Fixes: 0b5256c7f173 ("uprobes: Send SIGILL if handle_trampoline() fails") Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. Biederman" --- kernel/events/uprobes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3207a4d26849..2bf792d22087 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1858,7 +1858,7 @@ static void handle_trampoline(struct pt_regs *regs) sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); - force_sig_info(SIGILL, SEND_SIG_FORCED, current); + force_sig(SIGILL, current); } @@ -1966,7 +1966,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) if (unlikely(err)) { uprobe_warn(current, "execute the probed insn, sending SIGILL."); - force_sig_info(SIGILL, SEND_SIG_FORCED, current); + force_sig(SIGILL, current); } } -- cgit v1.2.3 From 3597dfe01d12f570bc739da67f857fd222a3ea66 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 3 Sep 2018 20:02:46 +0200 Subject: signal: Always deliver the kernel's SIGKILL and SIGSTOP to a pid namespace init Instead of playing whack-a-mole and changing SEND_SIG_PRIV to SEND_SIG_FORCED throughout the kernel to ensure a pid namespace init gets signals sent by the kernel, stop allowing a pid namespace init to ignore SIGKILL or SIGSTOP sent by the kernel. A pid namespace init is only supposed to be able to ignore signals sent from itself and children with SIG_DFL. Fixes: 921cf9f63089 ("signals: protect cinit from unblocked SIG_DFL signals") Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index b33264bb2064..8081ab79e97d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1039,7 +1039,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, - from_ancestor_ns || (info == SEND_SIG_FORCED))) + from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED))) goto ret; pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; -- cgit v1.2.3 From 035150540545f62bada95860ba00fe1e0cd62f63 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 20 Jul 2018 05:31:53 -0500 Subject: signal: Don't send siginfo to kthreads. Today kernel threads never dequeue siginfo so it is pointless to enqueue siginfo for them. The usb gadget mass storage driver goes one farther and uses SEND_SIG_FORCED to guarantee that no siginfo is even enqueued. Generalize the optimization of the usb mass storage driver and never perform an unnecessary allocation when delivering signals to kthreads. Switch the mass storage driver from sending signals with SEND_SIG_FORCED to SEND_SIG_PRIV. As using SEND_SIG_FORCED is now unnecessary. Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. 
Biederman" --- drivers/usb/gadget/function/f_mass_storage.c | 2 +- kernel/signal.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 70038a475c9f..cb402e7a1e9b 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -403,7 +403,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state) common->exception_req_tag = common->ep0_req_tag; common->state = new_state; if (common->thread_task) - send_sig_info(SIGUSR1, SEND_SIG_FORCED, + send_sig_info(SIGUSR1, SEND_SIG_PRIV, common->thread_task); } spin_unlock_irqrestore(&common->lock, flags); diff --git a/kernel/signal.c b/kernel/signal.c index 8081ab79e97d..20931a892ace 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1057,7 +1057,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, * fast-pathed signals for kernel-internal things like SIGSTOP * or SIGKILL. */ - if (info == SEND_SIG_FORCED) + if ((info == SEND_SIG_FORCED) || (t->flags & PF_KTHREAD)) goto out_set; /* -- cgit v1.2.3 From f149b31557446aff9ca96d4be7e39cc266f6e7cc Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 3 Sep 2018 09:50:36 +0200 Subject: signal: Never allocate siginfo for SIGKILL or SIGSTOP The SIGKILL and SIGSTOP signals are never delivered to userspace so queued siginfo for these signals can never be observed. Therefore remove the chance of failure by never even attempting to allocate siginfo in those cases. Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 20931a892ace..d7d1adf735f4 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1054,10 +1054,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, result = TRACE_SIGNAL_DELIVERED; /* - * fast-pathed signals for kernel-internal things like SIGSTOP - * or SIGKILL. + * Skip useless siginfo allocation for SIGKILL SIGSTOP, + * and kernel threads. */ - if ((info == SEND_SIG_FORCED) || (t->flags & PF_KTHREAD)) + if ((info == SEND_SIG_FORCED) || + sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) goto out_set; /* -- cgit v1.2.3 From 079b22dc9be985c591589fcb94769b8e13518aa0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 3 Sep 2018 10:32:52 +0200 Subject: signal: Use SEND_SIG_PRIV not SEND_SIG_FORCED with SIGKILL and SIGSTOP Now that siginfo is never allocated for SIGKILL and SIGSTOP there is no difference between SEND_SIG_PRIV and SEND_SIG_FORCED for SIGKILL and SIGSTOP. This makes SEND_SIG_FORCED unnecessary and redundant in the presence of SIGKILL and SIGSTOP. Therefore change users of SEND_SIG_FORCED that are sending SIGKILL or SIGSTOP to use SEND_SIG_PRIV instead. This removes the last users of SEND_SIG_FORCED. Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. 
Biederman" --- kernel/pid_namespace.c | 2 +- kernel/ptrace.c | 4 ++-- mm/oom_kill.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 2a2ac53d8b8b..c8d53397bbdd 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -216,7 +216,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) idr_for_each_entry_continue(&pid_ns->idr, pid, nr) { task = pid_task(pid, PIDTYPE_PID); if (task && !__fatal_signal_pending(task)) - send_sig_info(SIGKILL, SEND_SIG_FORCED, task); + send_sig_info(SIGKILL, SEND_SIG_PRIV, task); } read_unlock(&tasklist_lock); rcu_read_unlock(); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 21fec73d45d4..45f77a1b9c97 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -396,7 +396,7 @@ static int ptrace_attach(struct task_struct *task, long request, /* SEIZE doesn't trap tracee on attach */ if (!seize) - send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); + send_sig_info(SIGSTOP, SEND_SIG_PRIV, task); spin_lock(&task->sighand->siglock); @@ -563,7 +563,7 @@ void exit_ptrace(struct task_struct *tracer, struct list_head *dead) list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { if (unlikely(p->ptrace & PT_EXITKILL)) - send_sig_info(SIGKILL, SEND_SIG_FORCED, p); + send_sig_info(SIGKILL, SEND_SIG_PRIV, p); if (__ptrace_detach(tracer, p)) list_add(&p->ptrace_entry, dead); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b5b25e4dcbbb..3bcfeaaeed87 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -858,7 +858,7 @@ static void __oom_kill_process(struct task_struct *victim) * in order to prevent the OOM victim from depleting the memory * reserves from the user space under its control. */ - do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID); + do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID); mark_oom_victim(victim); pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), @@ -896,7 +896,7 @@ static void __oom_kill_process(struct task_struct *victim) */ if (unlikely(p->flags & PF_KTHREAD)) continue; - do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID); + do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID); } rcu_read_unlock(); -- cgit v1.2.3 From 4ff4c31a6e85f4c49fbeebeaa28018d002884b5a Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 3 Sep 2018 10:39:04 +0200 Subject: signal: Remove SEND_SIG_FORCED There are no more users of SEND_SIG_FORCED so it may be safely removed. Remove the definition of SEND_SIG_FORCED, it's use in is_si_special, it's use in TP_STORE_SIGINFO, and it's use in __send_signal as without any users the uses of SEND_SIG_FORCED are now unncessary. This makes the code simpler, easier to understand and use. Users of signal sending functions now no longer need to ask themselves do I need to use SEND_SIG_FORCED. Reviewed-by: Thomas Gleixner Signed-off-by: "Eric W. Biederman" --- include/linux/sched/signal.h | 1 - include/trace/events/signal.h | 3 +-- kernel/signal.c | 7 +++---- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 9b6968cbde14..9e07f3521549 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -477,7 +477,6 @@ static inline int kill_cad_pid(int sig, int priv) /* These can be the second arg to send_sig_info/send_group_sig_info. 
*/ #define SEND_SIG_NOINFO ((struct siginfo *) 0) #define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) /* * True if we are on the alternate signal stack. diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 86582923d51c..3deeed50ffd0 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -11,8 +11,7 @@ #define TP_STORE_SIGINFO(__entry, info) \ do { \ - if (info == SEND_SIG_NOINFO || \ - info == SEND_SIG_FORCED) { \ + if (info == SEND_SIG_NOINFO) { \ __entry->errno = 0; \ __entry->code = SI_USER; \ } else if (info == SEND_SIG_PRIV) { \ diff --git a/kernel/signal.c b/kernel/signal.c index d7d1adf735f4..ec136fda457a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -736,7 +736,7 @@ static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) static inline int is_si_special(const struct siginfo *info) { - return info <= SEND_SIG_FORCED; + return info <= SEND_SIG_PRIV; } static inline bool si_fromuser(const struct siginfo *info) @@ -1039,7 +1039,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, - from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED))) + from_ancestor_ns || (info == SEND_SIG_PRIV))) goto ret; pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; @@ -1057,8 +1057,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, * Skip useless siginfo allocation for SIGKILL SIGSTOP, * and kernel threads. */ - if ((info == SEND_SIG_FORCED) || - sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) + if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) goto out_set; /* -- cgit v1.2.3 From a7c19db38d62fc1ce797dba19936e9f81cf2b9fb Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 6 Sep 2018 17:26:04 -0700 Subject: bpf: add bpffs pretty print for program array map Added bpffs pretty print for program array map. For a particular array index, if the program array points to a valid program, the ": " will be printed out like 0: 6 which means bpf program with id "6" is installed at index "0". 
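For illustration (hypothetical pin path and ids, assuming the program array was created with BTF and pinned in bpffs): reading the pinned map file now yields one such line per populated slot, for example

    0: 6
    2: 14

while empty slots produce no output.
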
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- kernel/bpf/arraymap.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index f9d24121be99..dded84cbe814 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -553,6 +553,29 @@ static void bpf_fd_array_map_clear(struct bpf_map *map) fd_array_map_delete_elem(map, &i); } +static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, + struct seq_file *m) +{ + void **elem, *ptr; + u32 prog_id; + + rcu_read_lock(); + + elem = array_map_lookup_elem(map, key); + if (elem) { + ptr = READ_ONCE(*elem); + if (ptr) { + seq_printf(m, "%u: ", *(u32 *)key); + prog_id = prog_fd_array_sys_lookup_elem(ptr); + btf_type_seq_show(map->btf, map->btf_value_type_id, + &prog_id, m); + seq_puts(m, "\n"); + } + } + + rcu_read_unlock(); +} + const struct bpf_map_ops prog_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -564,7 +587,7 @@ const struct bpf_map_ops prog_array_map_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_release_uref = bpf_fd_array_map_clear, - .map_check_btf = map_check_no_btf, + .map_seq_show_elem = prog_array_map_seq_show_elem, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, -- cgit v1.2.3 From d0555fc78fdba5646a460e83bd2d8249c539bb89 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 11 Sep 2018 19:19:14 +0900 Subject: kprobes: Remove pointless BUG_ON() from disarming process All aggr_probes at this line are already disarmed by disable_kprobe() or checked by kprobe_disarmed(). So this BUG_ON() is pointless, remove it. Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: David S . Miller Cc: Linus Torvalds Cc: Naveen N . Rao Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/153666115463.21306.8799008438116029806.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ab257be4d924..d1edd8d5641e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1704,7 +1704,6 @@ noclean: return 0; disarmed: - BUG_ON(!kprobe_disarmed(ap)); hlist_del_rcu(&ap->hlist); return 0; } -- cgit v1.2.3 From c72e6742f62d7bb82a77a41ca53940cb8f73e60f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 11 Sep 2018 19:19:43 +0900 Subject: kprobes: Remove pointless BUG_ON() from add_new_kprobe() Before calling add_new_kprobe(), aggr_probe's GONE flag and kprobe GONE flag are cleared. We don't need to worry about that flag at this point. Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: David S . Miller Cc: Linus Torvalds Cc: Naveen N . 
Rao Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/153666118298.21306.4915366706875652652.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index d1edd8d5641e..231569e1e2c8 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1259,8 +1259,6 @@ NOKPROBE_SYMBOL(cleanup_rp_inst); /* Add the new probe to ap->list */ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) { - BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); - if (p->post_handler) unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ -- cgit v1.2.3 From a6d18e65dff2b73ceeb187c598b48898e36ad7b1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 11 Sep 2018 19:20:11 +0900 Subject: kprobes: Remove pointless BUG_ON() from reuse_unused_kprobe() Since reuse_unused_kprobe() is called when the given kprobe is unused, checking it inside again with BUG_ON() is pointless. Remove it. Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Linus Torvalds Cc: Naveen N. Rao Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/153666121154.21306.17540752948574483565.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 231569e1e2c8..277a6cbe83db 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -704,7 +704,6 @@ static void reuse_unused_kprobe(struct kprobe *ap) { struct optimized_kprobe *op; - BUG_ON(!kprobe_unused(ap)); /* * Unused kprobe MUST be on the way of delayed unoptimizing (means * there is still a relative jump) and disabled. -- cgit v1.2.3 From 819319fc93461c07b9cdb3064f154bd8cfd48172 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 11 Sep 2018 19:20:40 +0900 Subject: kprobes: Return error if we fail to reuse kprobe instead of BUG_ON() Make reuse_unused_kprobe() return an error code if it fails to reuse an unused kprobe for an optprobe, instead of calling BUG_ON(). Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Linus Torvalds Cc: Naveen N. 
Rao Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 277a6cbe83db..63c342e5e6c3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force) } /* Cancel unoptimizing for reusing */ -static void reuse_unused_kprobe(struct kprobe *ap) +static int reuse_unused_kprobe(struct kprobe *ap) { struct optimized_kprobe *op; + int ret; /* * Unused kprobe MUST be on the way of delayed unoptimizing (means @@ -713,8 +714,12 @@ static void reuse_unused_kprobe(struct kprobe *ap) /* Enable the probe again */ ap->flags &= ~KPROBE_FLAG_DISABLED; /* Optimize it again (remove from op->list) */ - BUG_ON(!kprobe_optready(ap)); + ret = kprobe_optready(ap); + if (ret) + return ret; + optimize_kprobe(ap); + return 0; } /* Remove optimized instructions */ @@ -939,11 +944,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt) #define kprobe_disarmed(p) kprobe_disabled(p) #define wait_for_kprobe_optimizer() do {} while (0) -/* There should be no unused kprobes can be reused without optimization */ -static void reuse_unused_kprobe(struct kprobe *ap) +static int reuse_unused_kprobe(struct kprobe *ap) { + /* + * If the optimized kprobe is NOT supported, the aggr kprobe is + * released at the same time that the last aggregated kprobe is + * unregistered. + * Thus there should be no chance to reuse unused kprobe. + */ printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); - BUG_ON(kprobe_unused(ap)); + return -EINVAL; } static void free_aggr_kprobe(struct kprobe *p) @@ -1315,9 +1325,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) goto out; } init_aggr_kprobe(ap, orig_p); - } else if (kprobe_unused(ap)) + } else if (kprobe_unused(ap)) { /* This probe is going to die. Rescue it */ - reuse_unused_kprobe(ap); + ret = reuse_unused_kprobe(ap); + if (ret) + goto out; + } if (kprobe_gone(ap)) { /* -- cgit v1.2.3 From cbdd96f5586151e48317d90a403941ec23f12660 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 11 Sep 2018 19:21:09 +0900 Subject: kprobes: Don't call BUG_ON() if there is a kprobe in use on free list Instead of calling BUG_ON(), if we find a kprobe in use on free kprobe list, just remove it from the list and keep it on kprobe hash list as same as other in-use kprobes. Signed-off-by: Masami Hiramatsu Cc: Anil S Keshavamurthy Cc: David S . Miller Cc: Linus Torvalds Cc: Naveen N . Rao Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/153666126882.21306.10738207224288507996.stgit@devbox Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 63c342e5e6c3..90e98e233647 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -546,8 +546,14 @@ static void do_free_cleaned_kprobes(void) struct optimized_kprobe *op, *tmp; list_for_each_entry_safe(op, tmp, &freeing_list, list) { - BUG_ON(!kprobe_unused(&op->kp)); list_del_init(&op->list); + if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { + /* + * This must not happen, but if there is a kprobe + * still in use, keep it on kprobes hash list. 
+ */ + continue; + } free_aggr_kprobe(&op->kp); } } -- cgit v1.2.3 From 4b1c5d917d34f705096bb7dd8a2bd19b0881970e Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 12 Sep 2018 10:29:11 -0700 Subject: bpf: btf: Fix end boundary calculation for type section The end boundary math for type section is incorrect in btf_check_all_metas(). It just happens that hdr->type_off is always 0 for now because there are only two sections (type and string) and string section must be at the end (ensured in btf_parse_str_sec). However, type_off may not be 0 if a new section would be added later. This patch fixes it. Fixes: f80442a4cd18 ("bpf: btf: Change how section is supported in btf_header") Reported-by: Dmitry Vyukov Signed-off-by: Martin KaFai Lau Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2590700237c1..138f0302692e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env) hdr = &btf->hdr; cur = btf->nohdr_data + hdr->type_off; - end = btf->nohdr_data + hdr->type_len; + end = cur + hdr->type_len; env->log_type_id = 1; while (cur < end) { -- cgit v1.2.3 From dd066823db2ac4e22f721ec85190817b58059a54 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Sep 2018 14:06:10 -0700 Subject: bpf/verifier: disallow pointer subtraction Subtraction of pointers was accidentally allowed for unpriv programs by commit 82abbf8d2fc4. Revert that part of commit. Fixes: 82abbf8d2fc4 ("bpf: do not allow root to mangle valid pointers") Reported-by: Jann Horn Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 92246117d2b0..bb07e74b34a2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, * an arbitrary scalar. Disallow all math except * pointer subtraction */ - if (opcode == BPF_SUB){ + if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } -- cgit v1.2.3 From d58e468b1112dcd1d5193c0a89ff9f98b5a3e8b9 Mon Sep 17 00:00:00 2001 From: Petar Penkov Date: Fri, 14 Sep 2018 07:46:18 -0700 Subject: flow_dissector: implements flow dissector BPF hook Adds a hook for programs of type BPF_PROG_TYPE_FLOW_DISSECTOR and attach type BPF_FLOW_DISSECTOR that is executed in the flow dissector path. The BPF program is per-network namespace. 
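For illustration only (not part of this series; SEC() is the usual libbpf/selftest section macro, defined inline here, and the program name is made up): a minimal program of the new type just looks at skb->flow_keys and returns a verdict. A real dissector would walk the headers with bpf_skb_load_bytes() starting at flow_keys->nhoff and fill in n_proto, ip_proto, the addresses and the ports before returning BPF_OK; any other return value makes __skb_flow_dissect() report failure.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("flow_dissector")
int trivial_dissector(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;

	keys->thoff = keys->nhoff;	/* nothing parsed in this sketch */
	return BPF_DROP;		/* != BPF_OK: dissection not handled */
}

char _license[] SEC("license") = "GPL";

Such a program is loaded as BPF_PROG_TYPE_FLOW_DISSECTOR and attached to the current network namespace with the BPF_PROG_ATTACH command using attach type BPF_FLOW_DISSECTOR.
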
Signed-off-by: Petar Penkov Signed-off-by: Willem de Bruijn Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/linux/bpf_types.h | 1 + include/linux/skbuff.h | 7 +++ include/net/net_namespace.h | 3 + include/net/sch_generic.h | 12 +++- include/uapi/linux/bpf.h | 26 +++++++++ kernel/bpf/syscall.c | 8 +++ kernel/bpf/verifier.c | 32 +++++++++++ net/core/filter.c | 70 +++++++++++++++++++++++ net/core/flow_dissector.c | 134 ++++++++++++++++++++++++++++++++++++++++++++ 10 files changed, 291 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 523481a3471b..988a00797bcd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -212,6 +212,7 @@ enum bpf_reg_type { PTR_TO_PACKET_META, /* skb->data - meta_len */ PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ }; /* The information passed from prog-specific *_is_valid_access diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index cd26c090e7c0..22083712dd18 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -32,6 +32,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) #ifdef CONFIG_INET BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) #endif +BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 17a13e4785fc..ce0e863f02a2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,8 @@ struct scatterlist; struct pipe_inode_info; struct iov_iter; struct napi_struct; +struct bpf_prog; +union bpf_attr; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -1192,6 +1194,11 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); +int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); + +int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); + bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 9b5fdc50519a..99d4148e0f90 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -43,6 +43,7 @@ struct ctl_table_header; struct net_generic; struct uevent_sock; struct netns_ipvs; +struct bpf_prog; #define NETDEV_HASHBITS 8 @@ -145,6 +146,8 @@ struct net { #endif struct net_generic __rcu *gen; + struct bpf_prog __rcu *flow_dissector_prog; + /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM struct netns_xfrm xfrm; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a6d00093f35e..1b81ba85fd2d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -19,6 +19,7 @@ struct Qdisc_ops; struct qdisc_walker; struct tcf_walker; struct module; +struct bpf_flow_keys; typedef int tc_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); @@ -307,9 +308,14 @@ struct tcf_proto { }; struct qdisc_skb_cb { - unsigned int pkt_len; - u16 slave_dev_queue_mapping; - u16 tc_classid; + union { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + struct bpf_flow_keys *flow_keys; + }; #define QDISC_CB_PRIV_LEN 20 unsigned char 
data[QDISC_CB_PRIV_LEN]; }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 66917a4eba27..aa5ccd2385ed 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -152,6 +152,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, + BPF_PROG_TYPE_FLOW_DISSECTOR, }; enum bpf_attach_type { @@ -172,6 +173,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, + BPF_FLOW_DISSECTOR, __MAX_BPF_ATTACH_TYPE }; @@ -2333,6 +2335,7 @@ struct __sk_buff { /* ... here. */ __u32 data_meta; + struct bpf_flow_keys *flow_keys; }; struct bpf_tunnel_key { @@ -2778,4 +2781,27 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; /* ETH_P_* of valid addrs */ + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; /* in6_addr; network order */ + __u32 ipv6_dst[4]; /* in6_addr; network order */ + }; + }; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 3c9636f03bb2..b3c2d09bcf7a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1615,6 +1615,9 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_LIRC_MODE2: ptype = BPF_PROG_TYPE_LIRC_MODE2; break; + case BPF_FLOW_DISSECTOR: + ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; + break; default: return -EINVAL; } @@ -1636,6 +1639,9 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_PROG_TYPE_LIRC_MODE2: ret = lirc_prog_attach(attr, prog); break; + case BPF_PROG_TYPE_FLOW_DISSECTOR: + ret = skb_flow_dissector_bpf_prog_attach(attr, prog); + break; default: ret = cgroup_bpf_prog_attach(attr, ptype, prog); } @@ -1688,6 +1694,8 @@ static int bpf_prog_detach(const union bpf_attr *attr) return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); + case BPF_FLOW_DISSECTOR: + return skb_flow_dissector_bpf_prog_detach(attr); default: return -EINVAL; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6ff1bac1795d..8ccbff4fff93 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -261,6 +261,7 @@ static const char * const reg_type_str[] = { [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", + [PTR_TO_FLOW_KEYS] = "flow_keys", }; static char slot_type_char[] = { @@ -965,6 +966,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: + case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: return true; default: @@ -1238,6 +1240,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_FLOW_DISSECTOR: if (meta) return meta->pkt_access; @@ -1321,6 +1324,18 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, return -EACCES; } +static int check_flow_keys_access(struct bpf_verifier_env *env, int off, + int size) +{ + if (size < 0 || off < 0 || + (u64)off + size > sizeof(struct bpf_flow_keys)) { + verbose(env, "invalid access to flow keys off=%d size=%d\n", + off, size); + return -EACCES; + } + return 0; +} + static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state 
*reg) { @@ -1422,6 +1437,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); + case PTR_TO_FLOW_KEYS: + pointer_desc = "flow keys "; + break; case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; @@ -1692,6 +1710,17 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_FLOW_KEYS) { + if (t == BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { + verbose(env, "R%d leaks addr into flow keys\n", + value_regno); + return -EACCES; + } + + err = check_flow_keys_access(env, off, size); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -1839,6 +1868,8 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); + case PTR_TO_FLOW_KEYS: + return check_flow_keys_access(env, reg->off, access_size); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); @@ -4366,6 +4397,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: + case PTR_TO_FLOW_KEYS: /* Only valid matches are exact, which memcmp() above * would have accepted */ diff --git a/net/core/filter.c b/net/core/filter.c index bf5b6efd369a..9cc76f134ddb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5123,6 +5123,17 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +static const struct bpf_func_proto * +flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_skb_load_bytes: + return &bpf_skb_load_bytes_proto; + default: + return bpf_base_func_proto(func_id); + } +} + static const struct bpf_func_proto * lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -5241,6 +5252,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (size != size_default) return false; break; + case bpf_ctx_range(struct __sk_buff, flow_keys): + if (size != sizeof(struct bpf_flow_keys *)) + return false; + break; default: /* Only narrow read access allowed for now. 
*/ if (type == BPF_WRITE) { @@ -5266,6 +5281,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): + case bpf_ctx_range(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5291,6 +5307,7 @@ static bool lwt_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, flow_keys): return false; } @@ -5501,6 +5518,7 @@ static bool tc_cls_act_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; + case bpf_ctx_range(struct __sk_buff, flow_keys): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -5702,6 +5720,7 @@ static bool sk_skb_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, flow_keys): return false; } @@ -5761,6 +5780,39 @@ static bool sk_msg_is_valid_access(int off, int size, return true; } +static bool flow_dissector_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (type == BPF_WRITE) { + switch (off) { + case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): + break; + default: + return false; + } + } + + switch (off) { + case bpf_ctx_range(struct __sk_buff, data): + info->reg_type = PTR_TO_PACKET; + break; + case bpf_ctx_range(struct __sk_buff, data_end): + info->reg_type = PTR_TO_PACKET_END; + break; + case bpf_ctx_range(struct __sk_buff, flow_keys): + info->reg_type = PTR_TO_FLOW_KEYS; + break; + case bpf_ctx_range(struct __sk_buff, tc_classid): + case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range_till(struct __sk_buff, family, local_port): + return false; + } + + return bpf_skb_is_valid_access(off, size, type, prog, info); +} + static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, @@ -6055,6 +6107,15 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, bpf_target_off(struct sock_common, skc_num, 2, target_size)); break; + + case offsetof(struct __sk_buff, flow_keys): + off = si->off; + off -= offsetof(struct __sk_buff, flow_keys); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct qdisc_skb_cb, flow_keys); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, + si->src_reg, off); + break; } return insn - insn_buf; @@ -7018,6 +7079,15 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = { const struct bpf_prog_ops sk_msg_prog_ops = { }; +const struct bpf_verifier_ops flow_dissector_verifier_ops = { + .get_func_proto = flow_dissector_func_proto, + .is_valid_access = flow_dissector_is_valid_access, + .convert_ctx_access = bpf_convert_ctx_access, +}; + +const struct bpf_prog_ops flow_dissector_prog_ops = { +}; + int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ce9eeeb7c024..5c5dd74b5b3b 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -25,6 +25,9 @@ #include #include #include +#include + +static DEFINE_MUTEX(flow_dissector_mutex); static void dissector_set_key(struct 
flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) @@ -62,6 +65,44 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, } EXPORT_SYMBOL(skb_flow_dissector_init); +int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + struct bpf_prog *attached; + struct net *net; + + net = current->nsproxy->net_ns; + mutex_lock(&flow_dissector_mutex); + attached = rcu_dereference_protected(net->flow_dissector_prog, + lockdep_is_held(&flow_dissector_mutex)); + if (attached) { + /* Only one BPF program can be attached at a time */ + mutex_unlock(&flow_dissector_mutex); + return -EEXIST; + } + rcu_assign_pointer(net->flow_dissector_prog, prog); + mutex_unlock(&flow_dissector_mutex); + return 0; +} + +int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) +{ + struct bpf_prog *attached; + struct net *net; + + net = current->nsproxy->net_ns; + mutex_lock(&flow_dissector_mutex); + attached = rcu_dereference_protected(net->flow_dissector_prog, + lockdep_is_held(&flow_dissector_mutex)); + if (!attached) { + mutex_unlock(&flow_dissector_mutex); + return -ENOENT; + } + bpf_prog_put(attached); + RCU_INIT_POINTER(net->flow_dissector_prog, NULL); + mutex_unlock(&flow_dissector_mutex); + return 0; +} /** * skb_flow_get_be16 - extract be16 entity * @skb: sk_buff to extract from @@ -588,6 +629,60 @@ static bool skb_flow_dissect_allowed(int *num_hdrs) return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS); } +static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, + struct flow_dissector *flow_dissector, + void *target_container) +{ + struct flow_dissector_key_control *key_control; + struct flow_dissector_key_basic *key_basic; + struct flow_dissector_key_addrs *key_addrs; + struct flow_dissector_key_ports *key_ports; + + key_control = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL, + target_container); + key_control->thoff = flow_keys->thoff; + if (flow_keys->is_frag) + key_control->flags |= FLOW_DIS_IS_FRAGMENT; + if (flow_keys->is_first_frag) + key_control->flags |= FLOW_DIS_FIRST_FRAG; + if (flow_keys->is_encap) + key_control->flags |= FLOW_DIS_ENCAPSULATION; + + key_basic = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_BASIC, + target_container); + key_basic->n_proto = flow_keys->n_proto; + key_basic->ip_proto = flow_keys->ip_proto; + + if (flow_keys->addr_proto == ETH_P_IP && + dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + key_addrs = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + target_container); + key_addrs->v4addrs.src = flow_keys->ipv4_src; + key_addrs->v4addrs.dst = flow_keys->ipv4_dst; + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_keys->addr_proto == ETH_P_IPV6 && + dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + key_addrs = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + target_container); + memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src, + sizeof(key_addrs->v6addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { + key_ports = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS, + target_container); + key_ports->src = flow_keys->sport; + key_ports->dst = flow_keys->dport; + } +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified @@ 
-619,6 +714,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_vlan *key_vlan; enum flow_dissect_ret fdret; enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; + struct bpf_prog *attached; int num_hdrs = 0; u8 ip_proto = 0; bool ret; @@ -658,6 +754,44 @@ bool __skb_flow_dissect(const struct sk_buff *skb, FLOW_DISSECTOR_KEY_BASIC, target_container); + rcu_read_lock(); + attached = skb ? rcu_dereference(dev_net(skb->dev)->flow_dissector_prog) + : NULL; + if (attached) { + /* Note that even though the const qualifier is discarded + * throughout the execution of the BPF program, all changes(the + * control block) are reverted after the BPF program returns. + * Therefore, __skb_flow_dissect does not alter the skb. + */ + struct bpf_flow_keys flow_keys = {}; + struct bpf_skb_data_end cb_saved; + struct bpf_skb_data_end *cb; + u32 result; + + cb = (struct bpf_skb_data_end *)skb->cb; + + /* Save Control Block */ + memcpy(&cb_saved, cb, sizeof(cb_saved)); + memset(cb, 0, sizeof(cb_saved)); + + /* Pass parameters to the BPF program */ + cb->qdisc_cb.flow_keys = &flow_keys; + flow_keys.nhoff = nhoff; + + bpf_compute_data_pointers((struct sk_buff *)skb); + result = BPF_PROG_RUN(attached, skb); + + /* Restore state */ + memcpy(cb, &cb_saved, sizeof(cb_saved)); + + __skb_flow_bpf_to_target(&flow_keys, flow_dissector, + target_container); + key_control->thoff = min_t(u16, key_control->thoff, skb->len); + rcu_read_unlock(); + return result == BPF_OK; + } + rcu_read_unlock(); + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct ethhdr *eth = eth_hdr(skb); -- cgit v1.2.3 From 82058d6684658430cd9b4123d4c3e863fd48f813 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 20 Jul 2018 16:35:14 -0500 Subject: signal: Use group_send_sig_info to kill all processes in a pid namespace Replace send_sig_info in zap_pid_ns_processes with group_send_sig_info. This makes more sense as the entire process group is being killed. More importantly this allows the kill of those processes with PIDTYPE_MAX to indicate all of the process in the pid namespace are being signaled. This is needed for fork to detect when signals are sent to a group of processes. Admittedly fork has another case to catch SIGKILL but the principle remains that it is desirable to know when a group of processes is being signaled. Signed-off-by: "Eric W. Biederman" --- kernel/pid_namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index c8d53397bbdd..aa6e72fb7c08 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -216,7 +216,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) idr_for_each_entry_continue(&pid_ns->idr, pid, nr) { task = pid_task(pid, PIDTYPE_PID); if (task && !__fatal_signal_pending(task)) - send_sig_info(SIGKILL, SEND_SIG_PRIV, task); + group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX); } read_unlock(&tasklist_lock); rcu_read_unlock(); -- cgit v1.2.3 From b21c5bd562dd97ac0b936439fc64bd30ec09b2e0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 21 Jul 2018 11:34:03 -0500 Subject: signal: Remove specific_send_sig_info This function is static and it only has two callers. As specific_send_sig_info is only called twice remembering what specific_send_sig_info does when reading the code is difficutl and it makes it hard to see which sending sending functions are equivalent to which others. 
So remove specific_send_sig_info to make the code easier to read. Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index ec136fda457a..99e91163c9a3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1199,12 +1199,6 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) return send_signal(sig, info, p, PIDTYPE_TGID); } -static int -specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) -{ - return send_signal(sig, info, t, PIDTYPE_PID); -} - int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, enum pid_type type) { @@ -1254,7 +1248,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) */ if (action->sa.sa_handler == SIG_DFL && !t->ptrace) t->signal->flags &= ~SIGNAL_UNKILLABLE; - ret = specific_send_sig_info(sig, info, t); + ret = send_signal(sig, info, t, PIDTYPE_PID); spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; @@ -2330,7 +2324,7 @@ static int ptrace_signal(int signr, siginfo_t *info) /* If the (new) signal is now blocked, requeue it. */ if (sigismember(¤t->blocked, signr)) { - specific_send_sig_info(signr, info, current); + send_signal(signr, info, current, PIDTYPE_PID); signr = 0; } -- cgit v1.2.3 From fb50f5a4011c499bc1b1fae77299cfcb3945e51b Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 13 Sep 2018 19:26:35 +0200 Subject: signal: Pair exports with their functions For readability and consistency with the other exports in kernel/signal.c pair the exports of signal sending functions with their functions, instead of having the exports in one big clump. Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 99e91163c9a3..e16278710b36 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -176,6 +176,7 @@ void recalc_sigpending(void) clear_thread_flag(TIF_SIGPENDING); } +EXPORT_SYMBOL(recalc_sigpending); void calculate_sigpending(void) { @@ -466,6 +467,7 @@ void flush_signals(struct task_struct *t) flush_sigqueue(&t->signal->shared_pending); spin_unlock_irqrestore(&t->sighand->siglock, flags); } +EXPORT_SYMBOL(flush_signals); #ifdef CONFIG_POSIX_TIMERS static void __flush_itimer_signals(struct sigpending *pending) @@ -684,6 +686,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) #endif return signr; } +EXPORT_SYMBOL_GPL(dequeue_signal); /* * Tell a process that it has a new active signal.. @@ -1490,6 +1493,7 @@ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) return do_send_sig_info(sig, info, p, PIDTYPE_PID); } +EXPORT_SYMBOL(send_sig_info); #define __si_special(priv) \ ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) @@ -1499,11 +1503,13 @@ send_sig(int sig, struct task_struct *p, int priv) { return send_sig_info(sig, __si_special(priv), p); } +EXPORT_SYMBOL(send_sig); void force_sig(int sig, struct task_struct *p) { force_sig_info(sig, SEND_SIG_PRIV, p); } +EXPORT_SYMBOL(force_sig); /* * When things go south during signal handling, we @@ -2634,14 +2640,6 @@ out: } } -EXPORT_SYMBOL(recalc_sigpending); -EXPORT_SYMBOL_GPL(dequeue_signal); -EXPORT_SYMBOL(flush_signals); -EXPORT_SYMBOL(force_sig); -EXPORT_SYMBOL(send_sig); -EXPORT_SYMBOL(send_sig_info); -EXPORT_SYMBOL(sigprocmask); - /* * System call entry points. 
*/ @@ -2735,6 +2733,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) __set_current_blocked(&newset); return 0; } +EXPORT_SYMBOL(sigprocmask); /** * sys_rt_sigprocmask - change the list of currently blocked signals -- cgit v1.2.3 From 83f365554e47997ec68dc4eca3f5dce525cd15c3 Mon Sep 17 00:00:00 2001 From: Vaibhav Nagarnaik Date: Fri, 7 Sep 2018 15:31:29 -0700 Subject: ring-buffer: Allow for rescheduling when removing pages When reducing ring buffer size, pages are removed by scheduling a work item on each CPU for the corresponding CPU ring buffer. After the pages are removed from ring buffer linked list, the pages are free()d in a tight loop. The loop does not give up CPU until all pages are removed. In a worst case behavior, when lot of pages are to be freed, it can cause system stall. After the pages are removed from the list, the free() can happen while the work is rescheduled. Call cond_resched() in the loop to prevent the system hangup. Link: http://lkml.kernel.org/r/20180907223129.71994-1-vnagarnaik@google.com Cc: stable@vger.kernel.org Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Reported-by: Jason Behmer Signed-off-by: Vaibhav Nagarnaik Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d92d4a982fd..65bd4616220d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) tmp_iter_page = first_page; do { + cond_resched(); + to_remove_page = tmp_iter_page; rb_inc_page(cpu_buffer, &tmp_iter_page); -- cgit v1.2.3 From 8ffe4e61c06a48324cfd97f1199bb9838acce2f2 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Sun, 9 Sep 2018 01:58:37 +0800 Subject: irq/matrix: Split out the CPU selection code into a helper Linux finds the CPU which has the lowest vector allocation count to spread out the non managed interrupts across the possible target CPUs, but does not do so for managed interrupts. Split out the CPU selection code into a helper function for reuse. No functional change. 
Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180908175838.14450-1-dou_liyang@163.com --- kernel/irq/matrix.c | 65 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 5092494bf261..67768bbe736e 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, return area; } +/* Find the best CPU which has the lowest vector allocation count */ +static unsigned int matrix_find_best_cpu(struct irq_matrix *m, + const struct cpumask *msk) +{ + unsigned int cpu, best_cpu, maxavl = 0; + struct cpumap *cm; + + best_cpu = UINT_MAX; + + for_each_cpu(cpu, msk) { + cm = per_cpu_ptr(m->maps, cpu); + + if (!cm->online || cm->available <= maxavl) + continue; + + best_cpu = cpu; + maxavl = cm->available; + } + return best_cpu; +} + /** * irq_matrix_assign_system - Assign system wide entry in the matrix * @m: Matrix pointer @@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m) int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, bool reserved, unsigned int *mapped_cpu) { - unsigned int cpu, best_cpu, maxavl = 0; + unsigned int cpu, bit; struct cpumap *cm; - unsigned int bit; - best_cpu = UINT_MAX; - for_each_cpu(cpu, msk) { - cm = per_cpu_ptr(m->maps, cpu); - - if (!cm->online || cm->available <= maxavl) - continue; + cpu = matrix_find_best_cpu(m, msk); + if (cpu == UINT_MAX) + return -ENOSPC; - best_cpu = cpu; - maxavl = cm->available; - } + cm = per_cpu_ptr(m->maps, cpu); + bit = matrix_alloc_area(m, cm, 1, false); + if (bit >= m->alloc_end) + return -ENOSPC; + cm->allocated++; + cm->available--; + m->total_allocated++; + m->global_available--; + if (reserved) + m->global_reserved--; + *mapped_cpu = cpu; + trace_irq_matrix_alloc(bit, cpu, m, cm); + return bit; - if (maxavl) { - cm = per_cpu_ptr(m->maps, best_cpu); - bit = matrix_alloc_area(m, cm, 1, false); - if (bit < m->alloc_end) { - cm->allocated++; - cm->available--; - m->total_allocated++; - m->global_available--; - if (reserved) - m->global_reserved--; - *mapped_cpu = best_cpu; - trace_irq_matrix_alloc(bit, best_cpu, m, cm); - return bit; - } - } - return -ENOSPC; } /** -- cgit v1.2.3 From 76f99ae5b54d48430d1f0c5512a84da0ff9761e0 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Sun, 9 Sep 2018 01:58:38 +0800 Subject: irq/matrix: Spread managed interrupts on allocation Linux spreads out the non managed interrupt across the possible target CPUs to avoid vector space exhaustion. Managed interrupts are treated differently, as for them the vectors are reserved (with guarantee) when the interrupt descriptors are initialized. When the interrupt is requested a real vector is assigned. The assignment logic uses the first CPU in the affinity mask for assignment. If the interrupt has more than one CPU in the affinity mask, which happens when a multi queue device has less queues than CPUs, then doing the same search as for non managed interrupts makes sense as it puts the interrupt on the least interrupt plagued CPU. For single CPU affine vectors that's obviously a NOOP. Restructre the matrix allocation code so it does the 'best CPU' search, add the sanity check for an empty affinity mask and adapt the call site in the x86 vector management code. 
[ tglx: Added the empty mask check to the core and improved change log ] Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180908175838.14450-2-dou_liyang@163.com --- arch/x86/kernel/apic/vector.c | 9 ++++----- include/linux/irq.h | 3 ++- kernel/irq/matrix.c | 17 ++++++++++++++--- 3 files changed, 20 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 7654febd5102..652e7ffa9b9d 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) struct apic_chip_data *apicd = apic_chip_data(irqd); int vector, cpu; - cpumask_and(vector_searchmask, vector_searchmask, affmsk); - cpu = cpumask_first(vector_searchmask); - if (cpu >= nr_cpu_ids) - return -EINVAL; + cpumask_and(vector_searchmask, dest, affmsk); + /* set_affinity might call here for nothing */ if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask)) return 0; - vector = irq_matrix_alloc_managed(vector_matrix, cpu); + vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask, + &cpu); trace_vector_alloc_managed(irqd->irq, vector, vector); if (vector < 0) return vector; diff --git a/include/linux/irq.h b/include/linux/irq.h index 201de12a9957..c9bffda04a45 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m); void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); -int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, + unsigned int *mapped_cpu); void irq_matrix_reserve(struct irq_matrix *m); void irq_matrix_remove_reserved(struct irq_matrix *m); int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 67768bbe736e..6e6d467f3dec 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -260,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) * @m: Matrix pointer * @cpu: On which CPU the interrupt should be allocated */ -int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, + unsigned int *mapped_cpu) { - struct cpumap *cm = per_cpu_ptr(m->maps, cpu); - unsigned int bit, end = m->alloc_end; + unsigned int bit, cpu, end = m->alloc_end; + struct cpumap *cm; + + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); + if (cpu == UINT_MAX) + return -ENOSPC; + + cm = per_cpu_ptr(m->maps, cpu); + end = m->alloc_end; /* Get managed bit which are not allocated */ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); bit = find_first_bit(m->scratch_map, end); @@ -273,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) set_bit(bit, cm->alloc_map); cm->allocated++; m->total_allocated++; + *mapped_cpu = cpu; trace_irq_matrix_alloc_managed(bit, cpu, m, cm); return bit; } -- cgit v1.2.3 From 1627314fb54a33ebd23bd08f2e215eaed0f44712 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Wed, 4 Apr 2018 17:53:23 +0300 Subject: perf: Suppress AUX/OVERWRITE 
records It has been pointed out to me many times that it is useful to be able to switch off AUX records to save the bandwidth for records that actually matter, for example, in AUX overwrite mode. The usefulness of PERF_RECORD_AUX is in some of its flags, like the TRUNCATED flag that tells the decoder where exactly gaps in the trace are. The OVERWRITE flag, on the other hand will be set on every single record in overwrite mode. However, a PERF_RECORD_AUX[flags=OVERWRITE] is generated on every target task's sched_out, which over time adds up to a lot of useless information. If any folks out there have userspace that depends on a constant stream of OVERWRITE records for a good reason, they'll have to let us know. Signed-off-by: Alexander Shishkin Acked-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Will Deacon Cc: Adrian Hunter Cc: Markus T Metzger Link: http://lkml.kernel.org/r/20180404145323.28651-1-alexander.shishkin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/ring_buffer.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 5d3cf407e374..4a9937076331 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -459,10 +459,20 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) if (size || handle->aux_flags) { /* * Only send RECORD_AUX if we have something useful to communicate + * + * Note: the OVERWRITE records by themselves are not considered + * useful, as they don't communicate any *new* information, + * aside from the short-lived offset, that becomes history at + * the next event sched-in and therefore isn't useful. + * The userspace that needs to copy out AUX data in overwrite + * mode should know to use user_page::aux_head for the actual + * offset. So, from now on we don't output AUX records that + * have *only* OVERWRITE flag set. */ - perf_event_aux_event(handle->event, aux_head, size, - handle->aux_flags); + if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE) + perf_event_aux_event(handle->event, aux_head, size, + handle->aux_flags); } rb->user_page->aux_head = rb->aux_head; -- cgit v1.2.3 From a3ceed87b07769fb80ce9dc6b604e515dba14c4b Mon Sep 17 00:00:00 2001 From: He Zhe Date: Mon, 17 Sep 2018 11:24:20 +0800 Subject: dma-mapping: fix panic caused by passing empty cma command line argument early_cma does not check input argument before passing it to simple_strtoull. The argument would be a NULL pointer if "cma", without its value, is set in command line and thus causes the following panic. PANIC: early exception 0xe3 IP 10:ffffffffa3e9db8d error 0 cr2 0x0 [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.19.0-rc3-yocto-standard+ #7 [ 0.000000] RIP: 0010:_parse_integer_fixup_radix+0xd/0x70 ... [ 0.000000] Call Trace: [ 0.000000] simple_strtoull+0x29/0x70 [ 0.000000] memparse+0x26/0x90 [ 0.000000] early_cma+0x17/0x6a [ 0.000000] do_early_param+0x57/0x8e [ 0.000000] parse_args+0x208/0x320 [ 0.000000] ? rdinit_setup+0x30/0x30 [ 0.000000] parse_early_options+0x29/0x2d [ 0.000000] ? rdinit_setup+0x30/0x30 [ 0.000000] parse_early_param+0x36/0x4d [ 0.000000] setup_arch+0x336/0x99e [ 0.000000] start_kernel+0x6f/0x4e6 [ 0.000000] x86_64_start_reservations+0x24/0x26 [ 0.000000] x86_64_start_kernel+0x6f/0x72 [ 0.000000] secondary_startup_64+0xa4/0xb0 This patch adds a check to prevent the panic. 
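[ Illustration only, not part of the patch: a minimal userspace model of the defensive pattern the fix applies. An option handler must tolerate a missing value instead of handing a NULL pointer to the size parser. The names early_foo() and parse_size() are stand-ins for early_cma() and memparse(), not real kernel symbols. ]

#include <stdio.h>
#include <stdlib.h>

/* stand-in for memparse(): a number with an optional K/M/G suffix */
static unsigned long long parse_size(const char *p)
{
	char *end;
	unsigned long long val = strtoull(p, &end, 0);

	switch (*end) {
	case 'G': val <<= 10; /* fall through */
	case 'M': val <<= 10; /* fall through */
	case 'K': val <<= 10;
	}
	return val;
}

/* stand-in for the early_param() handler */
static int early_foo(const char *p)
{
	if (!p) {		/* bare "foo" on the command line, no "=<size>" */
		fprintf(stderr, "foo: size not provided\n");
		return -1;	/* models returning -EINVAL */
	}
	printf("foo: reserving %llu bytes\n", parse_size(p));
	return 0;
}

int main(void)
{
	early_foo("64M");	/* "foo=64M" */
	early_foo(NULL);	/* bare "foo": rejected instead of dereferencing NULL */
	return 0;
}

Built as plain C this prints the parsed size for "64M" and an error for the bare option, which mirrors the behaviour the kernel now has for "cma" given without a value.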
Signed-off-by: He Zhe Reviewed-by: Marek Szyprowski Cc: stable@vger.kernel.org Signed-off-by: Christoph Hellwig --- kernel/dma/contiguous.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index 286d82329eb0..b2a87905846d 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -49,7 +49,11 @@ static phys_addr_t limit_cmdline; static int __init early_cma(char *p) { - pr_debug("%s(%s)\n", __func__, p); + if (!p) { + pr_err("Config string not provided\n"); + return -EINVAL; + } + size_cmdline = memparse(p, &p); if (*p != '@') return 0; -- cgit v1.2.3 From 684f7e91d3c72d555b82db13b2a48798e6097886 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Sep 2018 08:54:57 +0200 Subject: dma-mapping: add the missing ARCH_HAS_SYNC_DMA_FOR_CPU_ALL declaration The patch adding the infrastructure failed to actually add the symbol declaration, oops.. Fixes: faef87723a ("dma-noncoherent: add a arch_sync_dma_for_cpu_all hook") Signed-off-by: Christoph Hellwig --- kernel/dma/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 9bd54304446f..1b1d63b3634b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU bool select NEED_DMA_MAP_STATE +config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL + bool + config DMA_DIRECT_OPS bool depends on HAS_DMA -- cgit v1.2.3 From f3ecc0ff0457eae93503792c6fc35921fa8a6204 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Aug 2018 14:53:20 +0200 Subject: dma-mapping: move the dma_coherent flag to struct device Various architectures support both coherent and non-coherent dma on a per-device basis. Move the dma_noncoherent flag from the mips archdata field to struct device proper to prepare the infrastructure for reuse on other architectures. 
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton Acked-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 1 + arch/mips/include/asm/Kbuild | 1 + arch/mips/include/asm/device.h | 19 ------------------- arch/mips/include/asm/dma-coherence.h | 6 ++++++ arch/mips/include/asm/dma-mapping.h | 2 +- arch/mips/mm/dma-noncoherent.c | 32 ++++++-------------------------- include/linux/device.h | 7 +++++++ include/linux/dma-noncoherent.h | 16 ++++++++++++++++ kernel/dma/Kconfig | 3 +++ 9 files changed, 41 insertions(+), 46 deletions(-) delete mode 100644 arch/mips/include/asm/device.h (limited to 'kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0b25180028b8..54c52bd0d9d3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1106,6 +1106,7 @@ config ARCH_SUPPORTS_UPROBES bool config DMA_MAYBE_COHERENT + select ARCH_HAS_DMA_COHERENCE_H select DMA_NONCOHERENT bool diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 58351e48421e..9a81e72119da 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,6 +1,7 @@ # MIPS headers generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += current.h +generic-y += device.h generic-y += dma-contiguous.h generic-y += emergency-restart.h generic-y += export.h diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h deleted file mode 100644 index 6aa796f1081a..000000000000 --- a/arch/mips/include/asm/device.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#ifndef _ASM_MIPS_DEVICE_H -#define _ASM_MIPS_DEVICE_H - -struct dev_archdata { -#ifdef CONFIG_DMA_PERDEV_COHERENT - /* Non-zero if DMA is coherent with CPU caches */ - bool dma_coherent; -#endif -}; - -struct pdev_archdata { -}; - -#endif /* _ASM_MIPS_DEVICE_H*/ diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h index 8eda48748ed5..5eaa1fcc878a 100644 --- a/arch/mips/include/asm/dma-coherence.h +++ b/arch/mips/include/asm/dma-coherence.h @@ -20,6 +20,12 @@ enum coherent_io_user_state { #elif defined(CONFIG_DMA_MAYBE_COHERENT) extern enum coherent_io_user_state coherentio; extern int hw_coherentio; + +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return coherentio == IO_COHERENCE_ENABLED || + (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio); +} #else #ifdef CONFIG_DMA_NONCOHERENT #define coherentio IO_COHERENCE_DISABLED diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index e81c4e97ff1a..40d825c779de 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -25,7 +25,7 @@ static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, bool coherent) { #ifdef CONFIG_DMA_PERDEV_COHERENT - dev->archdata.dma_coherent = coherent; + dev->dma_coherent = coherent; #endif } diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 2aca1236af36..d408ac51f56c 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -14,26 +14,6 @@ #include #include -#ifdef CONFIG_DMA_PERDEV_COHERENT -static inline int dev_is_coherent(struct device *dev) -{ - return dev->archdata.dma_coherent; -} -#else -static inline int dev_is_coherent(struct device *dev) -{ - switch (coherentio) { - default: - case IO_COHERENCE_DEFAULT: - return hw_coherentio; - case IO_COHERENCE_ENABLED: - return 1; - case IO_COHERENCE_DISABLED: - return 0; - } -} -#endif /* CONFIG_DMA_PERDEV_COHERENT */ 
- /* * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively * fill random cachelines with stale data at any time, requiring an extra @@ -49,7 +29,7 @@ static inline int dev_is_coherent(struct device *dev) */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { - if (dev_is_coherent(dev)) + if (dev_is_dma_coherent(dev)) return false; switch (boot_cpu_type()) { @@ -76,7 +56,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, if (!ret) return NULL; - if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { dma_cache_wback_inv((unsigned long) ret, size); ret = (void *)UNCAC_ADDR(ret); } @@ -87,7 +67,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev)) cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr); dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); } @@ -103,7 +83,7 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long pfn; int ret = -ENXIO; - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) addr = CAC_ADDR(addr); pfn = page_to_pfn(virt_to_page((void *)addr)); @@ -187,7 +167,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) dma_sync_phys(paddr, size, dir); } @@ -203,6 +183,6 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, { BUG_ON(direction == DMA_NONE); - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) dma_sync_virt(vaddr, size, direction); } diff --git a/include/linux/device.h b/include/linux/device.h index 8f882549edee..983506789402 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -927,6 +927,8 @@ struct dev_links_info { * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. + * @dma_coherent: this particular device is dma coherent, even if the + * architecture supports non-coherent devices. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. 
The device structure contains the information @@ -1016,6 +1018,11 @@ struct device { bool offline_disabled:1; bool offline:1; bool of_node_reused:1; +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + bool dma_coherent:1; +#endif }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index a0aa00cc909d..ce9732506ef4 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -4,6 +4,22 @@ #include +#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H +#include +#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return dev->dma_coherent; +} +#else +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return true; +} +#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 1b1d63b3634b..79476749f196 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -13,6 +13,9 @@ config NEED_DMA_MAP_STATE config ARCH_DMA_ADDR_T_64BIT def_bool 64BIT || PHYS_ADDR_T_64BIT +config ARCH_HAS_DMA_COHERENCE_H + bool + config HAVE_GENERIC_DMA_COHERENT bool -- cgit v1.2.3 From bc3ec75de5452db59b683487867ba562b950708a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 8 Sep 2018 11:22:43 +0200 Subject: dma-mapping: merge direct and noncoherent ops All the cache maintainance is already stubbed out when not enabled, but merging the two allows us to nicely handle the case where cache maintainance is required for some devices, but not others. 
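[ Illustration only, not part of the patch: a plain C model of the merged behaviour. A single map path consults a per-device coherency flag and performs cache maintenance only for non-coherent devices. struct device, dev_is_dma_coherent() and arch_sync_for_device() below are simplified stand-ins for the kernel types and arch hooks, not the real API. ]

#include <stdbool.h>
#include <stdio.h>

struct device {
	const char *name;
	bool dma_coherent;		/* models the new struct device bit */
};

static bool dev_is_dma_coherent(const struct device *dev)
{
	return dev->dma_coherent;
}

/* models an arch_sync_dma_* hook: writeback/invalidate CPU caches */
static void arch_sync_for_device(const struct device *dev)
{
	printf("%s: flush caches before DMA\n", dev->name);
}

/* one shared map path instead of separate direct and noncoherent ops */
static void dma_map_for_device(const struct device *dev)
{
	if (!dev_is_dma_coherent(dev))
		arch_sync_for_device(dev);
	printf("%s: buffer visible to the device\n", dev->name);
}

int main(void)
{
	struct device snooping = { "pci-nic", true };
	struct device nonsnooping = { "soc-mmc", false };

	dma_map_for_device(&snooping);		/* no cache maintenance */
	dma_map_for_device(&nonsnooping);	/* arch hook runs */
	return 0;
}

The point of the merge is exactly this shape: one ops table, with the non-coherent work reduced to conditional calls into the arch_sync_dma_* hooks.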
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS parts --- arch/arc/Kconfig | 2 +- arch/arc/mm/dma.c | 16 ++--- arch/arm/mm/dma-mapping-nommu.c | 5 +- arch/c6x/Kconfig | 2 +- arch/hexagon/Kconfig | 2 +- arch/m68k/Kconfig | 2 +- arch/microblaze/Kconfig | 2 +- arch/mips/Kconfig | 1 - arch/mips/include/asm/dma-mapping.h | 2 - arch/mips/jazz/jazzdma.c | 6 +- arch/mips/mm/dma-noncoherent.c | 29 +++------ arch/nds32/Kconfig | 2 +- arch/nios2/Kconfig | 2 +- arch/openrisc/Kconfig | 2 +- arch/parisc/Kconfig | 2 +- arch/parisc/kernel/setup.c | 2 +- arch/sh/Kconfig | 3 +- arch/sparc/Kconfig | 2 +- arch/sparc/include/asm/dma-mapping.h | 4 +- arch/x86/kernel/amd_gart_64.c | 6 +- arch/xtensa/Kconfig | 2 +- include/asm-generic/dma-mapping.h | 9 --- include/linux/dma-direct.h | 4 ++ include/linux/dma-mapping.h | 1 - include/linux/dma-noncoherent.h | 5 -- kernel/dma/Kconfig | 9 +-- kernel/dma/Makefile | 1 - kernel/dma/direct.c | 121 +++++++++++++++++++++++++++++++++-- kernel/dma/noncoherent.c | 106 ------------------------------ 29 files changed, 160 insertions(+), 192 deletions(-) delete mode 100644 kernel/dma/noncoherent.c (limited to 'kernel') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index b4441b0764d7..ca03694d518a 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -17,7 +17,7 @@ config ARC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index c75d5c3470e3..535ed4a068ef 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -167,7 +167,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, } /* - * Plug in coherent or noncoherent dma ops + * Plug in direct dma map ops. */ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) @@ -175,13 +175,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, /* * IOC hardware snoops all DMA traffic keeping the caches consistent * with memory - eliding need for any explicit cache maintenance of - * DMA buffers - so we can use dma_direct cache ops. + * DMA buffers. */ - if (is_isa_arcv2() && ioc_enable && coherent) { - set_dma_ops(dev, &dma_direct_ops); - dev_info(dev, "use dma_direct_ops cache ops\n"); - } else { - set_dma_ops(dev, &dma_noncoherent_ops); - dev_info(dev, "use dma_noncoherent_ops cache ops\n"); - } + if (is_isa_arcv2() && ioc_enable && coherent) + dev->dma_coherent = true; + + dev_info(dev, "use %sncoherent DMA ops\n", + dev->dma_coherent ? 
"" : "non"); } diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index aa7aba302e76..0ad156f9985b 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -47,7 +47,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, */ if (attrs & DMA_ATTR_NON_CONSISTENT) - return dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, + attrs); ret = dma_alloc_from_global_coherent(size, dma_handle); @@ -70,7 +71,7 @@ static void arm_nommu_dma_free(struct device *dev, size_t size, unsigned long attrs) { if (attrs & DMA_ATTR_NON_CONSISTENT) { - dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } else { int ret = dma_release_from_global_coherent(get_order(size), cpu_addr); diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index a641b0bf1611..f65a084607fd 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -9,7 +9,7 @@ config C6X select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 6cee842a9b44..3ef46522e89f 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -30,7 +30,7 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 070553791e97..c7b2a8d60a41 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -26,7 +26,7 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION - select DMA_NONCOHERENT_OPS if HAS_DMA + select DMA_DIRECT_OPS if HAS_DMA select HAVE_MEMBLOCK select ARCH_DISCARD_MEMBLOCK select NO_BOOTMEM diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index ace5c5bf1836..0f48ab6a8070 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -11,7 +11,7 @@ config MICROBLAZE select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 54c52bd0d9d3..96da6e3396e1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1121,7 +1121,6 @@ config DMA_NONCOHERENT select NEED_DMA_MAP_STATE select DMA_NONCOHERENT_MMAP select DMA_NONCOHERENT_CACHE_SYNC - select DMA_NONCOHERENT_OPS config SYS_HAS_EARLY_PRINTK bool diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 40d825c779de..b4c477eb46ce 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -12,8 +12,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &jazz_dma_ops; #elif defined(CONFIG_SWIOTLB) return &swiotlb_dma_ops; -#elif defined(CONFIG_DMA_NONCOHERENT_OPS) - return &dma_noncoherent_ops; #else return &dma_direct_ops; #endif diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index d31bc2f01208..bb49dfa1a9a3 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -564,13 +564,13 @@ static void *jazz_dma_alloc(struct device *dev, 
size_t size, { void *ret; - ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); if (!ret) return NULL; *dma_handle = vdma_alloc(virt_to_phys(ret), size); if (*dma_handle == VDMA_ERROR) { - dma_direct_free(dev, size, ret, *dma_handle, attrs); + dma_direct_free_pages(dev, size, ret, *dma_handle, attrs); return NULL; } @@ -587,7 +587,7 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr, vdma_free(dma_handle); if (!(attrs & DMA_ATTR_NON_CONSISTENT)) vaddr = (void *)CAC_ADDR((unsigned long)vaddr); - return dma_direct_free(dev, size, vaddr, dma_handle, attrs); + dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page, diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index d408ac51f56c..b01b9a3e424f 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -29,9 +29,6 @@ */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { - if (dev_is_dma_coherent(dev)) - return false; - switch (boot_cpu_type()) { case CPU_R10000: case CPU_R12000: @@ -52,11 +49,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, { void *ret; - ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); - if (!ret) - return NULL; - - if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { + ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); + if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) { dma_cache_wback_inv((unsigned long) ret, size); ret = (void *)UNCAC_ADDR(ret); } @@ -67,9 +61,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT)) cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr); - dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, @@ -78,16 +72,11 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, { unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long addr = (unsigned long)cpu_addr; + unsigned long addr = CAC_ADDR((unsigned long)cpu_addr); unsigned long off = vma->vm_pgoff; - unsigned long pfn; + unsigned long pfn = page_to_pfn(virt_to_page((void *)addr)); int ret = -ENXIO; - if (!dev_is_dma_coherent(dev)) - addr = CAC_ADDR(addr); - - pfn = page_to_pfn(virt_to_page((void *)addr)); - if (attrs & DMA_ATTR_WRITE_COMBINE) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); else @@ -167,8 +156,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - if (!dev_is_dma_coherent(dev)) - dma_sync_phys(paddr, size, dir); + dma_sync_phys(paddr, size, dir); } void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, @@ -183,6 +171,5 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, { BUG_ON(direction == DMA_NONE); - if (!dev_is_dma_coherent(dev)) - dma_sync_virt(vaddr, size, direction); + dma_sync_virt(vaddr, size, direction); } diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 7068f341133d..56992330026a 100644 --- a/arch/nds32/Kconfig +++ 
b/arch/nds32/Kconfig @@ -11,7 +11,7 @@ config NDS32 select CLKSRC_MMIO select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index f4ad1138e6b9..03965692fbfe 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -4,7 +4,7 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select TIMER_OF select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e0081e734827..a655ae280637 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -7,7 +7,7 @@ config OPENRISC def_bool y select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select OF select OF_EARLY_FLATTREE select IRQ_DOMAIN diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8e6d83f79e72..f1cd12afd943 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -186,7 +186,7 @@ config PA11 depends on PA7000 || PA7100LC || PA7200 || PA7300LC select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_CACHE_SYNC config PREFETCH diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 4e87c35c22b7..755e89ec828a 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -102,7 +102,7 @@ void __init dma_ops_init(void) case pcxl: /* falls through */ case pcxs: case pcxt: - hppa_dma_ops = &dma_noncoherent_ops; + hppa_dma_ops = &dma_direct_ops; break; default: break; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1fb7b6d72baf..475d786a65b0 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,6 +7,7 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP + select DMA_DIRECT_OPS select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP @@ -158,13 +159,11 @@ config SWAP_IO_SPACE bool config DMA_COHERENT - select DMA_DIRECT_OPS bool config DMA_NONCOHERENT def_bool !DMA_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS config PGTABLE_LEVELS default 3 if X2TLB diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e6f2a38d2e61..7e2aa59fcc29 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -51,7 +51,7 @@ config SPARC config SPARC32 def_bool !64BIT select ARCH_HAS_SYNC_DMA_FOR_CPU - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select CLZ_TAB select HAVE_UID16 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index e17566376934..b0bb2fcaf1c9 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -14,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) - return &dma_noncoherent_ops; + return &dma_direct_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (bus == &pci_bus_type) - return &dma_noncoherent_ops; + return &dma_direct_ops; #endif return dma_ops; } diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index f299d8a479bb..3f9d1b4019bb 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -482,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t 
size, dma_addr_t *dma_addr, { void *vaddr; - vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs); + vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs); if (!vaddr || !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24)) return vaddr; @@ -494,7 +494,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, goto out_free; return vaddr; out_free: - dma_direct_free(dev, size, vaddr, *dma_addr, attrs); + dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs); return NULL; } @@ -504,7 +504,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); - dma_direct_free(dev, size, vaddr, dma_addr, attrs); + dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs); } static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 04d038f3b6fa..516694937b7a 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -12,7 +12,7 @@ config XTENSA select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_IRQ_SHOW diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index ad2868263867..880a292d792f 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -4,16 +4,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - /* - * Use the non-coherent ops if available. If an architecture wants a - * more fine-grained selection of operations it will have to implement - * get_arch_dma_ops itself or use the per-device dma_ops. - */ -#ifdef CONFIG_DMA_NONCOHERENT_OPS - return &dma_noncoherent_ops; -#else return &dma_direct_ops; -#endif } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 8d9f33febde5..86a59ba5a7f3 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -59,6 +59,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); +void *dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); +void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index eafd6f318e78..8f2001181cd1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -136,7 +136,6 @@ struct dma_map_ops { }; extern const struct dma_map_ops dma_direct_ops; -extern const struct dma_map_ops dma_noncoherent_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index ce9732506ef4..3f503025a0cd 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -24,14 +24,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); - -#ifdef CONFIG_DMA_NONCOHERENT_MMAP int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -#else -#define arch_dma_mmap NULL -#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 79476749f196..5617c9a76208 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -33,18 +33,13 @@ config DMA_DIRECT_OPS bool depends on HAS_DMA -config DMA_NONCOHERENT_OPS - bool - depends on HAS_DMA - select DMA_DIRECT_OPS - config DMA_NONCOHERENT_MMAP bool - depends on DMA_NONCOHERENT_OPS + depends on DMA_DIRECT_OPS config DMA_NONCOHERENT_CACHE_SYNC bool - depends on DMA_NONCOHERENT_OPS + depends on DMA_DIRECT_OPS config DMA_VIRT_OPS bool diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile index 6de44e4eb454..7d581e4eea4a 100644 --- a/kernel/dma/Makefile +++ b/kernel/dma/Makefile @@ -4,7 +4,6 @@ obj-$(CONFIG_HAS_DMA) += mapping.o obj-$(CONFIG_DMA_CMA) += contiguous.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o -obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o obj-$(CONFIG_DMA_VIRT_OPS) += virt.o obj-$(CONFIG_DMA_API_DEBUG) += debug.o obj-$(CONFIG_SWIOTLB) += swiotlb.o diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index de87b0282e74..09e85f6aa4ba 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -1,13 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* - * DMA operations that map physical memory directly without using an IOMMU or - * flushing caches. + * Copyright (C) 2018 Christoph Hellwig. + * + * DMA operations that map physical memory directly without using an IOMMU. */ #include #include #include #include #include +#include #include #include @@ -58,8 +60,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) return addr + size - 1 <= dev->coherent_dma_mask; } -void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) +void *dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; int page_order = get_order(size); @@ -124,7 +126,7 @@ again: * NOTE: this function must never look at the dma_addr argument, because we want * to be able to use it as a helper for iommu implementations as well. 
*/ -void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, +void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -136,14 +138,106 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, free_pages((unsigned long)cpu_addr, page_order); } +void *dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); +} + +void dma_direct_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); + else + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); +} + +static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev) && + IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP)) + return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +static void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (dev_is_dma_coherent(dev)) + return; + arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (dev_is_dma_coherent(dev)) + return; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); +} + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +static void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (dev_is_dma_coherent(dev)) + return; + arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); + arch_sync_dma_for_cpu_all(dev); +} + +static void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (dev_is_dma_coherent(dev)) + return; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); + arch_sync_dma_for_cpu_all(dev); +} + +static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); +} + +static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} +#endif + dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { - dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset; + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t dma_addr = phys_to_dma(dev, phys); if (!check_addr(dev, dma_addr, size, __func__)) return DIRECT_MAPPING_ERROR; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + 
dma_direct_sync_single_for_device(dev, dma_addr, size, dir); return dma_addr; } @@ -162,6 +256,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, sg_dma_len(sg) = sg->length; } + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_sg_for_device(dev, sgl, nents, dir); return nents; } @@ -197,9 +293,22 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc, .free = dma_direct_free, + .mmap = dma_direct_mmap, .map_page = dma_direct_map_page, .map_sg = dma_direct_map_sg, +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_device = dma_direct_sync_sg_for_device, +#endif +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, + .unmap_page = dma_direct_unmap_page, + .unmap_sg = dma_direct_unmap_sg, +#endif .dma_supported = dma_direct_supported, .mapping_error = dma_direct_mapping_error, + .cache_sync = arch_dma_cache_sync, }; EXPORT_SYMBOL(dma_direct_ops); diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c deleted file mode 100644 index 031fe235d958..000000000000 --- a/kernel/dma/noncoherent.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2018 Christoph Hellwig. - * - * DMA operations that map physical memory directly without providing cache - * coherence. - */ -#include -#include -#include -#include -#include - -static void dma_noncoherent_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); -} - -static void dma_noncoherent_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); -} - -static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t addr; - - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - arch_sync_dma_for_device(dev, page_to_phys(page) + offset, - size, dir); - return addr; -} - -static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); - if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); - return nents; -} - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) -static void dma_noncoherent_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); - arch_sync_dma_for_cpu_all(dev); -} - -static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); - arch_sync_dma_for_cpu_all(dev); -} - -static void 
dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); -} - -static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); -} -#endif - -const struct dma_map_ops dma_noncoherent_ops = { - .alloc = arch_dma_alloc, - .free = arch_dma_free, - .mmap = arch_dma_mmap, - .sync_single_for_device = dma_noncoherent_sync_single_for_device, - .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, - .map_page = dma_noncoherent_map_page, - .map_sg = dma_noncoherent_map_sg, -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) - .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, - .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, - .unmap_page = dma_noncoherent_unmap_page, - .unmap_sg = dma_noncoherent_unmap_sg, -#endif - .dma_supported = dma_direct_supported, - .mapping_error = dma_direct_mapping_error, - .cache_sync = arch_dma_cache_sync, -}; -EXPORT_SYMBOL(dma_noncoherent_ops); -- cgit v1.2.3 From 58b0440663ec11372befb8ead0ee7099d8878590 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Sep 2018 08:55:28 +0200 Subject: dma-mapping: consolidate the dma mmap implementations The only functional differences (modulo a few missing fixes in the arch code) is that architectures without coherent caches need a hook to convert a virtual or dma address into a pfn, given that we don't have the kernel linear mapping available for the otherwise easy virt_to_page call. As a side effect we can support mmap of the per-device coherent area even on architectures not providing the callback, and we make previous dangerous default methods dma_common_mmap actually save for non-coherent architectures by rejecting it without the right helper. In addition to that we need a hook so that some architectures can override the protection bits when mmaping a dma coherent allocations. 
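[ Illustration only, not part of the patch: a rough userspace model of the consolidated mmap path. It bounds-checks the requested window against the allocation and, for a non-coherent device, asks an arch hook for the backing pfn because the kernel linear mapping cannot be used there. coherent_to_pfn() and model_common_mmap() are invented names that loosely follow arch_dma_coherent_to_pfn() and dma_common_mmap(). ]

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* stand-in for arch_dma_coherent_to_pfn() */
static unsigned long coherent_to_pfn(uintptr_t cpu_addr)
{
	return cpu_addr >> PAGE_SHIFT;
}

/*
 * Rough model of dma_common_mmap(): map "user_pages" pages starting at
 * page offset "off" of a "size"-byte coherent allocation at cpu_addr.
 */
static int model_common_mmap(uintptr_t cpu_addr, size_t size, bool coherent,
			     unsigned long off, unsigned long user_pages)
{
	unsigned long count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn;

	if (off >= count || user_pages > count - off)
		return -1;			/* models -ENXIO */

	/* non-coherent: the linear mapping is unusable, ask the arch hook */
	pfn = coherent ? cpu_addr >> PAGE_SHIFT : coherent_to_pfn(cpu_addr);

	printf("remap pfns %lu..%lu\n", pfn + off, pfn + off + user_pages - 1);
	return 0;
}

int main(void)
{
	model_common_mmap(0x40000000, 4 * PAGE_SIZE, false, 1, 2);	/* maps */
	model_common_mmap(0x40000000, 4 * PAGE_SIZE, false, 3, 2);	/* rejected */
	return 0;
}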
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS parts --- arch/arc/Kconfig | 2 +- arch/arc/mm/dma.c | 25 +++---------------------- arch/arm/mm/dma-mapping-nommu.c | 2 +- arch/microblaze/Kconfig | 2 +- arch/microblaze/include/asm/pgtable.h | 2 -- arch/microblaze/kernel/dma.c | 22 ---------------------- arch/microblaze/mm/consistent.c | 3 ++- arch/mips/Kconfig | 3 ++- arch/mips/jazz/jazzdma.c | 1 - arch/mips/mm/dma-noncoherent.c | 32 +++++++++----------------------- drivers/xen/swiotlb-xen.c | 2 +- include/linux/dma-mapping.h | 5 +++-- include/linux/dma-noncoherent.h | 10 ++++++++-- kernel/dma/Kconfig | 10 ++++++---- kernel/dma/direct.c | 11 ----------- kernel/dma/mapping.c | 32 +++++++++++++++++++++----------- 16 files changed, 58 insertions(+), 106 deletions(-) (limited to 'kernel') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ca03694d518a..3d9bdecfa52d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -9,6 +9,7 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_DMA_COHERENT_TO_PFN select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -18,7 +19,6 @@ config ARC select CLONE_BACKWARDS select COMMON_CLK select DMA_DIRECT_OPS - select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 535ed4a068ef..db203ff69ccf 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, __free_pages(page, get_order(size)); } -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) { - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long pfn = __phys_to_pfn(dma_addr); - unsigned long off = vma->vm_pgoff; - int ret = -ENXIO; - - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - - return ret; + return __phys_to_pfn(dma_addr); } /* diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 0ad156f9985b..712416ecd8e6 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -91,7 +91,7 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) return ret; - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 0f48ab6a8070..164a4857737a 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,6 +1,7 @@ config MICROBLAZE def_bool y select ARCH_NO_SWAP + select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -12,7 +13,6 @@ config MICROBLAZE select CLONE_BACKWARDS3 select COMMON_CLK select DMA_DIRECT_OPS - select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES diff --git 
a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 7b650ab14fa0..f64ebb9c9a41 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -553,8 +553,6 @@ void __init *early_get_page(void); extern unsigned long ioremap_bot, ioremap_base; -unsigned long consistent_virt_to_pfn(void *vaddr); - void setup_memory(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 71032cf64669..a89c2d4ed5ff 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -42,25 +42,3 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, { __dma_sync(dev, paddr, size, dir); } - -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ -#ifdef CONFIG_MMU - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long off = vma->vm_pgoff; - unsigned long pfn; - - if (off >= count || user_count > (count - off)) - return -ENXIO; - - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pfn = consistent_virt_to_pfn(cpu_addr); - return remap_pfn_range(vma, vma->vm_start, pfn + off, - vma->vm_end - vma->vm_start, vma->vm_page_prot); -#else - return -ENXIO; -#endif -} diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index c9a278ac795a..d801cc5f5b95 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -165,7 +165,8 @@ static pte_t *consistent_virt_to_pte(void *vaddr) return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr); } -unsigned long consistent_virt_to_pfn(void *vaddr) +long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, + dma_addr_t dma_addr) { pte_t *ptep = consistent_virt_to_pte(vaddr); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 96da6e3396e1..77c022e56e6e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1116,10 +1116,11 @@ config DMA_PERDEV_COHERENT config DMA_NONCOHERENT bool + select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select NEED_DMA_MAP_STATE - select DMA_NONCOHERENT_MMAP + select ARCH_HAS_DMA_COHERENT_TO_PFN select DMA_NONCOHERENT_CACHE_SYNC config SYS_HAS_EARLY_PRINTK diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index bb49dfa1a9a3..0a0aaf39fd16 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -682,7 +682,6 @@ static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops jazz_dma_ops = { .alloc = jazz_dma_alloc, .free = jazz_dma_free, - .mmap = arch_dma_mmap, .map_page = jazz_dma_map_page, .unmap_page = jazz_dma_unmap_page, .map_sg = jazz_dma_map_sg, diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index b01b9a3e424f..e6c9485cadcf 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -66,33 +66,19 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) { - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long addr = CAC_ADDR((unsigned 
long)cpu_addr); - unsigned long off = vma->vm_pgoff; - unsigned long pfn = page_to_pfn(virt_to_page((void *)addr)); - int ret = -ENXIO; + return page_to_pfn(virt_to_page((void *)addr)); +} +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ if (attrs & DMA_ATTR_WRITE_COMBINE) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - else - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - - return ret; + return pgprot_writecombine(prot); + return pgprot_noncached(prot); } static inline void dma_sync_virt(void *addr, size_t size, diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a6f9ba85dc4b..470757ddddea 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -662,7 +662,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); #endif - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } /* diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 8f2001181cd1..c3378d4e0d57 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -444,7 +444,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, } extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); void *dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, @@ -476,7 +477,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 3f503025a0cd..9051b055beec 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -24,9 +24,15 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr); + +#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); +#else +# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) +#endif #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 5617c9a76208..645c7a2ecde8 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -29,13 +29,15 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL bool -config DMA_DIRECT_OPS +config ARCH_HAS_DMA_COHERENT_TO_PFN bool - 
depends on HAS_DMA -config DMA_NONCOHERENT_MMAP +config ARCH_HAS_DMA_MMAP_PGPROT bool - depends on DMA_DIRECT_OPS + +config DMA_DIRECT_OPS + bool + depends on HAS_DMA config DMA_NONCOHERENT_CACHE_SYNC bool diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 09e85f6aa4ba..c954f0a6dc62 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -155,16 +155,6 @@ void dma_direct_free(struct device *dev, size_t size, dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } -static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - if (!dev_is_dma_coherent(dev) && - IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP)) - return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); -} - static void dma_direct_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { @@ -293,7 +283,6 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc, .free = dma_direct_free, - .mmap = dma_direct_mmap, .map_page = dma_direct_map_page, .map_sg = dma_direct_map_sg, #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 3540cb399bd2..42fd73aca305 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include #include @@ -220,27 +220,37 @@ EXPORT_SYMBOL(dma_common_get_sgtable); * Create userspace mapping for the DMA-coherent memory. */ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) { - int ret = -ENXIO; #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; + unsigned long pfn; + int ret = -ENXIO; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - if (off < count && user_count <= (count - off)) - ret = remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(cpu_addr)) + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); -#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ + if (off >= count || user_count > count - off) + return -ENXIO; + + if (!dev_is_dma_coherent(dev)) { + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) + return -ENXIO; + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + } else { + pfn = page_to_pfn(virt_to_page(cpu_addr)); + } - return ret; + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + user_count << PAGE_SHIFT, vma->vm_page_prot); +#else + return -ENXIO; +#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ } EXPORT_SYMBOL(dma_common_mmap); -- cgit v1.2.3 From 9406a49fd1f4379409ed87b29fdaa259b0441912 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Aug 2018 09:39:38 +0200 Subject: dma-mapping: support non-coherent devices in dma_common_get_sgtable We can use the arch_dma_coherent_to_pfn hook to provide a ->get_sgtable implementation. Note that this isn't an endorsement of this interface (which is a horrible bad idea), but it is required to move arm64 over to the generic code without a loss of functionality. 
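[ Illustration only, not part of the patch: a small userspace model of the resulting get_sgtable flow. The common code only needs the page behind the buffer, taken from the arch pfn hook for non-coherent devices and from the linear mapping otherwise, and then emits a single scatterlist entry for the whole allocation. struct sg_entry and the function names are invented stand-ins, not the kernel API. ]

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct sg_entry {			/* stand-in for one scatterlist entry */
	unsigned long pfn;
	size_t length;
};

/* stand-in for arch_dma_coherent_to_pfn() */
static unsigned long coherent_to_pfn(uintptr_t cpu_addr)
{
	return cpu_addr >> PAGE_SHIFT;
}

/* rough model of dma_common_get_sgtable(): one entry covering the buffer */
static int model_get_sgtable(struct sg_entry *sg, uintptr_t cpu_addr,
			     size_t size, bool coherent, bool has_pfn_hook)
{
	if (!coherent && !has_pfn_hook)
		return -1;		/* models the -ENXIO rejection */

	sg->pfn = coherent ? cpu_addr >> PAGE_SHIFT : coherent_to_pfn(cpu_addr);
	sg->length = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* PAGE_ALIGN */
	return 0;
}

int main(void)
{
	struct sg_entry sg;

	if (!model_get_sgtable(&sg, 0x40000000, 8192, false, true))
		printf("sg entry: pfn %lu, %zu bytes\n", sg.pfn, sg.length);
	return 0;
}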
Signed-off-by: Christoph Hellwig --- drivers/xen/swiotlb-xen.c | 2 +- include/linux/dma-mapping.h | 7 ++++--- kernel/dma/mapping.c | 23 ++++++++++++++++------- 3 files changed, 21 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 470757ddddea..28819a0e61d0 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -689,7 +689,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, handle, size, attrs); } #endif - return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size); + return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs); } static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c3378d4e0d57..bd81e74cca7b 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -483,8 +483,8 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) int -dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size); +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, unsigned long attrs); static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, @@ -496,7 +496,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); - return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); } #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 42fd73aca305..58dec7a92b7b 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -202,17 +202,26 @@ EXPORT_SYMBOL(dmam_release_declared_memory); * Create scatter-list for the already allocated DMA buffer. */ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size) + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) { - struct page *page = virt_to_page(cpu_addr); + struct page *page; int ret; - ret = sg_alloc_table(sgt, 1, GFP_KERNEL); - if (unlikely(ret)) - return ret; + if (!dev_is_dma_coherent(dev)) { + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) + return -ENXIO; - sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); - return 0; + page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr, + dma_addr)); + } else { + page = virt_to_page(cpu_addr); + } + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (!ret) + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return ret; } EXPORT_SYMBOL(dma_common_get_sgtable); -- cgit v1.2.3 From f83606f5eb007adc33bc8541ede00590f477bdeb Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Thu, 20 Sep 2018 12:22:25 -0700 Subject: fork: report pid exhaustion correctly Make the clone and fork syscalls return EAGAIN when the limit on the number of pids /proc/sys/kernel/pid_max is exceeded. Currently, when the pid_max limit is exceeded, the kernel will return ENOSPC from the fork and clone syscalls. This is contrary to the documented behaviour, which explicitly calls out the pid_max case as one where EAGAIN should be returned. 
It also leads to really confusing error messages in userspace programs which will complain about a lack of disk space when they fail to create processes/threads for this reason. This error is being returned because alloc_pid() uses the idr api to find a new pid; when there are none available, idr_alloc_cyclic() returns -ENOSPC, and this is being propagated back to userspace. This behaviour has been broken before, and was explicitly fixed in commit 35f71bc0a09a ("fork: report pid reservation failure properly"), so I think -EAGAIN is definitely the right thing to return in this case. The current behaviour change dates from commit 95846ecf9dac ("pid: replace pid bitmap implementation with IDR AIP") and was I believe unintentional. This patch has no impact on the case where allocating a pid fails because the child reaper for the namespace is dead; that case will still return -ENOMEM. Link: http://lkml.kernel.org/r/20180903111016.46461-1-ktsanaktsidis@zendesk.com Fixes: 95846ecf9dac ("pid: replace pid bitmap implementation with IDR AIP") Signed-off-by: KJ Tsanaktsidis Reviewed-by: Andrew Morton Acked-by: Michal Hocko Cc: Gargi Sharma Cc: Rik van Riel Cc: Oleg Nesterov Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/pid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index de1cfc4f75a2..cdf63e53a014 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) idr_preload_end(); if (nr < 0) { - retval = nr; + retval = (nr == -ENOSPC) ? -EAGAIN : nr; goto out_free; } -- cgit v1.2.3 From 3bf181bc5d8bc86f04ffd538d7fda9e69af1f2c2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 20 Sep 2018 12:22:43 -0700 Subject: kernel/sys.c: remove duplicated include Link: http://lkml.kernel.org/r/20180821133424.18716-1-yuehaibing@huawei.com Signed-off-by: YueHaibing Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/sys.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index cf5c67533ff1..123bd73046ec 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -71,9 +71,6 @@ #include #include -/* Hardening for Spectre-v1 */ -#include - #include "uid16.h" #ifndef SET_UNALIGN_CTL -- cgit v1.2.3 From 788758d1fe874fd20ecb0ab490552d94c024a9de Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Thu, 20 Sep 2018 17:46:12 +0800 Subject: bpf: remove redundant null pointer check before consume_skb consume_skb has taken the null pointer into account. hence it is safe to remove the redundant null pointer check before consume_skb. 
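Illustrative fragment only, mirroring the hunks below: the two forms behave identically because consume_skb() treats a NULL skb as a no-op.

	if (md->skb)			/* redundant NULL guard */
		consume_skb(md->skb);

	consume_skb(md->skb);		/* equivalent, no guard needed */
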
Signed-off-by: zhong jiang Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 488ef9663c01..a9359cbc3f93 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -590,8 +590,7 @@ static int free_sg(struct sock *sk, int start, if (i == MAX_SKB_FRAGS) i = 0; } - if (md->skb) - consume_skb(md->skb); + consume_skb(md->skb); return free; } @@ -973,8 +972,7 @@ bytes_ready: if (!sg->length && md->sg_start == md->sg_end) { list_del(&md->list); - if (md->skb) - consume_skb(md->skb); + consume_skb(md->skb); kfree(md); } } -- cgit v1.2.3 From 5607fff303636d48b88414c6be353d9fed700af2 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 18 Sep 2018 09:01:44 -0700 Subject: bpf: sockmap only allow ESTABLISHED sock state After this patch we only allow socks that are in ESTABLISHED state or are being added via a sock_ops event that is transitioning into an ESTABLISHED state. By allowing sock_ops events we allow users to manage sockmaps directly from sock ops programs. The two supported sock_ops ops are BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB and BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB. Similar to TLS ULP this ensures sk_user_data is correct. Reported-by: Eric Dumazet Fixes: 1aa12bdf1bfb ("bpf: sockmap, add sock close() hook to remove socks") Signed-off-by: John Fastabend Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 488ef9663c01..1f97b559892a 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -2097,8 +2097,12 @@ static int sock_map_update_elem(struct bpf_map *map, return -EINVAL; } + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. + */ if (skops.sk->sk_type != SOCK_STREAM || - skops.sk->sk_protocol != IPPROTO_TCP) { + skops.sk->sk_protocol != IPPROTO_TCP || + skops.sk->sk_state != TCP_ESTABLISHED) { fput(socket->file); return -EOPNOTSUPP; } @@ -2453,6 +2457,16 @@ static int sock_hash_update_elem(struct bpf_map *map, return -EINVAL; } + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. + */ + if (skops.sk->sk_type != SOCK_STREAM || + skops.sk->sk_protocol != IPPROTO_TCP || + skops.sk->sk_state != TCP_ESTABLISHED) { + fput(socket->file); + return -EOPNOTSUPP; + } + lock_sock(skops.sk); preempt_disable(); rcu_read_lock(); @@ -2543,10 +2557,22 @@ const struct bpf_map_ops sock_hash_ops = { .map_check_btf = map_check_no_btf, }; +static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops) +{ + return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || + ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; +} BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); + + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. This checks that the sock ops triggering the update is + * one indicating we are (or will be soon) in an ESTABLISHED state. 
+ */ + if (!bpf_is_valid_sock_op(bpf_sock)) + return -EOPNOTSUPP; return sock_map_ctx_update_elem(bpf_sock, map, key, flags); } @@ -2565,6 +2591,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); + + if (!bpf_is_valid_sock_op(bpf_sock)) + return -EOPNOTSUPP; return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); } -- cgit v1.2.3 From b05545e15e1ff1d6a6a8593971275f9cc3e6b92b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 18 Sep 2018 09:01:49 -0700 Subject: bpf: sockmap, fix transition through disconnect without close It is possible (via shutdown()) for TCP socks to go trough TCP_CLOSE state via tcp_disconnect() without actually calling tcp_close which would then call our bpf_tcp_close() callback. Because of this a user could disconnect a socket then put it in a LISTEN state which would break our assumptions about sockets always being ESTABLISHED state. To resolve this rely on the unhash hook, which is called in the disconnect case, to remove the sock from the sockmap. Reported-by: Eric Dumazet Fixes: 1aa12bdf1bfb ("bpf: sockmap, add sock close() hook to remove socks") Signed-off-by: John Fastabend Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 60 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 1f97b559892a..0a0f2ec75370 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -132,6 +132,7 @@ struct smap_psock { struct work_struct gc_work; struct proto *sk_proto; + void (*save_unhash)(struct sock *sk); void (*save_close)(struct sock *sk, long timeout); void (*save_data_ready)(struct sock *sk); void (*save_write_space)(struct sock *sk); @@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); static int bpf_tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); +static void bpf_tcp_unhash(struct sock *sk); static void bpf_tcp_close(struct sock *sk, long timeout); static inline struct smap_psock *smap_psock_sk(const struct sock *sk) @@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], struct proto *base) { prot[SOCKMAP_BASE] = *base; + prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash; prot[SOCKMAP_BASE].close = bpf_tcp_close; prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; @@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk) return -EBUSY; } + psock->save_unhash = sk->sk_prot->unhash; psock->save_close = sk->sk_prot->close; psock->sk_proto = sk->sk_prot; @@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, return e; } -static void bpf_tcp_close(struct sock *sk, long timeout) +static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock) { - void (*close_fun)(struct sock *sk, long timeout); struct smap_psock_map_entry *e; struct sk_msg_buff *md, *mtmp; - struct smap_psock *psock; struct sock *osk; - lock_sock(sk); - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - release_sock(sk); - return sk->sk_prot->close(sk, timeout); - } - - /* The psock may be destroyed anytime after exiting the RCU critial - * section so by the time we use close_fun the psock may no longer 
- * be valid. However, bpf_tcp_close is called with the sock lock - * held so the close hook and sk are still valid. - */ - close_fun = psock->save_close; - if (psock->cork) { free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); @@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout) kfree(e); e = psock_map_pop(sk, psock); } +} + +static void bpf_tcp_unhash(struct sock *sk) +{ + void (*unhash_fun)(struct sock *sk); + struct smap_psock *psock; + + rcu_read_lock(); + psock = smap_psock_sk(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + if (sk->sk_prot->unhash) + sk->sk_prot->unhash(sk); + return; + } + unhash_fun = psock->save_unhash; + bpf_tcp_remove(sk, psock); + rcu_read_unlock(); + unhash_fun(sk); +} + +static void bpf_tcp_close(struct sock *sk, long timeout) +{ + void (*close_fun)(struct sock *sk, long timeout); + struct smap_psock *psock; + + lock_sock(sk); + rcu_read_lock(); + psock = smap_psock_sk(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + release_sock(sk); + return sk->sk_prot->close(sk, timeout); + } + close_fun = psock->save_close; + bpf_tcp_remove(sk, psock); rcu_read_unlock(); release_sock(sk); close_fun(sk, timeout); -- cgit v1.2.3 From c839e7a03f92bafd71fd145b470dcdc7f43f2d4c Mon Sep 17 00:00:00 2001 From: "Dennis Zhou (Facebook)" Date: Tue, 11 Sep 2018 14:41:33 -0400 Subject: blkcg: remove bio->bi_css and instead use bio->bi_blkg Prior patches ensured that all bios are now associated with some blkg. This now makes bio->bi_css unnecessary as blkg maintains a reference to the blkcg already. This patch removes the field bi_css and transfers corresponding uses to access via bi_blkg. Signed-off-by: Dennis Zhou Reviewed-by: Josef Bacik Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/bio.c | 56 +++++++++------------------------------------- block/bounce.c | 2 +- drivers/block/loop.c | 5 +++-- drivers/md/raid0.c | 2 +- include/linux/bio.h | 9 +++----- include/linux/blk-cgroup.h | 8 +++---- include/linux/blk_types.h | 1 - kernel/trace/blktrace.c | 4 ++-- 8 files changed, 25 insertions(+), 62 deletions(-) (limited to 'kernel') diff --git a/block/bio.c b/block/bio.c index 387480de6992..71cfe3720ea7 100644 --- a/block/bio.c +++ b/block/bio.c @@ -609,7 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - bio_clone_blkcg_association(bio, bio_src); + bio_clone_blkg_association(bio, bio_src); blkcg_bio_issue_init(bio); } @@ -1956,34 +1956,6 @@ EXPORT_SYMBOL(bioset_init_from_src); #ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_blkcg - associate a bio with the specified blkcg - * @bio: target bio - * @blkcg_css: css of the blkcg to associate - * - * Associate @bio with the blkcg specified by @blkcg_css. Block layer will - * treat @bio as if it were issued by a task which belongs to the blkcg. - * - * This function takes an extra reference of @blkcg_css which will be put - * when @bio is released. The caller must own @bio and is responsible for - * synchronizing calls to this function. If @blkcg_css is NULL, a call to - * blkcg_get_css finds the current css from the kthread or task. 
- */ -int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) -{ - if (unlikely(bio->bi_css)) - return -EBUSY; - - if (blkcg_css) - css_get(blkcg_css); - else - blkcg_css = blkcg_get_css(); - - bio->bi_css = blkcg_css; - return 0; -} -EXPORT_SYMBOL_GPL(bio_associate_blkcg); - /** * bio_associate_blkg - associate a bio with the a blkg * @bio: target bio @@ -2033,7 +2005,6 @@ int bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { css_get(css); - bio->bi_css = css; return __bio_associate_blkg_from_css(bio, css); } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); @@ -2054,12 +2025,11 @@ int bio_associate_blkg_from_page(struct bio *bio, struct page *page) { struct cgroup_subsys_state *css; - if (unlikely(bio->bi_css)) + if (unlikely(bio->bi_blkg)) return -EBUSY; if (!page->mem_cgroup) return 0; css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - bio->bi_css = css; return __bio_associate_blkg_from_css(bio, css); } @@ -2085,8 +2055,7 @@ int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) rcu_read_lock(); - bio_associate_blkcg(bio, NULL); - blkcg = bio_blkcg(bio); + blkcg = css_to_blkcg(blkcg_get_css()); if (!blkcg->css.parent) { ret = bio_associate_blkg(bio, q->root_blkg); @@ -2110,30 +2079,27 @@ void bio_disassociate_task(struct bio *bio) put_io_context(bio->bi_ioc); bio->bi_ioc = NULL; } - if (bio->bi_css) { - css_put(bio->bi_css); - bio->bi_css = NULL; - } if (bio->bi_blkg) { + /* a ref is always taken on css */ + css_put(&bio_blkcg(bio)->css); blkg_put(bio->bi_blkg); bio->bi_blkg = NULL; } } /** - * bio_clone_blkcg_association - clone blkcg association from src to dst bio + * bio_clone_blkg_association - clone blkg association from src to dst bio * @dst: destination bio * @src: source bio */ -void bio_clone_blkcg_association(struct bio *dst, struct bio *src) +void bio_clone_blkg_association(struct bio *dst, struct bio *src) { - if (src->bi_css) - WARN_ON(bio_associate_blkcg(dst, src->bi_css)); - - if (src->bi_blkg) + if (src->bi_blkg) { + css_get(&bio_blkcg(src)->css); bio_associate_blkg(dst, src->bi_blkg); + } } -EXPORT_SYMBOL_GPL(bio_clone_blkcg_association); +EXPORT_SYMBOL_GPL(bio_clone_blkg_association); #endif /* CONFIG_BLK_CGROUP */ static void __init biovec_init_slabs(void) diff --git a/block/bounce.c b/block/bounce.c index 7a08703b1204..b30071ac4ec6 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -257,7 +257,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, } } - bio_clone_blkcg_association(bio, bio_src); + bio_clone_blkg_association(bio, bio_src); blkcg_bio_issue_init(bio); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ea9debf59b22..abad6d15f956 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -77,6 +77,7 @@ #include #include #include +#include #include "loop.h" @@ -1760,8 +1761,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && rq->bio && rq->bio->bi_css) { - cmd->css = rq->bio->bi_css; + if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) { + cmd->css = &bio_blkcg(rq->bio)->css; css_get(cmd->css); } else #endif diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ac1cffd2a09b..f3fb5bb8c82a 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) !discard_bio) continue; bio_chain(discard_bio, bio); - 
bio_clone_blkcg_association(discard_bio, bio); + bio_clone_blkg_association(discard_bio, bio); if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), diff --git a/include/linux/bio.h b/include/linux/bio.h index c73a870ebc0e..e973876625a8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -540,24 +540,21 @@ static inline int bio_associate_blkg_from_page(struct bio *bio, #endif #ifdef CONFIG_BLK_CGROUP -int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); int bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); int bio_associate_create_blkg(struct request_queue *q, struct bio *bio); void bio_disassociate_task(struct bio *bio); -void bio_clone_blkcg_association(struct bio *dst, struct bio *src); +void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline int bio_associate_blkcg(struct bio *bio, - struct cgroup_subsys_state *blkcg_css) { return 0; } static inline int bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { return 0; } static inline int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) { return 0; } static inline void bio_disassociate_task(struct bio *bio) { } -static inline void bio_clone_blkcg_association(struct bio *dst, - struct bio *src) { } +static inline void bio_clone_blkg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index a6b6e741a75e..c41cfcc2b4d8 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -308,8 +308,8 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) */ static inline struct blkcg *__bio_blkcg(struct bio *bio) { - if (bio && bio->bi_css) - return css_to_blkcg(bio->bi_css); + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; return css_to_blkcg(blkcg_css()); } @@ -323,8 +323,8 @@ static inline struct blkcg *__bio_blkcg(struct bio *bio) */ static inline struct blkcg *bio_blkcg(struct bio *bio) { - if (bio && bio->bi_css) - return css_to_blkcg(bio->bi_css); + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; return NULL; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f6dfb30737d8..9578c7ab1eb6 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -178,7 +178,6 @@ struct bio { * release. Read comment on top of bio_associate_current(). 
*/ struct io_context *bi_ioc; - struct cgroup_subsys_state *bi_css; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #endif diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 2868d85f1fb1..fac0ddf8a8e2 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; - if (!bio->bi_css) + if (!bio->bi_blkg) return NULL; - return cgroup_get_kernfs_id(bio->bi_css->cgroup); + return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup); } #else static union kernfs_node_id * -- cgit v1.2.3 From f0fcb3ec89f37167810e660b0595d9a6155d9807 Mon Sep 17 00:00:00 2001 From: "Dennis Zhou (Facebook)" Date: Tue, 11 Sep 2018 14:41:34 -0400 Subject: blkcg: remove additional reference to the css The previous patch in this series removed carrying around a pointer to the css in blkg. However, the blkg association logic still relied on taking a reference on the css to ensure we wouldn't fail in getting a reference for the blkg. Here the implicit dependency on the css is removed. The association continues to rely on the tryget logic walking up the blkg tree. This streamlines the three ways that association can happen: normal, swap, and writeback. Acked-by: Tejun Heo Signed-off-by: Dennis Zhou Signed-off-by: Jens Axboe --- block/bio.c | 62 +++++++++++++++++++++++++++------------------- include/linux/blk-cgroup.h | 52 +++----------------------------------- include/linux/cgroup.h | 2 ++ kernel/cgroup/cgroup.c | 48 ++++++++++++++++++++++++++++------- 4 files changed, 81 insertions(+), 83 deletions(-) (limited to 'kernel') diff --git a/block/bio.c b/block/bio.c index 71cfe3720ea7..c39251e69447 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1978,18 +1978,30 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) return 0; } +/** + * __bio_associate_blkg_from_css - internal blkg association function + * + * This in the core association function that all association paths rely on. + * A blkg reference is taken which is released upon freeing of the bio. + */ static int __bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { + struct request_queue *q = bio->bi_disk->queue; struct blkcg_gq *blkg; + int ret; rcu_read_lock(); - blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue); + if (!css || !css->parent) + blkg = q->root_blkg; + else + blkg = blkg_lookup_create(css_to_blkcg(css), q); - rcu_read_unlock(); + ret = bio_associate_blkg(bio, blkg); - return bio_associate_blkg(bio, blkg); + rcu_read_unlock(); + return ret; } /** @@ -1998,13 +2010,14 @@ static int __bio_associate_blkg_from_css(struct bio *bio, * @css: target css * * Associate @bio with the blkg found by combining the css's blkg and the - * request_queue of the @bio. This takes a reference on the css that will - * be put upon freeing of @bio. + * request_queue of the @bio. This falls back to the queue's root_blkg if + * the association fails with the css. */ int bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { - css_get(css); + if (unlikely(bio->bi_blkg)) + return -EBUSY; return __bio_associate_blkg_from_css(bio, css); } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); @@ -2016,22 +2029,29 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); * @page: the page to lookup the blkcg from * * Associate @bio with the blkg from @page's owning memcg and the respective - * request_queue. 
This works like every other associate function wrt - * references. + * request_queue. If cgroup_e_css returns NULL, fall back to the queue's + * root_blkg. * * Note: this must be called after bio has an associated device. */ int bio_associate_blkg_from_page(struct bio *bio, struct page *page) { struct cgroup_subsys_state *css; + int ret; if (unlikely(bio->bi_blkg)) return -EBUSY; if (!page->mem_cgroup) return 0; - css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - return __bio_associate_blkg_from_css(bio, css); + rcu_read_lock(); + + css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); + + ret = __bio_associate_blkg_from_css(bio, css); + + rcu_read_unlock(); + return ret; } #endif /* CONFIG_MEMCG */ @@ -2041,12 +2061,12 @@ int bio_associate_blkg_from_page(struct bio *bio, struct page *page) * @bio: target bio * * Associate @bio with the blkg found from the bio's css and the request_queue. - * If one is not found, bio_lookup_blkg creates the blkg. + * If one is not found, bio_lookup_blkg creates the blkg. This falls back to + * the queue's root_blkg if association fails. */ int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) { - struct blkcg *blkcg; - struct blkcg_gq *blkg; + struct cgroup_subsys_state *css; int ret = 0; /* someone has already associated this bio with a blkg */ @@ -2055,15 +2075,9 @@ int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) rcu_read_lock(); - blkcg = css_to_blkcg(blkcg_get_css()); + css = blkcg_css(); - if (!blkcg->css.parent) { - ret = bio_associate_blkg(bio, q->root_blkg); - } else { - blkg = blkg_lookup_create(blkcg, q); - - ret = bio_associate_blkg(bio, blkg); - } + ret = __bio_associate_blkg_from_css(bio, css); rcu_read_unlock(); return ret; @@ -2080,8 +2094,6 @@ void bio_disassociate_task(struct bio *bio) bio->bi_ioc = NULL; } if (bio->bi_blkg) { - /* a ref is always taken on css */ - css_put(&bio_blkcg(bio)->css); blkg_put(bio->bi_blkg); bio->bi_blkg = NULL; } @@ -2094,10 +2106,8 @@ void bio_disassociate_task(struct bio *bio) */ void bio_clone_blkg_association(struct bio *dst, struct bio *src) { - if (src->bi_blkg) { - css_get(&bio_blkcg(src)->css); + if (src->bi_blkg) bio_associate_blkg(dst, src->bi_blkg); - } } EXPORT_SYMBOL_GPL(bio_clone_blkg_association); #endif /* CONFIG_BLK_CGROUP */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index c41cfcc2b4d8..2951ea3541b1 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -249,47 +249,6 @@ static inline struct cgroup_subsys_state *blkcg_css(void) return task_css(current, io_cgrp_id); } -/** - * blkcg_get_css - find and get a reference to the css - * - * Find the css associated with either the kthread or the current task. - * This takes a reference on the blkcg which will need to be managed by the - * caller. - */ -static inline struct cgroup_subsys_state *blkcg_get_css(void) -{ - struct cgroup_subsys_state *css; - - rcu_read_lock(); - - css = kthread_blkcg(); - if (css) { - css_get(css); - } else { - /* - * This is a bit complicated. It is possible task_css is seeing - * an old css pointer here. This is caused by the current - * thread migrating away from this cgroup and this cgroup dying. - * css_tryget() will fail when trying to take a ref on a cgroup - * that's ref count has hit 0. - * - * Therefore, if it does fail, this means current must have - * been swapped away already and this is waiting for it to - * propagate on the polling cpu. Hence the use of cpu_relax(). 
- */ - while (true) { - css = task_css(current, io_cgrp_id); - if (likely(css_tryget(css))) - break; - cpu_relax(); - } - } - - rcu_read_unlock(); - - return css; -} - static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; @@ -628,10 +587,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, rcu_read_lock(); blkcg = bio_blkcg(bio); - if (blkcg) - css_get(&blkcg->css); - else - blkcg = css_to_blkcg(blkcg_get_css()); + if (!blkcg) + blkcg = css_to_blkcg(blkcg_css()); /* bypass blkg lookup and use @q->root_rl directly for root */ if (blkcg == &blkcg_root) @@ -646,7 +603,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, if (unlikely(!blkg)) goto root_rl; - blkg_get(blkg); + if (!blkg_try_get(blkg)) + goto root_rl; rcu_read_unlock(); return &blkg->rl; root_rl: @@ -663,8 +621,6 @@ root_rl: */ static inline void blk_put_rl(struct request_list *rl) { - /* an additional ref is always taken for rl */ - css_put(&rl->blkg->blkcg->css); if (rl->blkg->blkcg != &blkcg_root) blkg_put(rl->blkg); } diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 32c553556bbd..b8bcbdeb2eac 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -93,6 +93,8 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); +struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index aae10baf1902..48fb22e49467 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -492,7 +492,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, } /** - * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem + * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss * @cgrp: the cgroup of interest * @ss: the subsystem of interest (%NULL returns @cgrp->self) * @@ -501,8 +501,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, * enabled. If @ss is associated with the hierarchy @cgrp is on, this * function is guaranteed to return non-NULL css. */ -static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, - struct cgroup_subsys *ss) +static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, + struct cgroup_subsys *ss) { lockdep_assert_held(&cgroup_mutex); @@ -522,6 +522,35 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, return cgroup_css(cgrp, ss); } +/** + * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem + * @cgrp: the cgroup of interest + * @ss: the subsystem of interest + * + * Find and get the effective css of @cgrp for @ss. The effective css is + * defined as the matching css of the nearest ancestor including self which + * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, + * the root css is returned, so this function always returns a valid css. + * + * The returned css is not guaranteed to be online, and therefore it is the + * callers responsiblity to tryget a reference for it. 
+ */ +struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, + struct cgroup_subsys *ss) +{ + struct cgroup_subsys_state *css; + + do { + css = cgroup_css(cgrp, ss); + + if (css) + return css; + cgrp = cgroup_parent(cgrp); + } while (cgrp); + + return init_css_set.subsys[ss->id]; +} + /** * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem * @cgrp: the cgroup of interest @@ -604,10 +633,11 @@ EXPORT_SYMBOL_GPL(of_css); * * Should be called under cgroup_[tree_]mutex. */ -#define for_each_e_css(css, ssid, cgrp) \ - for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ - if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \ - ; \ +#define for_each_e_css(css, ssid, cgrp) \ + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ + if (!((css) = cgroup_e_css_by_mask(cgrp, \ + cgroup_subsys[(ssid)]))) \ + ; \ else /** @@ -1006,7 +1036,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * @ss is in this hierarchy, so we want the * effective css from @cgrp. */ - template[i] = cgroup_e_css(cgrp, ss); + template[i] = cgroup_e_css_by_mask(cgrp, ss); } else { /* * @ss is not in this hierarchy, so we don't want @@ -3019,7 +3049,7 @@ static int cgroup_apply_control(struct cgroup *cgrp) return ret; /* - * At this point, cgroup_e_css() results reflect the new csses + * At this point, cgroup_e_css_by_mask() results reflect the new csses * making the following cgroup_update_dfl_csses() properly update * css associations of all tasks in the subtree. */ -- cgit v1.2.3 From d6b183eda466415bb5defcf9afe4cb64734839e8 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 24 Aug 2018 16:20:28 -0400 Subject: tracing/kprobe: Remove unneeded extra strchr() from create_trace_kprobe() By utilizing a temporary variable, we can avoid adding another call to strchr(). Instead, save the first call to a temp variable, and then use that variable as the reference to set the event variable. Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c30032367aab..508396edc56a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -749,10 +749,13 @@ static int create_trace_kprobe(int argc, char **argv) } if (event) { - if (strchr(event, '/')) { + char *slash; + + slash = strchr(event, '/'); + if (slash) { group = event; - event = strchr(group, '/') + 1; - event[-1] = '\0'; + event = slash + 1; + slash[0] = '\0'; if (strlen(group) == 0) { pr_info("Group name is not specified\n"); return -EINVAL; -- cgit v1.2.3 From 1cc33161a83d20b5462b1e93f95d3ce6388079ee Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Mon, 20 Aug 2018 10:12:47 +0530 Subject: uprobes: Support SDT markers having reference count (semaphore) Userspace Statically Defined Tracepoints[1] are dtrace style markers inside userspace applications. Applications like PostgreSQL, MySQL, Pthread, Perl, Python, Java, Ruby, Node.js, libvirt, QEMU, glib etc have these markers embedded in them. These markers are added by developer at important places in the code. Each marker source expands to a single nop instruction in the compiled code but there may be additional overhead for computing the marker arguments which expands to couple of instructions. 
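As an illustration (the provider and probe names below are invented, not taken from this patch), such a marker is typically placed in application code with the systemtap sys/sdt.h helpers:

	#include <sys/sdt.h>

	static void tick(int count)
	{
		/* Emits a nop at the probe site plus the argument setup. */
		DTRACE_PROBE1(myapp, tick, count);
	}
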
In case the overhead is more, execution of it can be omitted by runtime if() condition when no one is tracing on the marker: if (reference_counter > 0) { Execute marker instructions; } Default value of reference counter is 0. Tracer has to increment the reference counter before tracing on a marker and decrement it when done with the tracing. Implement the reference counter logic in core uprobe. User will be able to use it from trace_uprobe as well as from kernel module. New trace_uprobe definition with reference counter will now be: :[(ref_ctr_offset)] where ref_ctr_offset is an optional field. For kernel module, new variant of uprobe_register() has been introduced: uprobe_register_refctr(inode, offset, ref_ctr_offset, consumer) No new variant for uprobe_unregister() because it's assumed to have only one reference counter for one uprobe. [1] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation Note: 'reference counter' is called as 'semaphore' in original Dtrace (or Systemtap, bcc and even in ELF) documentation and code. But the term 'semaphore' is misleading in this context. This is just a counter used to hold number of tracers tracing on a marker. This is not really used for any synchronization. So we are calling it a 'reference counter' in kernel / perf code. Link: http://lkml.kernel.org/r/20180820044250.11659-2-ravi.bangoria@linux.ibm.com Reviewed-by: Masami Hiramatsu [Only trace_uprobe.c] Reviewed-by: Oleg Nesterov Reviewed-by: Song Liu Tested-by: Song Liu Signed-off-by: Ravi Bangoria Signed-off-by: Steven Rostedt (VMware) --- include/linux/uprobes.h | 5 + kernel/events/uprobes.c | 259 ++++++++++++++++++++++++++++++++++++++++++-- kernel/trace/trace.c | 2 +- kernel/trace/trace_uprobe.c | 38 ++++++- 4 files changed, 293 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index bb9d2084af03..103a48a48872 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -123,6 +123,7 @@ extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); +extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); @@ -160,6 +161,10 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } +static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) +{ + return -ENOSYS; +} static inline int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3207a4d26849..934feb39f6be 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -73,6 +73,7 @@ struct uprobe { struct uprobe_consumer *consumers; struct inode *inode; /* Also hold a ref to inode */ loff_t offset; + loff_t ref_ctr_offset; unsigned long flags; /* @@ -88,6 +89,15 @@ struct uprobe { struct arch_uprobe arch; }; +struct delayed_uprobe { + struct list_head list; + 
struct uprobe *uprobe; + struct mm_struct *mm; +}; + +static DEFINE_MUTEX(delayed_uprobe_lock); +static LIST_HEAD(delayed_uprobe_list); + /* * Execute out of line area: anonymous executable mapping installed * by the probed task to execute the copy of the original instruction @@ -282,6 +292,166 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t return 1; } +static struct delayed_uprobe * +delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm) +{ + struct delayed_uprobe *du; + + list_for_each_entry(du, &delayed_uprobe_list, list) + if (du->uprobe == uprobe && du->mm == mm) + return du; + return NULL; +} + +static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm) +{ + struct delayed_uprobe *du; + + if (delayed_uprobe_check(uprobe, mm)) + return 0; + + du = kzalloc(sizeof(*du), GFP_KERNEL); + if (!du) + return -ENOMEM; + + du->uprobe = uprobe; + du->mm = mm; + list_add(&du->list, &delayed_uprobe_list); + return 0; +} + +static void delayed_uprobe_delete(struct delayed_uprobe *du) +{ + if (WARN_ON(!du)) + return; + list_del(&du->list); + kfree(du); +} + +static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm) +{ + struct list_head *pos, *q; + struct delayed_uprobe *du; + + if (!uprobe && !mm) + return; + + list_for_each_safe(pos, q, &delayed_uprobe_list) { + du = list_entry(pos, struct delayed_uprobe, list); + + if (uprobe && du->uprobe != uprobe) + continue; + if (mm && du->mm != mm) + continue; + + delayed_uprobe_delete(du); + } +} + +static bool valid_ref_ctr_vma(struct uprobe *uprobe, + struct vm_area_struct *vma) +{ + unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); + + return uprobe->ref_ctr_offset && + vma->vm_file && + file_inode(vma->vm_file) == uprobe->inode && + (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && + vma->vm_start <= vaddr && + vma->vm_end > vaddr; +} + +static struct vm_area_struct * +find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm) +{ + struct vm_area_struct *tmp; + + for (tmp = mm->mmap; tmp; tmp = tmp->vm_next) + if (valid_ref_ctr_vma(uprobe, tmp)) + return tmp; + + return NULL; +} + +static int +__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) +{ + void *kaddr; + struct page *page; + struct vm_area_struct *vma; + int ret; + short *ptr; + + if (!vaddr || !d) + return -EINVAL; + + ret = get_user_pages_remote(NULL, mm, vaddr, 1, + FOLL_WRITE, &page, &vma, NULL); + if (unlikely(ret <= 0)) { + /* + * We are asking for 1 page. If get_user_pages_remote() fails, + * it may return 0, in that case we have to return error. + */ + return ret == 0 ? -EBUSY : ret; + } + + kaddr = kmap_atomic(page); + ptr = kaddr + (vaddr & ~PAGE_MASK); + + if (unlikely(*ptr + d < 0)) { + pr_warn("ref_ctr going negative. vaddr: 0x%lx, " + "curr val: %d, delta: %d\n", vaddr, *ptr, d); + ret = -EINVAL; + goto out; + } + + *ptr += d; + ret = 0; +out: + kunmap_atomic(kaddr); + put_page(page); + return ret; +} + +static void update_ref_ctr_warn(struct uprobe *uprobe, + struct mm_struct *mm, short d) +{ + pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " + "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n", + d > 0 ? 
"increment" : "decrement", uprobe->inode->i_ino, + (unsigned long long) uprobe->offset, + (unsigned long long) uprobe->ref_ctr_offset, mm); +} + +static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm, + short d) +{ + struct vm_area_struct *rc_vma; + unsigned long rc_vaddr; + int ret = 0; + + rc_vma = find_ref_ctr_vma(uprobe, mm); + + if (rc_vma) { + rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset); + ret = __update_ref_ctr(mm, rc_vaddr, d); + if (ret) + update_ref_ctr_warn(uprobe, mm, d); + + if (d > 0) + return ret; + } + + mutex_lock(&delayed_uprobe_lock); + if (d > 0) + ret = delayed_uprobe_add(uprobe, mm); + else + delayed_uprobe_remove(uprobe, mm); + mutex_unlock(&delayed_uprobe_lock); + + return ret; +} + /* * NOTE: * Expect the breakpoint instruction to be the smallest size instruction for @@ -302,9 +472,13 @@ static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t opcode) { + struct uprobe *uprobe; struct page *old_page, *new_page; struct vm_area_struct *vma; - int ret; + int ret, is_register, ref_ctr_updated = 0; + + is_register = is_swbp_insn(&opcode); + uprobe = container_of(auprobe, struct uprobe, arch); retry: /* Read the page with vaddr into memory */ @@ -317,6 +491,15 @@ retry: if (ret <= 0) goto put_old; + /* We are going to replace instruction, update ref_ctr. */ + if (!ref_ctr_updated && uprobe->ref_ctr_offset) { + ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1); + if (ret) + goto put_old; + + ref_ctr_updated = 1; + } + ret = anon_vma_prepare(vma); if (ret) goto put_old; @@ -337,6 +520,11 @@ put_old: if (unlikely(ret == -EAGAIN)) goto retry; + + /* Revert back reference counter if instruction update failed. */ + if (ret && is_register && ref_ctr_updated) + update_ref_ctr(uprobe, mm, -1); + return ret; } @@ -378,8 +566,15 @@ static struct uprobe *get_uprobe(struct uprobe *uprobe) static void put_uprobe(struct uprobe *uprobe) { - if (atomic_dec_and_test(&uprobe->ref)) + if (atomic_dec_and_test(&uprobe->ref)) { + /* + * If application munmap(exec_vma) before uprobe_unregister() + * gets called, we don't get a chance to remove uprobe from + * delayed_uprobe_list from remove_breakpoint(). Do it here. 
+ */ + delayed_uprobe_remove(uprobe, NULL); kfree(uprobe); + } } static int match_uprobe(struct uprobe *l, struct uprobe *r) @@ -484,7 +679,8 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe) return u; } -static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) +static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, + loff_t ref_ctr_offset) { struct uprobe *uprobe, *cur_uprobe; @@ -494,6 +690,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) uprobe->inode = inode; uprobe->offset = offset; + uprobe->ref_ctr_offset = ref_ctr_offset; init_rwsem(&uprobe->register_rwsem); init_rwsem(&uprobe->consumer_rwsem); @@ -895,7 +1092,7 @@ EXPORT_SYMBOL_GPL(uprobe_unregister); * else return 0 (success) */ static int __uprobe_register(struct inode *inode, loff_t offset, - struct uprobe_consumer *uc) + loff_t ref_ctr_offset, struct uprobe_consumer *uc) { struct uprobe *uprobe; int ret; @@ -912,7 +1109,7 @@ static int __uprobe_register(struct inode *inode, loff_t offset, return -EINVAL; retry: - uprobe = alloc_uprobe(inode, offset); + uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); if (!uprobe) return -ENOMEM; /* @@ -938,10 +1135,17 @@ static int __uprobe_register(struct inode *inode, loff_t offset, int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { - return __uprobe_register(inode, offset, uc); + return __uprobe_register(inode, offset, 0, uc); } EXPORT_SYMBOL_GPL(uprobe_register); +int uprobe_register_refctr(struct inode *inode, loff_t offset, + loff_t ref_ctr_offset, struct uprobe_consumer *uc) +{ + return __uprobe_register(inode, offset, ref_ctr_offset, uc); +} +EXPORT_SYMBOL_GPL(uprobe_register_refctr); + /* * uprobe_apply - unregister an already registered probe. * @inode: the file in which the probe has to be removed. @@ -1060,6 +1264,35 @@ static void build_probe_list(struct inode *inode, spin_unlock(&uprobes_treelock); } +/* @vma contains reference counter, not the probed instruction. */ +static int delayed_ref_ctr_inc(struct vm_area_struct *vma) +{ + struct list_head *pos, *q; + struct delayed_uprobe *du; + unsigned long vaddr; + int ret = 0, err = 0; + + mutex_lock(&delayed_uprobe_lock); + list_for_each_safe(pos, q, &delayed_uprobe_list) { + du = list_entry(pos, struct delayed_uprobe, list); + + if (du->mm != vma->vm_mm || + !valid_ref_ctr_vma(du->uprobe, vma)) + continue; + + vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); + ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); + if (ret) { + update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); + if (!err) + err = ret; + } + delayed_uprobe_delete(du); + } + mutex_unlock(&delayed_uprobe_lock); + return err; +} + /* * Called from mmap_region/vma_adjust with mm->mmap_sem acquired. 
* @@ -1072,7 +1305,15 @@ int uprobe_mmap(struct vm_area_struct *vma) struct uprobe *uprobe, *u; struct inode *inode; - if (no_uprobe_events() || !valid_vma(vma, true)) + if (no_uprobe_events()) + return 0; + + if (vma->vm_file && + (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && + test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) + delayed_ref_ctr_inc(vma); + + if (!valid_vma(vma, true)) return 0; inode = file_inode(vma->vm_file); @@ -1246,6 +1487,10 @@ void uprobe_clear_state(struct mm_struct *mm) { struct xol_area *area = mm->uprobes_state.xol_area; + mutex_lock(&delayed_uprobe_lock); + delayed_uprobe_remove(NULL, mm); + mutex_unlock(&delayed_uprobe_lock); + if (!area) return; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bf6f1d70484d..147be8523560 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4621,7 +4621,7 @@ static const char readme_msg[] = "place (kretprobe): [:][+]|\n" #endif #ifdef CONFIG_UPROBE_EVENTS - "\t place: :\n" + " place (uprobe): :[(ref_ctr_offset)]\n" #endif "\t args: =fetcharg[:type]\n" "\t fetcharg: %, @

, @[+|-],\n" diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e696667da29a..7b85172beab6 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -47,6 +47,7 @@ struct trace_uprobe { struct inode *inode; char *filename; unsigned long offset; + unsigned long ref_ctr_offset; unsigned long nhit; struct trace_probe tp; }; @@ -352,10 +353,10 @@ end: static int create_trace_uprobe(int argc, char **argv) { struct trace_uprobe *tu; - char *arg, *event, *group, *filename; + char *arg, *event, *group, *filename, *rctr, *rctr_end; char buf[MAX_EVENT_NAME_LEN]; struct path path; - unsigned long offset; + unsigned long offset, ref_ctr_offset; bool is_delete, is_return; int i, ret; @@ -364,6 +365,7 @@ static int create_trace_uprobe(int argc, char **argv) is_return = false; event = NULL; group = NULL; + ref_ctr_offset = 0; /* argc must be >= 1 */ if (argv[0][0] == '-') @@ -438,6 +440,26 @@ static int create_trace_uprobe(int argc, char **argv) goto fail_address_parse; } + /* Parse reference counter offset if specified. */ + rctr = strchr(arg, '('); + if (rctr) { + rctr_end = strchr(rctr, ')'); + if (rctr > rctr_end || *(rctr_end + 1) != 0) { + ret = -EINVAL; + pr_info("Invalid reference counter offset.\n"); + goto fail_address_parse; + } + + *rctr++ = '\0'; + *rctr_end = '\0'; + ret = kstrtoul(rctr, 0, &ref_ctr_offset); + if (ret) { + pr_info("Invalid reference counter offset.\n"); + goto fail_address_parse; + } + } + + /* Parse uprobe offset. */ ret = kstrtoul(arg, 0, &offset); if (ret) goto fail_address_parse; @@ -472,6 +494,7 @@ static int create_trace_uprobe(int argc, char **argv) goto fail_address_parse; } tu->offset = offset; + tu->ref_ctr_offset = ref_ctr_offset; tu->path = path; tu->filename = kstrdup(filename, GFP_KERNEL); @@ -590,6 +613,9 @@ static int probes_seq_show(struct seq_file *m, void *v) trace_event_name(&tu->tp.call), tu->filename, (int)(sizeof(void *) * 2), tu->offset); + if (tu->ref_ctr_offset) + seq_printf(m, "(0x%lx)", tu->ref_ctr_offset); + for (i = 0; i < tu->tp.nr_args; i++) seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); @@ -905,7 +931,13 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, tu->consumer.filter = filter; tu->inode = d_real_inode(tu->path.dentry); - ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); + if (tu->ref_ctr_offset) { + ret = uprobe_register_refctr(tu->inode, tu->offset, + tu->ref_ctr_offset, &tu->consumer); + } else { + ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); + } + if (ret) goto err_buffer; -- cgit v1.2.3 From 22bad38286d9a652d7061a02f9743bb2ebb84e59 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Mon, 20 Aug 2018 10:12:48 +0530 Subject: uprobes/sdt: Prevent multiple reference counter for same uprobe We assume to have only one reference counter for one uprobe. Don't allow user to register multiple uprobes having same inode+offset but different reference counter. 
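A hedged sketch of the case that is now rejected (the offsets and consumers below are hypothetical):

	/* First tracer: probe at offset 0x6e4, reference counter at 0x10036. */
	ret = uprobe_register_refctr(inode, 0x6e4, 0x10036, &uc1);

	/*
	 * Same inode and offset but a different ref_ctr_offset: instead of
	 * silently installing a second counter, registration now fails with
	 * -EINVAL and a "ref_ctr_offset mismatch" warning is logged.
	 */
	ret = uprobe_register_refctr(inode, 0x6e4, 0xfffff, &uc2);
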
Link: http://lkml.kernel.org/r/20180820044250.11659-3-ravi.bangoria@linux.ibm.com Acked-by: Srikar Dronamraju Reviewed-by: Oleg Nesterov Reviewed-by: Song Liu Tested-by: Song Liu Signed-off-by: Ravi Bangoria Signed-off-by: Steven Rostedt (VMware) --- kernel/events/uprobes.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'kernel') diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 934feb39f6be..96fb51f3994f 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -679,6 +679,16 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe) return u; } +static void +ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe) +{ + pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx " + "ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n", + uprobe->inode->i_ino, (unsigned long long) uprobe->offset, + (unsigned long long) cur_uprobe->ref_ctr_offset, + (unsigned long long) uprobe->ref_ctr_offset); +} + static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, loff_t ref_ctr_offset) { @@ -698,6 +708,12 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, cur_uprobe = insert_uprobe(uprobe); /* a uprobe exists for this inode:offset combination */ if (cur_uprobe) { + if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) { + ref_ctr_mismatch_warn(cur_uprobe, uprobe); + put_uprobe(cur_uprobe); + kfree(uprobe); + return ERR_PTR(-EINVAL); + } kfree(uprobe); uprobe = cur_uprobe; } @@ -1112,6 +1128,9 @@ static int __uprobe_register(struct inode *inode, loff_t offset, uprobe = alloc_uprobe(inode, offset, ref_ctr_offset); if (!uprobe) return -ENOMEM; + if (IS_ERR(uprobe)) + return PTR_ERR(uprobe); + /* * We can race with uprobe_unregister()->delete_uprobe(). * Check uprobe_is_active() and retry if it is false. -- cgit v1.2.3 From ccea8727dc27d8f46df6557f9260ab32760ef409 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Mon, 20 Aug 2018 10:12:49 +0530 Subject: trace_uprobe/sdt: Prevent multiple reference counter for same uprobe We assume to have only one reference counter for one uprobe. Don't allow user to add multiple trace_uprobe entries having same inode+offset but different reference counter. Ex, # echo "p:sdt_tick/loop2 /home/ravi/tick:0x6e4(0x10036)" > uprobe_events # echo "p:sdt_tick/loop2_1 /home/ravi/tick:0x6e4(0xfffff)" >> uprobe_events bash: echo: write error: Invalid argument # dmesg trace_kprobe: Reference counter offset mismatch. There is one exception though: When user is trying to replace the old entry with the new one, we allow this if the new entry does not conflict with any other existing entries. Link: http://lkml.kernel.org/r/20180820044250.11659-4-ravi.bangoria@linux.ibm.com Acked-by: Srikar Dronamraju Reviewed-by: Song Liu Reviewed-by: Oleg Nesterov Tested-by: Song Liu Signed-off-by: Ravi Bangoria Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_uprobe.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 7b85172beab6..3a7c73c40007 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -312,6 +312,35 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu) return 0; } +/* + * Uprobe with multiple reference counter is not allowed. i.e. + * If inode and offset matches, reference counter offset *must* + * match as well. 
Though, there is one exception: If user is + * replacing old trace_uprobe with new one(same group/event), + * then we allow same uprobe with new reference counter as far + * as the new one does not conflict with any other existing + * ones. + */ +static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new) +{ + struct trace_uprobe *tmp, *old = NULL; + struct inode *new_inode = d_real_inode(new->path.dentry); + + old = find_probe_event(trace_event_name(&new->tp.call), + new->tp.call.class->system); + + list_for_each_entry(tmp, &uprobe_list, list) { + if ((old ? old != tmp : true) && + new_inode == d_real_inode(tmp->path.dentry) && + new->offset == tmp->offset && + new->ref_ctr_offset != tmp->ref_ctr_offset) { + pr_warn("Reference counter offset mismatch."); + return ERR_PTR(-EINVAL); + } + } + return old; +} + /* Register a trace_uprobe and probe_event */ static int register_trace_uprobe(struct trace_uprobe *tu) { @@ -321,8 +350,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) mutex_lock(&uprobe_lock); /* register as an event */ - old_tu = find_probe_event(trace_event_name(&tu->tp.call), - tu->tp.call.class->system); + old_tu = find_old_trace_uprobe(tu); + if (IS_ERR(old_tu)) { + ret = PTR_ERR(old_tu); + goto end; + } + if (old_tu) { /* delete old event */ ret = unregister_trace_uprobe(old_tu); -- cgit v1.2.3 From 974c24c5bed75b53e229a6f68a0533b6d5f48feb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Sep 2018 11:00:49 +0200 Subject: dma-mapping: add the missing ARCH_HAS_SYNC_DMA_FOR_CPU_ALL declaration The patch adding the infrastructure failed to actually add the symbol declaration, oops.. Fixes: faef87723a ("dma-noncoherent: add a arch_sync_dma_for_cpu_all hook") Signed-off-by: Christoph Hellwig Reviewed-by: Paul Burton Acked-by: Florian Fainelli Signed-off-by: Christoph Hellwig --- kernel/dma/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 9bd54304446f..1b1d63b3634b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU bool select NEED_DMA_MAP_STATE +config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL + bool + config DMA_DIRECT_OPS bool depends on HAS_DMA -- cgit v1.2.3 From ca9184f0797c4893db928527fb2b6999eb753ccb Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 23 Jul 2018 11:31:59 +0300 Subject: tracing: Trivia spelling fix containerof() -> container_of() This is the only location on kernel that has wrong spelling of the container_of() helper. Fix it. Signed-off-by: Andy Shevchenko Acked-by: Steven Rostedt (VMware) Signed-off-by: Andy Shevchenko --- kernel/trace/trace_printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index b0875b327f5c..c3fd849d4a8f 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -115,7 +115,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, * section, then we need to read the link list pointers. The trick is * we pass the address of the string to the seq function just like * we do for the kernel core formats. To get back the structure that - * holds the format, we simply use containerof() and then go to the + * holds the format, we simply use container_of() and then go to the * next format in the list. 
*/ static const char ** -- cgit v1.2.3 From dbfe2953f63c640463c630746cd5d9de8b2f63ae Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 25 Sep 2018 14:38:18 +0200 Subject: x86/speculation: Apply IBPB more strictly to avoid cross-process data leak Currently, IBPB is only issued in cases when switching into a non-dumpable process, the rationale being to protect such 'important and security sensitive' processess (such as GPG) from data leaking into a different userspace process via spectre v2. This is however completely insufficient to provide proper userspace-to-userpace spectrev2 protection, as any process can poison branch buffers before being scheduled out, and the newly scheduled process immediately becomes spectrev2 victim. In order to minimize the performance impact (for usecases that do require spectrev2 protection), issue the barrier only in cases when switching between processess where the victim can't be ptraced by the potential attacker (as in such cases, the attacker doesn't have to bother with branch buffers at all). [ tglx: Split up PTRACE_MODE_NOACCESS_CHK into PTRACE_MODE_SCHED and PTRACE_MODE_IBPB to be able to do ptrace() context tracking reasonably fine-grained ] Fixes: 18bf3c3ea8 ("x86/speculation: Use Indirect Branch Prediction Barrier in context switch") Originally-by: Tim Chen Signed-off-by: Jiri Kosina Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Andrea Arcangeli Cc: "WoodhouseDavid" Cc: Andi Kleen Cc: "SchauflerCasey" Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251437340.15880@cbobk.fhfr.pm --- arch/x86/mm/tlb.c | 31 ++++++++++++++++++++----------- include/linux/ptrace.h | 21 +++++++++++++++++++-- kernel/ptrace.c | 10 ++++++++++ 3 files changed, 49 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index e96b99eb800c..073b8df349a0 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -180,6 +181,19 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) } } +static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id) +{ + /* + * Check if the current (previous) task has access to the memory + * of the @tsk (next) task. If access is denied, make sure to + * issue a IBPB to stop user->user Spectre-v2 attacks. + * + * Note: __ptrace_may_access() returns 0 or -ERRNO. + */ + return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id && + ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB)); +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -262,18 +276,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * one process from doing Spectre-v2 attacks on another. * * As an optimization, flush indirect branches only when - * switching into processes that disable dumping. This - * protects high value processes like gpg, without having - * too high performance overhead. IBPB is *expensive*! - * - * This will not flush branches when switching into kernel - * threads. It will also not flush if we switch to idle - * thread and back to the same process. It will flush if we - * switch to a different non-dumpable process. + * switching into a processes that can't be ptrace by the + * current one (as in such case, attacker has much more + * convenient way how to tamper with the next process than + * branch buffer poisoning). 
*/ - if (tsk && tsk->mm && - tsk->mm->context.ctx_id != last_ctx_id && - get_dumpable(tsk->mm) != SUID_DUMP_USER) + if (static_cpu_has(X86_FEATURE_USE_IBPB) && + ibpb_needed(tsk, last_ctx_id)) indirect_branch_prediction_barrier(); if (IS_ENABLED(CONFIG_VMAP_STACK)) { diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 4f36431c380b..e5e5ef513df3 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -62,14 +62,17 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 -#define PTRACE_MODE_FSCREDS 0x08 -#define PTRACE_MODE_REALCREDS 0x10 +#define PTRACE_MODE_FSCREDS 0x08 +#define PTRACE_MODE_REALCREDS 0x10 +#define PTRACE_MODE_SCHED 0x20 +#define PTRACE_MODE_IBPB 0x40 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) +#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB) /** * ptrace_may_access - check whether the caller is permitted to access @@ -87,6 +90,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); +/** + * ptrace_may_access - check whether the caller is permitted to access + * a target task. + * @task: target task + * @mode: selects type of access and caller credentials + * + * Returns true on success, false on denial. + * + * Similar to ptrace_may_access(). Only to be called from context switch + * code. Does not call into audit and the regular LSM hooks due to locking + * constraints. + */ +extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode); + static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 21fec73d45d4..99cfddde6a55 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -261,6 +261,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state) static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode) { + if (mode & PTRACE_MODE_SCHED) + return false; + if (mode & PTRACE_MODE_NOAUDIT) return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE); else @@ -328,9 +331,16 @@ ok: !ptrace_has_cap(mm->user_ns, mode))) return -EPERM; + if (mode & PTRACE_MODE_SCHED) + return 0; return security_ptrace_access_check(task, mode); } +bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode) +{ + return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED); +} + bool ptrace_may_access(struct task_struct *task, unsigned int mode) { int err; -- cgit v1.2.3 From 53c613fe6349994f023245519265999eed75957f Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 25 Sep 2018 14:38:55 +0200 Subject: x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation STIBP is a feature provided by certain Intel ucodes / CPUs. This feature (once enabled) prevents cross-hyperthread control of decisions made by indirect branch predictors. 
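As an illustration of what the mitigation boils down to (a condensed sketch only; the real logic is in the arch_smt_update() hunk below, and set_stibp() is a hypothetical wrapper that is not part of the patch):

	/* Illustrative sketch: flip the STIBP bit in the preserved SPEC_CTRL
	 * base value and push the new value out to every online CPU. */
	static void set_stibp(bool enable)
	{
		u64 mask = x86_spec_ctrl_base;

		if (enable)
			mask |= SPEC_CTRL_STIBP;
		else
			mask &= ~SPEC_CTRL_STIBP;

		if (mask == x86_spec_ctrl_base)
			return;				/* nothing to change */

		x86_spec_ctrl_base = mask;
		on_each_cpu(update_stibp_msr, NULL, 1);	/* wrmsrl() on each CPU */
	}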
Enable this feature if - the CPU is vulnerable to spectre v2 - the CPU supports SMT and has SMT siblings online - spectre_v2 mitigation autoselection is enabled (default) After some previous discussion, this leaves STIBP on all the time, as wrmsr on crossing kernel boundary is a no-no. This could perhaps later be a bit more optimized (like disabling it in NOHZ, experiment with disabling it in idle, etc) if needed. Note that the synchronization of the mask manipulation via newly added spec_ctrl_mutex is currently not strictly needed, as the only updater is already being serialized by cpu_add_remove_lock, but let's make this a little bit more future-proof. Signed-off-by: Jiri Kosina Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Andrea Arcangeli Cc: "WoodhouseDavid" Cc: Andi Kleen Cc: Tim Chen Cc: "SchauflerCasey" Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@cbobk.fhfr.pm --- arch/x86/kernel/cpu/bugs.c | 57 +++++++++++++++++++++++++++++++++++++++++----- kernel/cpu.c | 11 ++++++++- 2 files changed, 61 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 40bdaea97fe7..53eb14a65610 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -35,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); -/* - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any - * writes to SPEC_CTRL contain whatever reserved bits have been set. - */ -u64 __ro_after_init x86_spec_ctrl_base; +/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ +u64 x86_spec_ctrl_base; EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); +static DEFINE_MUTEX(spec_ctrl_mutex); /* * The vendor and possibly platform specific bits which can be modified in @@ -325,6 +323,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return cmd; } +static bool stibp_needed(void) +{ + if (spectre_v2_enabled == SPECTRE_V2_NONE) + return false; + + if (!boot_cpu_has(X86_FEATURE_STIBP)) + return false; + + return true; +} + +static void update_stibp_msr(void *info) +{ + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} + +void arch_smt_update(void) +{ + u64 mask; + + if (!stibp_needed()) + return; + + mutex_lock(&spec_ctrl_mutex); + mask = x86_spec_ctrl_base; + if (cpu_smt_control == CPU_SMT_ENABLED) + mask |= SPEC_CTRL_STIBP; + else + mask &= ~SPEC_CTRL_STIBP; + + if (mask != x86_spec_ctrl_base) { + pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n", + cpu_smt_control == CPU_SMT_ENABLED ? 
+ "Enabling" : "Disabling"); + x86_spec_ctrl_base = mask; + on_each_cpu(update_stibp_msr, NULL, 1); + } + mutex_unlock(&spec_ctrl_mutex); +} + static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -424,6 +462,9 @@ specv2_set_mode: setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } + + /* Enable STIBP if appropriate */ + arch_smt_update(); } #undef pr_fmt @@ -814,6 +855,8 @@ static ssize_t l1tf_show_state(char *buf) static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { + int ret; + if (!boot_cpu_has_bug(bug)) return sprintf(buf, "Not affected\n"); @@ -831,10 +874,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return sprintf(buf, "Mitigation: __user pointer sanitization\n"); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "", spectre_v2_module_string()); + return ret; case X86_BUG_SPEC_STORE_BYPASS: return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); diff --git a/kernel/cpu.c b/kernel/cpu.c index aa7fe85ad62e..2fb49916ea56 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2025,6 +2025,12 @@ static void cpuhp_online_cpu_device(unsigned int cpu) kobject_uevent(&dev->kobj, KOBJ_ONLINE); } +/* + * Architectures that need SMT-specific errata handling during SMT hotplug + * should override this. + */ +void __weak arch_smt_update(void) { }; + static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { int cpu, ret = 0; @@ -2051,8 +2057,10 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) */ cpuhp_offline_cpu_device(cpu); } - if (!ret) + if (!ret) { cpu_smt_control = ctrlval; + arch_smt_update(); + } cpu_maps_update_done(); return ret; } @@ -2063,6 +2071,7 @@ static int cpuhp_smt_enable(void) cpu_maps_update_begin(); cpu_smt_control = CPU_SMT_ENABLED; + arch_smt_update(); for_each_present_cpu(cpu) { /* Skip online CPUs and CPUs on offline nodes */ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) -- cgit v1.2.3 From 9ae033aca8d600e36034d4d0743aad624cec92ed Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Sep 2018 23:51:36 -0700 Subject: jump_label: Abstract jump_entry member accessors In preparation of allowing architectures to use relative references in jump_label entries [which can dramatically reduce the memory footprint], introduce abstractions for references to the 'code' and 'key' members of struct jump_entry. 
Signed-off-by: Ard Biesheuvel Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: Arnd Bergmann Cc: Heiko Carstens Cc: Kees Cook Cc: Will Deacon Cc: Catalin Marinas Cc: Steven Rostedt Cc: Martin Schwidefsky Cc: Jessica Yu Link: https://lkml.kernel.org/r/20180919065144.25010-2-ard.biesheuvel@linaro.org --- include/linux/jump_label.h | 34 ++++++++++++++++++++++++++++++++++ kernel/jump_label.c | 40 +++++++++++++++------------------------- 2 files changed, 49 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 1a0b6f17a5d6..2eadff9b3b90 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -119,6 +119,40 @@ struct static_key { #ifdef HAVE_JUMP_LABEL #include + +#ifndef __ASSEMBLY__ + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + return (struct static_key *)((unsigned long)entry->key & ~1UL); +} + +static inline bool jump_entry_is_branch(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 1UL; +} + +static inline bool jump_entry_is_init(const struct jump_entry *entry) +{ + return entry->code == 0; +} + +static inline void jump_entry_set_init(struct jump_entry *entry) +{ + entry->code = 0; +} + +#endif #endif #ifndef __ASSEMBLY__ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 2e62503bea0d..834e43de0daf 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -38,10 +38,10 @@ static int jump_label_cmp(const void *a, const void *b) const struct jump_entry *jea = a; const struct jump_entry *jeb = b; - if (jea->key < jeb->key) + if (jump_entry_key(jea) < jump_entry_key(jeb)) return -1; - if (jea->key > jeb->key) + if (jump_entry_key(jea) > jump_entry_key(jeb)) return 1; return 0; @@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit); static int addr_conflict(struct jump_entry *entry, void *start, void *end) { - if (entry->code <= (unsigned long)end && - entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) + if (jump_entry_code(entry) <= (unsigned long)end && + jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start) return 1; return 0; @@ -321,16 +321,6 @@ static inline void static_key_set_linked(struct static_key *key) key->type |= JUMP_TYPE_LINKED; } -static inline struct static_key *jump_entry_key(struct jump_entry *entry) -{ - return (struct static_key *)((unsigned long)entry->key & ~1UL); -} - -static bool jump_entry_branch(struct jump_entry *entry) -{ - return (unsigned long)entry->key & 1UL; -} - /*** * A 'struct static_key' uses a union such that it either points directly * to a table of 'struct jump_entry' or to a linked list of modules which in @@ -355,7 +345,7 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool enabled = static_key_enabled(key); - bool branch = jump_entry_branch(entry); + bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return enabled ^ branch; @@ -370,12 +360,12 @@ static void __jump_label_update(struct static_key *key, * An entry->code of 0 indicates an entry which has been * disabled because it was in an init text area. 
*/ - if (entry->code) { - if (kernel_text_address(entry->code)) + if (!jump_entry_is_init(entry)) { + if (kernel_text_address(jump_entry_code(entry))) arch_jump_label_transform(entry, jump_label_type(entry)); else WARN_ONCE(1, "can't patch jump_label at %pS", - (void *)(unsigned long)entry->code); + (void *)jump_entry_code(entry)); } } } @@ -430,8 +420,8 @@ void __init jump_label_invalidate_initmem(void) struct jump_entry *iter; for (iter = iter_start; iter < iter_stop; iter++) { - if (init_section_contains((void *)(unsigned long)iter->code, 1)) - iter->code = 0; + if (init_section_contains((void *)jump_entry_code(iter), 1)) + jump_entry_set_init(iter); } } @@ -441,7 +431,7 @@ static enum jump_label_type jump_label_init_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool type = static_key_type(key); - bool branch = jump_entry_branch(entry); + bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return type ^ branch; @@ -565,7 +555,7 @@ static int jump_label_add_module(struct module *mod) continue; key = iterk; - if (within_module(iter->key, mod)) { + if (within_module((unsigned long)key, mod)) { static_key_set_entries(key, iter); continue; } @@ -615,7 +605,7 @@ static void jump_label_del_module(struct module *mod) key = jump_entry_key(iter); - if (within_module(iter->key, mod)) + if (within_module((unsigned long)key, mod)) continue; /* No memory during module load */ @@ -659,8 +649,8 @@ static void jump_label_invalidate_module_init(struct module *mod) struct jump_entry *iter; for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(iter->code, mod)) - iter->code = 0; + if (within_module_init(jump_entry_code(iter), mod)) + jump_entry_set_init(iter); } } -- cgit v1.2.3 From 50ff18ab497aa22f6a59444625df7508c8918237 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Sep 2018 23:51:37 -0700 Subject: jump_label: Implement generic support for relative references To reduce the size taken up by absolute references in jump label entries themselves and the associated relocation records in the .init segment, add support for emitting them as relative references instead. Note that this requires some extra care in the sorting routine, given that the offsets change when entries are moved around in the jump_entry table. 
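The "extra care" is that a self-relative offset is only meaningful at the address it was written for: when sort() moves an entry by delta bytes, every offset stored in it has to be rebiased by that delta. This is what the jump_label_swap() callback in the hunk below does; the same code, with explanatory comments added:

	static void jump_label_swap(void *a, void *b, int size)
	{
		long delta = (unsigned long)a - (unsigned long)b;
		struct jump_entry *jea = a;
		struct jump_entry *jeb = b;
		struct jump_entry tmp = *jea;

		/* the entry moving b -> a is displaced by +delta, so each
		 * field-relative offset shrinks by delta */
		jea->code   = jeb->code   - delta;
		jea->target = jeb->target - delta;
		jea->key    = jeb->key    - delta;

		/* the entry moving a -> b is displaced by -delta, so its
		 * offsets grow by delta */
		jeb->code   = tmp.code   + delta;
		jeb->target = tmp.target + delta;
		jeb->key    = tmp.key    + delta;
	}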
Signed-off-by: Ard Biesheuvel Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: Arnd Bergmann Cc: Heiko Carstens Cc: Kees Cook Cc: Will Deacon Cc: Catalin Marinas Cc: Steven Rostedt Cc: Martin Schwidefsky Cc: Jessica Yu Link: https://lkml.kernel.org/r/20180919065144.25010-3-ard.biesheuvel@linaro.org --- arch/Kconfig | 3 +++ include/linux/jump_label.h | 28 ++++++++++++++++++++++++++++ kernel/jump_label.c | 22 +++++++++++++++++++++- 3 files changed, 52 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index 6801123932a5..9d329608913e 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -359,6 +359,9 @@ config HAVE_PERF_USER_STACK_DUMP config HAVE_ARCH_JUMP_LABEL bool +config HAVE_ARCH_JUMP_LABEL_RELATIVE + bool + config HAVE_RCU_TABLE_FREE bool diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 2eadff9b3b90..2768a925bafa 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -121,6 +121,32 @@ struct static_key { #include #ifndef __ASSEMBLY__ +#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE + +struct jump_entry { + s32 code; + s32 target; + long key; // key may be far away from the core kernel under KASLR +}; + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return (unsigned long)&entry->code + entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return (unsigned long)&entry->target + entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + long offset = entry->key & ~1L; + + return (struct static_key *)((unsigned long)&entry->key + offset); +} + +#else static inline unsigned long jump_entry_code(const struct jump_entry *entry) { @@ -137,6 +163,8 @@ static inline struct static_key *jump_entry_key(const struct jump_entry *entry) return (struct static_key *)((unsigned long)entry->key & ~1UL); } +#endif + static inline bool jump_entry_is_branch(const struct jump_entry *entry) { return (unsigned long)entry->key & 1UL; diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 834e43de0daf..898a1d0c38dc 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -47,14 +47,34 @@ static int jump_label_cmp(const void *a, const void *b) return 0; } +static void jump_label_swap(void *a, void *b, int size) +{ + long delta = (unsigned long)a - (unsigned long)b; + struct jump_entry *jea = a; + struct jump_entry *jeb = b; + struct jump_entry tmp = *jea; + + jea->code = jeb->code - delta; + jea->target = jeb->target - delta; + jea->key = jeb->key - delta; + + jeb->code = tmp.code + delta; + jeb->target = tmp.target + delta; + jeb->key = tmp.key + delta; +} + static void jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; + void *swapfn = NULL; + + if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE)) + swapfn = jump_label_swap; size = (((unsigned long)stop - (unsigned long)start) / sizeof(struct jump_entry)); - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn); } static void jump_label_update(struct static_key *key); -- cgit v1.2.3 From 19483677684b6ca01606f58503cb79cdfbbc7c72 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Sep 2018 23:51:42 -0700 Subject: jump_label: Annotate entries that operate on __init code earlier Jump table entries are mostly read-only, with 
the exception of the init and module loader code that defuses entries that point into init code when the code being referred to is freed. For robustness, it would be better to move these entries into the ro_after_init section, but clearing the 'code' member of each jump table entry referring to init code at module load time races with the module_enable_ro() call that remaps the ro_after_init section read only, so we'd like to do it earlier. So given that whether such an entry refers to init code can be decided much earlier, we can pull this check forward. Since we may still need the code entry at this point, let's switch to setting a low bit in the 'key' member just like we do to annotate the default state of a jump table entry. Signed-off-by: Ard Biesheuvel Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Acked-by: Peter Zijlstra (Intel) Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: Arnd Bergmann Cc: Heiko Carstens Cc: Will Deacon Cc: Catalin Marinas Cc: Steven Rostedt Cc: Martin Schwidefsky Cc: Jessica Yu Link: https://lkml.kernel.org/r/20180919065144.25010-8-ard.biesheuvel@linaro.org --- include/linux/jump_label.h | 11 ++++------- init/main.c | 1 - kernel/jump_label.c | 48 ++++++++++++++-------------------------------- 3 files changed, 18 insertions(+), 42 deletions(-) (limited to 'kernel') diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 2768a925bafa..5df6a621e464 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -141,7 +141,7 @@ static inline unsigned long jump_entry_target(const struct jump_entry *entry) static inline struct static_key *jump_entry_key(const struct jump_entry *entry) { - long offset = entry->key & ~1L; + long offset = entry->key & ~3L; return (struct static_key *)((unsigned long)&entry->key + offset); } @@ -160,7 +160,7 @@ static inline unsigned long jump_entry_target(const struct jump_entry *entry) static inline struct static_key *jump_entry_key(const struct jump_entry *entry) { - return (struct static_key *)((unsigned long)entry->key & ~1UL); + return (struct static_key *)((unsigned long)entry->key & ~3UL); } #endif @@ -172,12 +172,12 @@ static inline bool jump_entry_is_branch(const struct jump_entry *entry) static inline bool jump_entry_is_init(const struct jump_entry *entry) { - return entry->code == 0; + return (unsigned long)entry->key & 2UL; } static inline void jump_entry_set_init(struct jump_entry *entry) { - entry->code = 0; + entry->key |= 2; } #endif @@ -213,7 +213,6 @@ extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); -extern void jump_label_invalidate_initmem(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, @@ -261,8 +260,6 @@ static __always_inline void jump_label_init(void) static_key_initialized = true; } -static inline void jump_label_invalidate_initmem(void) {} - static __always_inline bool static_key_false(struct static_key *key) { if (unlikely(static_key_count(key) > 0)) diff --git a/init/main.c b/init/main.c index 18f8f0140fa0..a664246450d1 100644 --- a/init/main.c +++ b/init/main.c @@ -1064,7 +1064,6 @@ static int __ref kernel_init(void *unused) /* need to finish all async __init code before freeing the memory */ async_synchronize_full(); ftrace_free_init_mem(); - jump_label_invalidate_initmem(); free_initmem(); mark_readonly(); diff --git a/kernel/jump_label.c b/kernel/jump_label.c 
index 898a1d0c38dc..e8cf3ff3149c 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -373,14 +373,15 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry) static void __jump_label_update(struct static_key *key, struct jump_entry *entry, - struct jump_entry *stop) + struct jump_entry *stop, + bool init) { for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { /* * An entry->code of 0 indicates an entry which has been * disabled because it was in an init text area. */ - if (!jump_entry_is_init(entry)) { + if (init || !jump_entry_is_init(entry)) { if (kernel_text_address(jump_entry_code(entry))) arch_jump_label_transform(entry, jump_label_type(entry)); else @@ -420,6 +421,9 @@ void __init jump_label_init(void) if (jump_label_type(iter) == JUMP_LABEL_NOP) arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); + if (init_section_contains((void *)jump_entry_code(iter), 1)) + jump_entry_set_init(iter); + iterk = jump_entry_key(iter); if (iterk == key) continue; @@ -432,19 +436,6 @@ void __init jump_label_init(void) cpus_read_unlock(); } -/* Disable any jump label entries in __init/__exit code */ -void __init jump_label_invalidate_initmem(void) -{ - struct jump_entry *iter_start = __start___jump_table; - struct jump_entry *iter_stop = __stop___jump_table; - struct jump_entry *iter; - - for (iter = iter_start; iter < iter_stop; iter++) { - if (init_section_contains((void *)jump_entry_code(iter), 1)) - jump_entry_set_init(iter); - } -} - #ifdef CONFIG_MODULES static enum jump_label_type jump_label_init_type(struct jump_entry *entry) @@ -524,7 +515,8 @@ static void __jump_label_mod_update(struct static_key *key) stop = __stop___jump_table; else stop = m->jump_entries + m->num_jump_entries; - __jump_label_update(key, mod->entries, stop); + __jump_label_update(key, mod->entries, stop, + m->state == MODULE_STATE_COMING); } } @@ -570,6 +562,9 @@ static int jump_label_add_module(struct module *mod) for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; + if (within_module_init(jump_entry_code(iter), mod)) + jump_entry_set_init(iter); + iterk = jump_entry_key(iter); if (iterk == key) continue; @@ -605,7 +600,7 @@ static int jump_label_add_module(struct module *mod) /* Only update if we've changed from our initial state */ if (jump_label_type(iter) != jump_label_init_type(iter)) - __jump_label_update(key, iter, iter_stop); + __jump_label_update(key, iter, iter_stop, true); } return 0; @@ -661,19 +656,6 @@ static void jump_label_del_module(struct module *mod) } } -/* Disable any jump label entries in module init code */ -static void jump_label_invalidate_module_init(struct module *mod) -{ - struct jump_entry *iter_start = mod->jump_entries; - struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; - struct jump_entry *iter; - - for (iter = iter_start; iter < iter_stop; iter++) { - if (within_module_init(jump_entry_code(iter), mod)) - jump_entry_set_init(iter); - } -} - static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) @@ -695,9 +677,6 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, case MODULE_STATE_GOING: jump_label_del_module(mod); break; - case MODULE_STATE_LIVE: - jump_label_invalidate_module_init(mod); - break; } jump_label_unlock(); @@ -767,7 +746,8 @@ static void jump_label_update(struct static_key *key) entry = static_key_entries(key); /* if there are no users, entry can be NULL */ if (entry) - __jump_label_update(key, entry, stop); + 
__jump_label_update(key, entry, stop, + system_state < SYSTEM_RUNNING); } #ifdef CONFIG_STATIC_KEYS_SELFTEST -- cgit v1.2.3 From e872267b8bcbb179e21ccc7118f258873d6e7a59 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Sep 2018 23:51:43 -0700 Subject: jump_table: Move entries into ro_after_init region The __jump_table sections emitted into the core kernel and into each module consist of statically initialized references into other parts of the code, and with the exception of entries that point into init code, which are defused at post-init time, these data structures are never modified. So let's move them into the ro_after_init section, to prevent them from being corrupted inadvertently by buggy code, or deliberately by an attacker. Signed-off-by: Ard Biesheuvel Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Acked-by: Jessica Yu Acked-by: Peter Zijlstra (Intel) Cc: linux-arm-kernel@lists.infradead.org Cc: linux-s390@vger.kernel.org Cc: Arnd Bergmann Cc: Heiko Carstens Cc: Will Deacon Cc: Catalin Marinas Cc: Steven Rostedt Cc: Martin Schwidefsky Link: https://lkml.kernel.org/r/20180919065144.25010-9-ard.biesheuvel@linaro.org --- arch/s390/kernel/vmlinux.lds.S | 1 + include/asm-generic/vmlinux.lds.h | 11 +++++++---- kernel/module.c | 9 +++++++++ 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index b43f8d33a369..4042bbf3f9ad 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -66,6 +66,7 @@ SECTIONS *(.data..ro_after_init) } EXCEPTION_TABLE(16) + JUMP_TABLE_DATA . = ALIGN(PAGE_SIZE); __end_ro_after_init = .; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7b75ff6e2fce..f09ee3c544bc 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -253,10 +253,6 @@ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ - . = ALIGN(8); \ - __start___jump_table = .; \ - KEEP(*(__jump_table)) \ - __stop___jump_table = .; \ . = ALIGN(8); \ __start___verbose = .; \ KEEP(*(__verbose)) \ @@ -300,6 +296,12 @@ . = __start_init_task + THREAD_SIZE; \ __end_init_task = .; +#define JUMP_TABLE_DATA \ + . = ALIGN(8); \ + __start___jump_table = .; \ + KEEP(*(__jump_table)) \ + __stop___jump_table = .; + /* * Allow architectures to handle ro_after_init data on their * own by defining an empty RO_AFTER_INIT_DATA. @@ -308,6 +310,7 @@ #define RO_AFTER_INIT_DATA \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ + JUMP_TABLE_DATA \ __end_ro_after_init = .; #endif diff --git a/kernel/module.c b/kernel/module.c index 6746c85511fe..49a405891587 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3315,6 +3315,15 @@ static struct module *layout_and_allocate(struct load_info *info, int flags) * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. */ ndx = find_sec(info, ".data..ro_after_init"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + /* + * Mark the __jump_table section as ro_after_init as well: these data + * structures are never modified, with the exception of entries that + * refer to code in the __init section, which are annotated as such + * at module load time. 
+ */ + ndx = find_sec(info, "__jump_table"); if (ndx) info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; -- cgit v1.2.3 From 5bf7a60b8e70969f65c961d7e2c4eb40eb2c664d Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 27 Sep 2018 14:37:30 -0700 Subject: bpf: permit CGROUP_DEVICE programs accessing helper bpf_get_current_cgroup_id() Currently, helper bpf_get_current_cgroup_id() is not permitted for CGROUP_DEVICE type of programs. If the helper is used in such cases, the verifier will log the following error: 0: (bf) r6 = r1 1: (69) r7 = *(u16 *)(r6 +0) 2: (85) call bpf_get_current_cgroup_id#80 unknown func bpf_get_current_cgroup_id#80 The bpf_get_current_cgroup_id() is useful for CGROUP_DEVICE type of programs in order to customize action based on cgroup id. This patch added such a support. Cc: Roman Gushchin Signed-off-by: Yonghong Song Acked-by: Alexei Starovoitov Acked-by: Roman Gushchin Signed-off-by: Daniel Borkmann --- kernel/bpf/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 6a7d931bbc55..549f6fbcc461 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -677,6 +677,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_uid_gid_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; + case BPF_FUNC_get_current_cgroup_id: + return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_trace_printk: if (capable(CAP_SYS_ADMIN)) return bpf_get_trace_printk_proto(); -- cgit v1.2.3 From 4288ea006c73e37c2a4f60dfaef20dd167b8df31 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Sep 2018 14:33:21 +0100 Subject: bpf: harden flags check in cgroup_storage_update_elem() cgroup_storage_update_elem() shouldn't accept any flags argument values except BPF_ANY and BPF_EXIST to guarantee the backward compatibility, had a new flag value been added. Fixes: de9cbbaadba5 ("bpf: introduce cgroup storage maps") Signed-off-by: Roman Gushchin Reported-by: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/local_storage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 22ad967d1e5f..94126cbffc88 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, struct bpf_cgroup_storage *storage; struct bpf_storage_buffer *new; - if (flags & BPF_NOEXIST) + if (flags != BPF_ANY && flags != BPF_EXIST) return -EINVAL; storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, -- cgit v1.2.3 From dc6253108f0fbff4a634055d5b8a91958ec2af81 Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Fri, 28 Sep 2018 12:03:28 -0400 Subject: tick/broadcast: Remove redundant check tick_device_is_functional() is called early in tick_broadcast_control(), so no need to call it again later. 
Signed-off-by: Peng Hao Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Link: https://lkml.kernel.org/r/1538150608-2599-1-git-send-email-penghao122@sina.com.cn --- kernel/time/tick-broadcast.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index aa2094d5dd27..be0aac2b4300 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -400,8 +400,6 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) if (tick_broadcast_forced) break; cpumask_clear_cpu(cpu, tick_broadcast_on); - if (!tick_device_is_functional(dev)) - break; if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) { if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) -- cgit v1.2.3 From befb1b3c2703897c5b8ffb0044dc5d0e5f27c5d7 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Wed, 19 Sep 2018 10:29:06 -0700 Subject: perf/core: Add sanity check to deal with pinned event failure It is possible that a failure can occur during the scheduling of a pinned event. The initial portion of perf_event_read_local() contains the various error checks an event should pass before it can be considered valid. Ensure that the potential scheduling failure of a pinned event is checked for and have a credible error. Suggested-by: Peter Zijlstra Signed-off-by: Reinette Chatre Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: fenghua.yu@intel.com Cc: tony.luck@intel.com Cc: acme@kernel.org Cc: gavin.hindman@intel.com Cc: jithu.joseph@intel.com Cc: dave.hansen@intel.com Cc: hpa@zytor.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/6486385d1f30336e9973b24c8c65f5079543d3d3.1537377064.git.reinette.chatre@intel.com --- kernel/events/core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index c80549bf82c6..dcb093e7b377 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value, goto out; } + /* If this is a pinned event it must be running on this CPU */ + if (event->attr.pinned && event->oncpu != smp_processor_id()) { + ret = -EBUSY; + goto out; + } + /* * If the event is currently on this CPU, its either a per-task event, * or local to this CPU. Furthermore it means its ACTIVE (otherwise -- cgit v1.2.3 From 513145ea66af95f1a5c744d7b5a4f4a97625e669 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Oct 2018 11:05:21 +0100 Subject: genirq/debugfs: Reset domain debugfs_file on removal of the debugfs file When removing a debugfs file for a given irq domain, we fail to clear the corresponding field, meaning that the corresponding domain won't be created again if we need to do so. It turns out that this is exactly what irq_domain_update_bus_token does (delete old file, update domain name, recreate file). This doesn't have any impact other than making debug more difficult, but we do value ease of debugging... So clear the debugfs_file field. 
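The reason a stale pointer is fatal for re-creation is the guard at the top of the add path. A sketch of the add/remove pairing (the add side is paraphrased from the existing helper; only the NULL assignment in the remove side is the new bit):

	static void debugfs_add_domain_dir(struct irq_domain *d)
	{
		/* a leftover d->debugfs_file makes this bail out forever */
		if (!d->name || !domain_dir || d->debugfs_file)
			return;
		d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir,
						      d, &irq_domain_debug_fops);
	}

	static void debugfs_remove_domain_dir(struct irq_domain *d)
	{
		debugfs_remove(d->debugfs_file);
		d->debugfs_file = NULL;		/* allow the domain to be re-added */
	}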
Signed-off-by: Marc Zyngier Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20181001100522.180054-2-marc.zyngier@arm.com --- kernel/irq/irqdomain.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 5d9fc01b60a6..95a0acbdd4e6 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1741,6 +1741,7 @@ static void debugfs_add_domain_dir(struct irq_domain *d) static void debugfs_remove_domain_dir(struct irq_domain *d) { debugfs_remove(d->debugfs_file); + d->debugfs_file = NULL; } void __init irq_domain_debugfs_init(struct dentry *root) -- cgit v1.2.3 From 94967b55ebf3b603f2fe750ecedd896042585a1c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Oct 2018 11:05:22 +0100 Subject: genirq/debugfs: Reinstate full OF path for domain name On a DT based system, we use the of_node full name to name the corresponding irq domain. We expect that name to be unique, so so that domains with the same base name won't clash (this happens on multi-node topologies, for example). Since a7e4cfb0a7ca ("of/fdt: only store the device node basename in full_name"), of_node_full_name() lies and only returns the basename. This breaks the above requirement, and we end-up with only a subset of the domains in /sys/kernel/debug/irq/domains. Let's reinstate the feature by using the fancy new %pOF format specifier, which happens to do the right thing. Fixes: a7e4cfb0a7ca ("of/fdt: only store the device node basename in full_name") Signed-off-by: Marc Zyngier Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20181001100522.180054-3-marc.zyngier@arm.com --- kernel/irq/irqdomain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 95a0acbdd4e6..3b30a4aeb0db 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -183,7 +183,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, * unhappy about. Replace them with ':', which does * the trick and is not as offensive as '\'... */ - name = kstrdup(of_node_full_name(of_node), GFP_KERNEL); + name = kasprintf(GFP_KERNEL, "%pOF", of_node); if (!name) { kfree(domain); return NULL; -- cgit v1.2.3 From 22839869f21ab3850fbbac9b425ccc4c0023926f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 5 Sep 2018 15:34:42 +0100 Subject: signal: Introduce COMPAT_SIGMINSTKSZ for use in compat_sys_sigaltstack The sigaltstack(2) system call fails with -ENOMEM if the new alternative signal stack is found to be smaller than SIGMINSTKSZ. On architectures such as arm64, where the native value for SIGMINSTKSZ is larger than the compat value, this can result in an unexpected error being reported to a compat task. See, for example: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=904385 This patch fixes the problem by extending do_sigaltstack to take the minimum signal stack size as an additional parameter, allowing the native and compat system call entry code to pass in their respective values. COMPAT_SIGMINSTKSZ is just defined as SIGMINSTKSZ if it has not been defined by the architecture. Cc: Arnd Bergmann Cc: Dominik Brodowski Cc: "Eric W. 
Biederman" Cc: Andrew Morton Cc: Al Viro Cc: Oleg Nesterov Reported-by: Steve McIntyre Tested-by: Steve McIntyre <93sam@debian.org> Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- include/linux/compat.h | 3 +++ kernel/signal.c | 14 +++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/include/linux/compat.h b/include/linux/compat.h index 1a3c4f37e908..de0c13bdcd2c 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -103,6 +103,9 @@ typedef struct compat_sigaltstack { compat_size_t ss_size; } compat_stack_t; #endif +#ifndef COMPAT_MINSIGSTKSZ +#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ +#endif #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) diff --git a/kernel/signal.c b/kernel/signal.c index 5843c541fda9..e4aad0e90882 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3460,7 +3460,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) } static int -do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp) +do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, + size_t min_ss_size) { struct task_struct *t = current; @@ -3490,7 +3491,7 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp) ss_size = 0; ss_sp = NULL; } else { - if (unlikely(ss_size < MINSIGSTKSZ)) + if (unlikely(ss_size < min_ss_size)) return -ENOMEM; } @@ -3508,7 +3509,8 @@ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) if (uss && copy_from_user(&new, uss, sizeof(stack_t))) return -EFAULT; err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, - current_user_stack_pointer()); + current_user_stack_pointer(), + MINSIGSTKSZ); if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) err = -EFAULT; return err; @@ -3519,7 +3521,8 @@ int restore_altstack(const stack_t __user *uss) stack_t new; if (copy_from_user(&new, uss, sizeof(stack_t))) return -EFAULT; - (void)do_sigaltstack(&new, NULL, current_user_stack_pointer()); + (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), + MINSIGSTKSZ); /* squash all but EFAULT for now */ return 0; } @@ -3553,7 +3556,8 @@ static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, uss.ss_size = uss32.ss_size; } ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, - compat_user_stack_pointer()); + compat_user_stack_pointer(), + COMPAT_MINSIGSTKSZ); if (ret >= 0 && uoss_ptr) { compat_stack_t old; memset(&old, 0, sizeof(old)); -- cgit v1.2.3 From 8bad74f9840f87661f20ced3dc80c84ab4fd55a1 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Sep 2018 14:45:36 +0000 Subject: bpf: extend cgroup bpf core to allow multiple cgroup storage types In order to introduce per-cpu cgroup storage, let's generalize bpf cgroup core to support multiple cgroup storage types. Potentially, per-node cgroup storage can be added later. This commit is mostly a formal change that replaces cgroup_storage pointer with a array of cgroup_storage pointers. It doesn't actually introduce a new storage type, it will be done later. Each bpf program is now able to have one cgroup storage of each type. 
Signed-off-by: Roman Gushchin Acked-by: Song Liu Cc: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 38 +++++++++++++++++------- include/linux/bpf.h | 11 +++++-- kernel/bpf/cgroup.c | 74 ++++++++++++++++++++++++++++++++-------------- kernel/bpf/helpers.c | 15 ++++++---- kernel/bpf/local_storage.c | 18 ++++++----- kernel/bpf/syscall.c | 9 ++++-- kernel/bpf/verifier.c | 8 +++-- net/bpf/test_run.c | 20 +++++++++---- 8 files changed, 136 insertions(+), 57 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index f91b0f8ff3a9..e9871b012dac 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -2,6 +2,7 @@ #ifndef _BPF_CGROUP_H #define _BPF_CGROUP_H +#include #include #include #include @@ -22,7 +23,10 @@ struct bpf_cgroup_storage; extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) -DECLARE_PER_CPU(void*, bpf_cgroup_storage); +DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); + +#define for_each_cgroup_storage_type(stype) \ + for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) struct bpf_cgroup_storage_map; @@ -43,7 +47,7 @@ struct bpf_cgroup_storage { struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; - struct bpf_cgroup_storage *storage; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array; @@ -101,18 +105,29 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) +static inline enum bpf_cgroup_storage_type cgroup_storage_type( + struct bpf_map *map) { + return BPF_CGROUP_STORAGE_SHARED; +} + +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) +{ + enum bpf_cgroup_storage_type stype; struct bpf_storage_buffer *buf; - if (!storage) - return; + for_each_cgroup_storage_type(stype) { + if (!storage[stype]) + continue; - buf = READ_ONCE(storage->buf); - this_cpu_write(bpf_cgroup_storage, &buf->data[0]); + buf = READ_ONCE(storage[stype]->buf); + this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]); + } } -struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, struct cgroup *cgroup, @@ -265,13 +280,14 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} +static inline void bpf_cgroup_storage_set( + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map) { return 0; } static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( - struct bpf_prog *prog) { return 0; } + struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} @@ -293,6 +309,8 @@ static inline void 
bpf_cgroup_storage_free( #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define for_each_cgroup_storage_type(stype) for (; false; ) + #endif /* CONFIG_CGROUP_BPF */ #endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 988a00797bcd..b457fbe7b70b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -272,6 +272,13 @@ struct bpf_prog_offload { u32 jited_len; }; +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + __BPF_CGROUP_STORAGE_MAX +}; + +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX + struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; @@ -289,7 +296,7 @@ struct bpf_prog_aux { struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ - struct bpf_map *cgroup_storage; + struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY void *security; @@ -358,7 +365,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, */ struct bpf_prog_array_item { struct bpf_prog *prog; - struct bpf_cgroup_storage *cgroup_storage; + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array { diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 549f6fbcc461..00f6ed2e4f9a 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -25,6 +25,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key); */ void cgroup_bpf_put(struct cgroup *cgrp) { + enum bpf_cgroup_storage_type stype; unsigned int type; for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { @@ -34,8 +35,10 @@ void cgroup_bpf_put(struct cgroup *cgrp) list_for_each_entry_safe(pl, tmp, progs, node) { list_del(&pl->node); bpf_prog_put(pl->prog); - bpf_cgroup_storage_unlink(pl->storage); - bpf_cgroup_storage_free(pl->storage); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_unlink(pl->storage[stype]); + bpf_cgroup_storage_free(pl->storage[stype]); + } kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -97,6 +100,7 @@ static int compute_effective_progs(struct cgroup *cgrp, enum bpf_attach_type type, struct bpf_prog_array __rcu **array) { + enum bpf_cgroup_storage_type stype; struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; @@ -125,7 +129,9 @@ static int compute_effective_progs(struct cgroup *cgrp, continue; progs->items[cnt].prog = pl->prog; - progs->items[cnt].cgroup_storage = pl->storage; + for_each_cgroup_storage_type(stype) + progs->items[cnt].cgroup_storage[stype] = + pl->storage[stype]; cnt++; } } while ((p = cgroup_parent(p))); @@ -232,7 +238,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, { struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; - struct bpf_cgroup_storage *storage, *old_storage = NULL; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE], + *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL}; + enum bpf_cgroup_storage_type stype; struct bpf_prog_list *pl; bool pl_was_allocated; int err; @@ -254,34 +262,44 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) return -E2BIG; - storage = bpf_cgroup_storage_alloc(prog); - if (IS_ERR(storage)) - return -ENOMEM; + for_each_cgroup_storage_type(stype) { + storage[stype] = bpf_cgroup_storage_alloc(prog, stype); + if (IS_ERR(storage[stype])) { + storage[stype] = NULL; + 
for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); + return -ENOMEM; + } + } if (flags & BPF_F_ALLOW_MULTI) { list_for_each_entry(pl, progs, node) { if (pl->prog == prog) { /* disallow attaching the same prog twice */ - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -EINVAL; } } pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -ENOMEM; } pl_was_allocated = true; pl->prog = prog; - pl->storage = storage; + for_each_cgroup_storage_type(stype) + pl->storage[stype] = storage[stype]; list_add_tail(&pl->node, progs); } else { if (list_empty(progs)) { pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return -ENOMEM; } pl_was_allocated = true; @@ -289,12 +307,15 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, } else { pl = list_first_entry(progs, typeof(*pl), node); old_prog = pl->prog; - old_storage = pl->storage; - bpf_cgroup_storage_unlink(old_storage); + for_each_cgroup_storage_type(stype) { + old_storage[stype] = pl->storage[stype]; + bpf_cgroup_storage_unlink(old_storage[stype]); + } pl_was_allocated = false; } pl->prog = prog; - pl->storage = storage; + for_each_cgroup_storage_type(stype) + pl->storage[stype] = storage[stype]; } cgrp->bpf.flags[type] = flags; @@ -304,21 +325,27 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, goto cleanup; static_branch_inc(&cgroup_bpf_enabled_key); - if (old_storage) - bpf_cgroup_storage_free(old_storage); + for_each_cgroup_storage_type(stype) { + if (!old_storage[stype]) + continue; + bpf_cgroup_storage_free(old_storage[stype]); + } if (old_prog) { bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key); } - bpf_cgroup_storage_link(storage, cgrp, type); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_link(storage[stype], cgrp, type); return 0; cleanup: /* and cleanup the prog list */ pl->prog = old_prog; - bpf_cgroup_storage_free(pl->storage); - pl->storage = old_storage; - bpf_cgroup_storage_link(old_storage, cgrp, type); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_free(pl->storage[stype]); + pl->storage[stype] = old_storage[stype]; + bpf_cgroup_storage_link(old_storage[stype], cgrp, type); + } if (pl_was_allocated) { list_del(&pl->node); kfree(pl); @@ -339,6 +366,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 unused_flags) { struct list_head *progs = &cgrp->bpf.progs[type]; + enum bpf_cgroup_storage_type stype; u32 flags = cgrp->bpf.flags[type]; struct bpf_prog *old_prog = NULL; struct bpf_prog_list *pl; @@ -385,8 +413,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); - bpf_cgroup_storage_unlink(pl->storage); - bpf_cgroup_storage_free(pl->storage); + for_each_cgroup_storage_type(stype) { + bpf_cgroup_storage_unlink(pl->storage[stype]); + bpf_cgroup_storage_free(pl->storage[stype]); + } kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 1991466b8327..9070b2ace6aa 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -194,16 +194,18 @@ const struct bpf_func_proto 
bpf_get_current_cgroup_id_proto = { .ret_type = RET_INTEGER, }; -DECLARE_PER_CPU(void*, bpf_cgroup_storage); +#ifdef CONFIG_CGROUP_BPF +DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) { - /* map and flags arguments are not used now, - * but provide an ability to extend the API - * for other types of local storages. - * verifier checks that their values are correct. + /* flags argument is not used now, + * but provides an ability to extend the API. + * verifier checks that its value is correct. */ - return (unsigned long) this_cpu_read(bpf_cgroup_storage); + enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); + + return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]); } const struct bpf_func_proto bpf_get_local_storage_proto = { @@ -214,3 +216,4 @@ const struct bpf_func_proto bpf_get_local_storage_proto = { .arg2_type = ARG_ANYTHING, }; #endif +#endif diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 22ad967d1e5f..0bd9f19fc557 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -7,7 +7,7 @@ #include #include -DEFINE_PER_CPU(void*, bpf_cgroup_storage); +DEFINE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF @@ -251,6 +251,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = { int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) { + enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); int ret = -EBUSY; @@ -258,11 +259,12 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) if (map->prog && map->prog != prog) goto unlock; - if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map) + if (prog->aux->cgroup_storage[stype] && + prog->aux->cgroup_storage[stype] != _map) goto unlock; map->prog = prog; - prog->aux->cgroup_storage = _map; + prog->aux->cgroup_storage[stype] = _map; ret = 0; unlock: spin_unlock_bh(&map->lock); @@ -272,24 +274,26 @@ unlock: void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) { + enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); struct bpf_cgroup_storage_map *map = map_to_storage(_map); spin_lock_bh(&map->lock); if (map->prog == prog) { - WARN_ON(prog->aux->cgroup_storage != _map); + WARN_ON(prog->aux->cgroup_storage[stype] != _map); map->prog = NULL; - prog->aux->cgroup_storage = NULL; + prog->aux->cgroup_storage[stype] = NULL; } spin_unlock_bh(&map->lock); } -struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog) +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype) { struct bpf_cgroup_storage *storage; struct bpf_map *map; u32 pages; - map = prog->aux->cgroup_storage; + map = prog->aux->cgroup_storage[stype]; if (!map) return NULL; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b3c2d09bcf7a..8c91d2b41b1e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -988,10 +988,15 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) /* drop refcnt on maps used by eBPF program and free auxilary data */ static void free_used_maps(struct bpf_prog_aux *aux) { + enum bpf_cgroup_storage_type stype; int i; - if (aux->cgroup_storage) - bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage); + for_each_cgroup_storage_type(stype) { + if (!aux->cgroup_storage[stype]) + continue; + 
bpf_cgroup_storage_release(aux->prog, + aux->cgroup_storage[stype]); + } for (i = 0; i < aux->used_map_cnt; i++) bpf_map_put(aux->used_maps[i]); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e986518d7bc3..e90899df585d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5171,11 +5171,15 @@ next_insn: /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { + enum bpf_cgroup_storage_type stype; int i; - if (env->prog->aux->cgroup_storage) + for_each_cgroup_storage_type(stype) { + if (!env->prog->aux->cgroup_storage[stype]) + continue; bpf_cgroup_storage_release(env->prog, - env->prog->aux->cgroup_storage); + env->prog->aux->cgroup_storage[stype]); + } for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f4078830ea50..0c423b8cd75c 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -12,7 +12,7 @@ #include static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, - struct bpf_cgroup_storage *storage) + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { u32 ret; @@ -28,13 +28,20 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) { - struct bpf_cgroup_storage *storage = NULL; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; + enum bpf_cgroup_storage_type stype; u64 time_start, time_spent = 0; u32 ret = 0, i; - storage = bpf_cgroup_storage_alloc(prog); - if (IS_ERR(storage)) - return PTR_ERR(storage); + for_each_cgroup_storage_type(stype) { + storage[stype] = bpf_cgroup_storage_alloc(prog, stype); + if (IS_ERR(storage[stype])) { + storage[stype] = NULL; + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); + return -ENOMEM; + } + } if (!repeat) repeat = 1; @@ -53,7 +60,8 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) do_div(time_spent, repeat); *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; - bpf_cgroup_storage_free(storage); + for_each_cgroup_storage_type(stype) + bpf_cgroup_storage_free(storage[stype]); return ret; } -- cgit v1.2.3 From f294b37ec7b24a574884cd157497a3748081c0f0 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Sep 2018 14:45:40 +0000 Subject: bpf: rework cgroup storage pointer passing To simplify the following introduction of per-cpu cgroup storage, let's rework a bit a mechanism of passing a pointer to a cgroup storage into the bpf_get_local_storage(). Let's save a pointer to the corresponding bpf_cgroup_storage structure, instead of a pointer to the actual buffer. It will help us to handle per-cpu storage later, which has a different way of accessing to the actual data. 
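The visible effect is confined to the helper: the per-cpu slot now caches the bpf_cgroup_storage object and the data pointer is only resolved at call time. A condensed before/after of bpf_get_local_storage(), matching the hunks below:

	/* before: the per-cpu slot already points at the data buffer */
	return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]);

	/* after: the slot holds the storage object, dereference on use */
	storage = this_cpu_read(bpf_cgroup_storage[stype]);
	return (unsigned long)&READ_ONCE(storage->buf)->data[0];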
Signed-off-by: Roman Gushchin Acked-by: Song Liu Cc: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 13 ++++--------- kernel/bpf/helpers.c | 8 ++++++-- kernel/bpf/local_storage.c | 3 ++- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index e9871b012dac..7e0c9a1d48b7 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -23,7 +23,8 @@ struct bpf_cgroup_storage; extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) -DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #define for_each_cgroup_storage_type(stype) \ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) @@ -115,15 +116,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { enum bpf_cgroup_storage_type stype; - struct bpf_storage_buffer *buf; - - for_each_cgroup_storage_type(stype) { - if (!storage[stype]) - continue; - buf = READ_ONCE(storage[stype]->buf); - this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]); - } + for_each_cgroup_storage_type(stype) + this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9070b2ace6aa..e42f8789b7ea 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -195,7 +195,8 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { }; #ifdef CONFIG_CGROUP_BPF -DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) { @@ -204,8 +205,11 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) * verifier checks that its value is correct. */ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); + struct bpf_cgroup_storage *storage; - return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]); + storage = this_cpu_read(bpf_cgroup_storage[stype]); + + return (unsigned long)&READ_ONCE(storage->buf)->data[0]; } const struct bpf_func_proto bpf_get_local_storage_proto = { diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 0bd9f19fc557..6742292fb39e 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -7,7 +7,8 @@ #include #include -DEFINE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); +DEFINE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF -- cgit v1.2.3 From b741f1630346defcbc8cc60f1a2bdae8b3b0036f Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Sep 2018 14:45:43 +0000 Subject: bpf: introduce per-cpu cgroup local storage This commit introduces per-cpu cgroup local storage. Per-cpu cgroup local storage is very similar to simple cgroup storage (let's call it shared), except all the data is per-cpu. The main goal of the per-cpu variant is to implement super fast counters (e.g. packet counters), which require neither lookups nor atomic operations. From userspace's point of view, accessing a per-cpu cgroup storage is similar to other per-cpu map types (e.g.
per-cpu hashmaps and arrays). Writing to a per-cpu cgroup storage is not atomic, but is performed by copying longs, so some minimal atomicity is here, exactly as with other per-cpu maps. Signed-off-by: Roman Gushchin Cc: Daniel Borkmann Cc: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/bpf-cgroup.h | 20 +++++- include/linux/bpf.h | 1 + include/linux/bpf_types.h | 1 + include/uapi/linux/bpf.h | 1 + kernel/bpf/helpers.c | 8 ++- kernel/bpf/local_storage.c | 150 +++++++++++++++++++++++++++++++++++++++------ kernel/bpf/syscall.c | 11 +++- kernel/bpf/verifier.c | 15 +++-- 8 files changed, 179 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 7e0c9a1d48b7..588dd5f0bd85 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -37,7 +37,10 @@ struct bpf_storage_buffer { }; struct bpf_cgroup_storage { - struct bpf_storage_buffer *buf; + union { + struct bpf_storage_buffer *buf; + void __percpu *percpu_buf; + }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list; @@ -109,6 +112,9 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { + if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) + return BPF_CGROUP_STORAGE_PERCPU; + return BPF_CGROUP_STORAGE_SHARED; } @@ -131,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); +int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, + void *value, u64 flags); + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
*/ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ @@ -285,6 +295,14 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} +static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, + void *value) { + return 0; +} +static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + void *key, void *value, u64 flags) { + return 0; +} #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b457fbe7b70b..018299a595c8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -274,6 +274,7 @@ struct bpf_prog_offload { enum bpf_cgroup_storage_type { BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, __BPF_CGROUP_STORAGE_MAX }; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index c9bd6fb765b0..5432f4c9f50e 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -43,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) #endif #ifdef CONFIG_CGROUP_BPF BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index aa5ccd2385ed..e2070d819e04 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -127,6 +127,7 @@ enum bpf_map_type { BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, }; enum bpf_prog_type { diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e42f8789b7ea..6502115e8f55 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -206,10 +206,16 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) */ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); struct bpf_cgroup_storage *storage; + void *ptr; storage = this_cpu_read(bpf_cgroup_storage[stype]); - return (unsigned long)&READ_ONCE(storage->buf)->data[0]; + if (stype == BPF_CGROUP_STORAGE_SHARED) + ptr = &READ_ONCE(storage->buf)->data[0]; + else + ptr = this_cpu_ptr(storage->percpu_buf); + + return (unsigned long)ptr; } const struct bpf_func_proto bpf_get_local_storage_proto = { diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 6742292fb39e..944eb297465f 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -152,6 +152,71 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, return 0; } +int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, + void *value) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage *storage; + int cpu, off = 0; + u32 size; + + rcu_read_lock(); + storage = cgroup_storage_lookup(map, key, false); + if (!storage) { + rcu_read_unlock(); + return -ENOENT; + } + + /* per_cpu areas are zero-filled and bpf programs can only + * access 'value_size' of them, so copying rounded areas + * will not leak any kernel data + */ + size = round_up(_map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(value + off, + per_cpu_ptr(storage->percpu_buf, cpu), size); + off += size; + } + rcu_read_unlock(); + return 
0; +} + +int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, + void *value, u64 map_flags) +{ + struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct bpf_cgroup_storage_key *key = _key; + struct bpf_cgroup_storage *storage; + int cpu, off = 0; + u32 size; + + if (map_flags != BPF_ANY && map_flags != BPF_EXIST) + return -EINVAL; + + rcu_read_lock(); + storage = cgroup_storage_lookup(map, key, false); + if (!storage) { + rcu_read_unlock(); + return -ENOENT; + } + + /* the user space will provide round_up(value_size, 8) bytes that + * will be copied into per-cpu area. bpf programs can only access + * value_size of it. During lookup the same extra bytes will be + * returned or zeros which were zero-filled by percpu_alloc, + * so no kernel data leaks possible + */ + size = round_up(_map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), + value + off, size); + off += size; + } + rcu_read_unlock(); + return 0; +} + static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, void *_next_key) { @@ -287,60 +352,105 @@ void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) spin_unlock_bh(&map->lock); } +static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) +{ + size_t size; + + if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) { + size = sizeof(struct bpf_storage_buffer) + map->value_size; + *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, + PAGE_SIZE) >> PAGE_SHIFT; + } else { + size = map->value_size; + *pages = round_up(round_up(size, 8) * num_possible_cpus(), + PAGE_SIZE) >> PAGE_SHIFT; + } + + return size; +} + struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { struct bpf_cgroup_storage *storage; struct bpf_map *map; + gfp_t flags; + size_t size; u32 pages; map = prog->aux->cgroup_storage[stype]; if (!map) return NULL; - pages = round_up(sizeof(struct bpf_cgroup_storage) + - sizeof(struct bpf_storage_buffer) + - map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + size = bpf_cgroup_storage_calculate_size(map, &pages); + if (bpf_map_charge_memlock(map, pages)) return ERR_PTR(-EPERM); storage = kmalloc_node(sizeof(struct bpf_cgroup_storage), __GFP_ZERO | GFP_USER, map->numa_node); - if (!storage) { - bpf_map_uncharge_memlock(map, pages); - return ERR_PTR(-ENOMEM); - } + if (!storage) + goto enomem; - storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) + - map->value_size, __GFP_ZERO | GFP_USER, - map->numa_node); - if (!storage->buf) { - bpf_map_uncharge_memlock(map, pages); - kfree(storage); - return ERR_PTR(-ENOMEM); + flags = __GFP_ZERO | GFP_USER; + + if (stype == BPF_CGROUP_STORAGE_SHARED) { + storage->buf = kmalloc_node(size, flags, map->numa_node); + if (!storage->buf) + goto enomem; + } else { + storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags); + if (!storage->percpu_buf) + goto enomem; } storage->map = (struct bpf_cgroup_storage_map *)map; return storage; + +enomem: + bpf_map_uncharge_memlock(map, pages); + kfree(storage); + return ERR_PTR(-ENOMEM); +} + +static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu) +{ + struct bpf_cgroup_storage *storage = + container_of(rcu, struct bpf_cgroup_storage, rcu); + + kfree(storage->buf); + kfree(storage); +} + +static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu) +{ + struct bpf_cgroup_storage *storage = + container_of(rcu, struct bpf_cgroup_storage, rcu); + + 
free_percpu(storage->percpu_buf); + kfree(storage); } void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage) { - u32 pages; + enum bpf_cgroup_storage_type stype; struct bpf_map *map; + u32 pages; if (!storage) return; map = &storage->map->map; - pages = round_up(sizeof(struct bpf_cgroup_storage) + - sizeof(struct bpf_storage_buffer) + - map->value_size, PAGE_SIZE) >> PAGE_SHIFT; + + bpf_cgroup_storage_calculate_size(map, &pages); bpf_map_uncharge_memlock(map, pages); - kfree_rcu(storage->buf, rcu); - kfree_rcu(storage, rcu); + stype = cgroup_storage_type(map); + if (stype == BPF_CGROUP_STORAGE_SHARED) + call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu); + else + call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu); } void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8c91d2b41b1e..5742df21598c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -686,7 +686,8 @@ static int map_lookup_elem(union bpf_attr *attr) if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) value_size = round_up(map->value_size, 8) * num_possible_cpus(); else if (IS_FD_MAP(map)) value_size = sizeof(u32); @@ -705,6 +706,8 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_percpu_hash_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { err = bpf_percpu_array_copy(map, key, value); + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { + err = bpf_percpu_cgroup_storage_copy(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { err = bpf_stackmap_copy(map, key, value); } else if (IS_FD_ARRAY(map)) { @@ -774,7 +777,8 @@ static int map_update_elem(union bpf_attr *attr) if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) value_size = round_up(map->value_size, 8) * num_possible_cpus(); else value_size = map->value_size; @@ -809,6 +813,9 @@ static int map_update_elem(union bpf_attr *attr) err = bpf_percpu_hash_update(map, key, value, attr->flags); } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { err = bpf_percpu_array_update(map, key, value, attr->flags); + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { + err = bpf_percpu_cgroup_storage_update(map, key, value, + attr->flags); } else if (IS_FD_ARRAY(map)) { rcu_read_lock(); err = bpf_fd_array_map_update_elem(map, f.file, key, value, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e90899df585d..a8cc83a970d1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2074,6 +2074,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; @@ -2164,7 +2165,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_FUNC_get_local_storage: - if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) + if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && + map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: @@ -5049,6 
+5051,12 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return 0; } +static bool bpf_map_is_cgroup_storage(struct bpf_map *map) +{ + return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || + map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); +} + /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ @@ -5139,10 +5147,9 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) } env->used_maps[env->used_map_cnt++] = map; - if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE && + if (bpf_map_is_cgroup_storage(map) && bpf_cgroup_storage_assign(env->prog, map)) { - verbose(env, - "only one cgroup storage is allowed\n"); + verbose(env, "only one cgroup storage of each type is allowed\n"); fdput(f); return -EBUSY; } -- cgit v1.2.3 From c6fdcd6e0cc4dc316e3eb261025fb0abd69540b9 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 28 Sep 2018 14:45:46 +0000 Subject: bpf: don't allow create maps of per-cpu cgroup local storages Explicitly forbid creating map of per-cpu cgroup local storages. This behavior matches the behavior of shared cgroup storages. Signed-off-by: Roman Gushchin Acked-by: Song Liu Cc: Daniel Borkmann Cc: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/map_in_map.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index 3bfbf4464416..99d243e1ad6e 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -24,7 +24,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) * in the verifier is not enough. */ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || - inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) { + inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || + inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { fdput(f); return ERR_PTR(-ENOTSUPP); } -- cgit v1.2.3 From a20bb058375147cb639c7aa17ef86ad68b32d847 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 20 Sep 2018 13:26:13 +0200 Subject: dma-direct: add an explicit dma_direct_get_required_mask This is somewhat modelled after the powerpc version, and differs from the legacy fallback in use fls64 instead of pointlessly splitting up the address into low and high dwords and in that it takes (__)phys_to_dma into account. Signed-off-by: Christoph Hellwig Acked-by: Benjamin Herrenschmidt Reviewed-by: Robin Murphy --- include/linux/dma-direct.h | 1 + kernel/dma/direct.c | 22 +++++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 86a59ba5a7f3..b79496d8c75b 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -55,6 +55,7 @@ static inline void dma_mark_clean(void *addr, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ +u64 dma_direct_get_required_mask(struct device *dev); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index c954f0a6dc62..f32b33cfa331 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -4,6 +4,7 @@ * * DMA operations that map physical memory directly without using an IOMMU. 
*/ +#include /* for max_pfn */ #include #include #include @@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, return true; } +static inline dma_addr_t phys_to_dma_direct(struct device *dev, + phys_addr_t phys) +{ + if (force_dma_unencrypted()) + return __phys_to_dma(dev, phys); + return phys_to_dma(dev, phys); +} + +u64 dma_direct_get_required_mask(struct device *dev) +{ + u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); + + return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; +} + static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { - dma_addr_t addr = force_dma_unencrypted() ? - __phys_to_dma(dev, phys) : phys_to_dma(dev, phys); - return addr + size - 1 <= dev->coherent_dma_mask; + return phys_to_dma_direct(dev, phys) + size - 1 <= + dev->coherent_dma_mask; } void *dma_direct_alloc_pages(struct device *dev, size_t size, @@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = { .unmap_page = dma_direct_unmap_page, .unmap_sg = dma_direct_unmap_sg, #endif + .get_required_mask = dma_direct_get_required_mask, .dma_supported = dma_direct_supported, .mapping_error = dma_direct_mapping_error, .cache_sync = arch_dma_cache_sync, -- cgit v1.2.3 From 7d21ee4c719f00896767ce19c4c01a56374c2ced Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 6 Sep 2018 20:30:54 -0400 Subject: dma-direct: refine dma_direct_alloc zone selection We need to take the DMA offset and encryption bit into account when selecting a zone. Use the opportunity to factor out the zone selection into a helper for reuse. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- kernel/dma/direct.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index f32b33cfa331..e78548397a92 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -69,6 +69,22 @@ u64 dma_direct_get_required_mask(struct device *dev) return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } +static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, + u64 *phys_mask) +{ + if (force_dma_unencrypted()) + *phys_mask = __dma_to_phys(dev, dma_mask); + else + *phys_mask = dma_to_phys(dev, dma_mask); + + /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ + if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + return GFP_DMA; + if (*phys_mask <= DMA_BIT_MASK(32)) + return GFP_DMA32; + return 0; +} + static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { return phys_to_dma_direct(dev, phys) + size - 1 <= @@ -81,17 +97,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; int page_order = get_order(size); struct page *page = NULL; + u64 phys_mask; void *ret; /* we always manually zero the memory once we are done: */ gfp &= ~__GFP_ZERO; - - /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ - if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) - gfp |= GFP_DMA; - if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) - gfp |= GFP_DMA32; - + gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, + &phys_mask); again: /* CMA can be used only in the context which permits sleeping */ if (gfpflags_allow_blocking(gfp)) { @@ -110,15 +122,14 @@ again: page = NULL; if (IS_ENABLED(CONFIG_ZONE_DMA32) && - dev->coherent_dma_mask < DMA_BIT_MASK(64) && + phys_mask < DMA_BIT_MASK(64) && !(gfp & (GFP_DMA32
| GFP_DMA))) { gfp |= GFP_DMA32; goto again; } if (IS_ENABLED(CONFIG_ZONE_DMA) && - dev->coherent_dma_mask < DMA_BIT_MASK(32) && - !(gfp & GFP_DMA)) { + phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) { gfp = (gfp & ~GFP_DMA32) | GFP_DMA; goto again; } -- cgit v1.2.3 From b4ebe6063204da58e48600b810a97c29ae9e5d12 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 20 Sep 2018 14:04:08 +0200 Subject: dma-direct: implement complete bus_dma_mask handling Instead of rejecting devices with a too small bus_dma_mask we can handle them by taking the bus dma_mask into account for allocations and bounce buffering decisions. Signed-off-by: Christoph Hellwig --- include/linux/dma-direct.h | 3 ++- kernel/dma/direct.c | 21 +++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index b79496d8c75b..fbca184ff5a0 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return false; - return addr + size - 1 <= *dev->dma_mask; + return addr + size - 1 <= + min_not_zero(*dev->dma_mask, dev->bus_dma_mask); } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index e78548397a92..60c433b880e0 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, return false; } - if (*dev->dma_mask >= DMA_BIT_MASK(32)) { + if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) { dev_err(dev, - "%s: overflow %pad+%zu of device mask %llx\n", - caller, &dma_addr, size, *dev->dma_mask); + "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n", + caller, &dma_addr, size, + *dev->dma_mask, dev->bus_dma_mask); } return false; } @@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev) { u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); + if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma) + max_dma = dev->bus_dma_mask; + return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, u64 *phys_mask) { + if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask) + dma_mask = dev->bus_dma_mask; + if (force_dma_unencrypted()) *phys_mask = __dma_to_phys(dev, dma_mask); else @@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { return phys_to_dma_direct(dev, phys) + size - 1 <= - dev->coherent_dma_mask; + min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask); } void *dma_direct_alloc_pages(struct device *dev, size_t size, @@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask) if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) return 0; #endif - /* - * Upstream PCI/PCIe bridges or SoC interconnects may not carry - * as many DMA address bits as the device itself supports. - */ - if (dev->bus_dma_mask && mask > dev->bus_dma_mask) - return 0; return 1; } -- cgit v1.2.3 From 9d7a224b463e1cf1178570b57b6497240fd79bc3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Sep 2018 09:31:58 +0200 Subject: dma-direct: always allow dma mask <= physical memory size This way an architecture with less than 4G of RAM can support dma_mask smaller than 32-bit without a ZONE_DMA. Apparently that is a common case on powerpc.
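As a rough illustration of the intent (the numbers are hypothetical: 4 KiB pages, no ZONE_DMA, identity phys_to_dma), consider a machine with 1 GB of RAM, i.e. max_pfn = 0x40000:

	min_mask = DMA_BIT_MASK(32);                    /* 0xffffffff */
	min_mask = min(min_mask, (max_pfn - 1) << 12);  /* 0x3ffff000 */

A device limited to a 31-bit dma_mask (0x7fffffff) can reach every physical page on such a machine, so the direct mapping is considered supported even though the mask does not cover a full 4 GB.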
Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- kernel/dma/direct.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 60c433b880e0..170bd322a94a 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -284,21 +284,25 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, return nents; } +/* + * Because 32-bit DMA masks are so common we expect every architecture to be + * able to satisfy them - either by not supporting more physical memory, or by + * providing a ZONE_DMA32. If neither is the case, the architecture needs to + * use an IOMMU instead of the direct mapping. + */ int dma_direct_supported(struct device *dev, u64 mask) { -#ifdef CONFIG_ZONE_DMA - if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))) - return 0; -#else - /* - * Because 32-bit DMA masks are so common we expect every architecture - * to be able to satisfy them - either by not supporting more physical - * memory, or by providing a ZONE_DMA32. If neither is the case, the - * architecture needs to use an IOMMU instead of the direct mapping. - */ - if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) + u64 min_mask; + + if (IS_ENABLED(CONFIG_ZONE_DMA)) + min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS); + else + min_mask = DMA_BIT_MASK(32); + + min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT); + + if (mask >= phys_to_dma(dev, min_mask)) return 0; -#endif return 1; } -- cgit v1.2.3 From 684bec1092b6991ff2a7751e8a763898576eb5c2 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Mon, 1 Oct 2018 15:55:22 -0700 Subject: Input: i8042 - enable keyboard wakeups by default when s2idle is used Previously, on typical consumer laptops, pressing a key on the keyboard when the system is in suspend would cause it to wake up (default or unconditional behaviour). This happens because the EC generates a SCI interrupt in this scenario. That is no longer true on modern laptops based on Intel WhiskeyLake, including Acer Swift SF314-55G, Asus UX333FA, Asus UX433FN and Asus UX533FD. We confirmed with Asus EC engineers that the "Modern Standby" design has been modified so that the EC no longer generates a SCI in this case; the keyboard controller itself should be used for wakeup. In order to retain the standard behaviour of being able to use the keyboard to wake up the system, enable serio wakeups by default on platforms that are using s2idle. Link: https://lkml.kernel.org/r/CAB4CAwfQ0mPMqCLp95TVjw4J0r5zKPWkSvvkK4cpZUGE--w8bQ@mail.gmail.com Reviewed-by: Rafael J. 
Wysocki Signed-off-by: Daniel Drake Signed-off-by: Dmitry Torokhov --- drivers/input/serio/i8042.c | 29 ++++++++++++++++++++--------- include/linux/suspend.h | 2 ++ kernel/power/suspend.c | 6 ++++++ 3 files changed, 28 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b8bc71569349..95a78ccbd847 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void) for (i = 0; i < I8042_NUM_PORTS; i++) { struct serio *serio = i8042_ports[i].serio; - if (serio) { - printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", - serio->name, - (unsigned long) I8042_DATA_REG, - (unsigned long) I8042_COMMAND_REG, - i8042_ports[i].irq); - serio_register_port(serio); - device_set_wakeup_capable(&serio->dev, true); - } + if (!serio) + continue; + + printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", + serio->name, + (unsigned long) I8042_DATA_REG, + (unsigned long) I8042_COMMAND_REG, + i8042_ports[i].irq); + serio_register_port(serio); + device_set_wakeup_capable(&serio->dev, true); + + /* + * On platforms using suspend-to-idle, allow the keyboard to + * wake up the system from sleep by enabling keyboard wakeups + * by default. This is consistent with keyboard wakeup + * behavior on many platforms using suspend-to-RAM (ACPI S3) + * by default. + */ + if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO) + device_set_wakeup_enable(&serio->dev, true); } } diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 440b62f7502e..206b735f383f 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void) return unlikely(s2idle_state == S2IDLE_STATE_ENTER); } +extern bool pm_suspend_via_s2idle(void); extern void __init pm_states_init(void); extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); extern void s2idle_wake(void); @@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } +static inline bool pm_suspend_via_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4c10be0f4843..be3d0d477661 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -62,6 +62,12 @@ static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head); enum s2idle_states __read_mostly s2idle_state; static DEFINE_SPINLOCK(s2idle_lock); +bool pm_suspend_via_s2idle(void) +{ + return mem_sleep_current == PM_SUSPEND_TO_IDLE; +} +EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle); + void s2idle_set_ops(const struct platform_s2idle_ops *ops) { lock_system_sleep(); -- cgit v1.2.3 From 77ac1c02d9f20a0d72fa992c88b98c15d087dbca Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 1 Oct 2018 10:13:24 +0200 Subject: jump_label: Fix NULL dereference bug in __jump_label_mod_update() Commit 19483677684b ("jump_label: Annotate entries that operate on __init code earlier") refactored the code that manages runtime patching of jump labels in modules that are tied to static keys defined in other modules or in the core kernel. 
In the latter case, we may iterate over the static_key_mod linked list until we hit the entry for the core kernel, whose 'mod' field will be NULL, and attempt to dereference it to get at its 'state' member. So let's add a non-NULL check: this forces the 'init' argument of __jump_label_update() to false for static keys that are defined in the core kernel, which is appropriate given that __init annotated jump_label entries in the core kernel should no longer be active at this point (i.e., when loading modules). Fixes: 19483677684b ("jump_label: Annotate entries that operate on ...") Reported-by: Dan Carpenter Signed-off-by: Ard Biesheuvel Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Cc: Jessica Yu Cc: Peter Zijlstra Link: https://lkml.kernel.org/r/20181001081324.11553-1-ard.biesheuvel@linaro.org --- kernel/jump_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/jump_label.c b/kernel/jump_label.c index e8cf3ff3149c..14a7f9881745 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -516,7 +516,7 @@ static void __jump_label_mod_update(struct static_key *key) else stop = m->jump_entries + m->num_jump_entries; __jump_label_update(key, mod->entries, stop, - m->state == MODULE_STATE_COMING); + m && m->state == MODULE_STATE_COMING); } } -- cgit v1.2.3 From a9f9772114c8b07ae75bcb3654bd017461248095 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Sep 2018 17:58:35 +0200 Subject: perf/core: Fix perf_pmu_unregister() locking When we unregister a PMU, we fail to serialize the @pmu_idr properly. Fix that by doing the entire thing under pmu_lock. Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Fixes: 2e80a82a49c4 ("perf: Dynamic pmu types") Signed-off-by: Ingo Molnar --- kernel/events/core.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index dcb093e7b377..dfb1d951789e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9431,9 +9431,7 @@ static void free_pmu_context(struct pmu *pmu) if (pmu->task_ctx_nr > perf_invalid_context) return; - mutex_lock(&pmus_lock); free_percpu(pmu->pmu_cpu_context); - mutex_unlock(&pmus_lock); } /* @@ -9689,12 +9687,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register); void perf_pmu_unregister(struct pmu *pmu) { - int remove_device; - mutex_lock(&pmus_lock); - remove_device = pmu_bus_running; list_del_rcu(&pmu->entry); - mutex_unlock(&pmus_lock); /* * We dereference the pmu list under both SRCU and regular RCU, so @@ -9706,13 +9700,14 @@ void perf_pmu_unregister(struct pmu *pmu) free_percpu(pmu->pmu_disable_count); if (pmu->type >= PERF_TYPE_MAX) idr_remove(&pmu_idr, pmu->type); - if (remove_device) { + if (pmu_bus_running) { if (pmu->nr_addr_filters) device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); device_del(pmu->dev); put_device(pmu->dev); } free_pmu_context(pmu); + mutex_unlock(&pmus_lock); } EXPORT_SYMBOL_GPL(perf_pmu_unregister); -- cgit v1.2.3 From cd6fb677ce7e460c25bdd66f689734102ec7d642 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 23 Sep 2018 18:13:43 +0200 Subject: perf/ring_buffer: Prevent concurent ring buffer access Some of the scheduling tracepoints allow the perf_tp_event code to write to ring buffer under different cpu than the code is running on. 
This results in corrupted ring buffer data demonstrated in following perf commands: # perf record -e 'sched:sched_switch,sched:sched_wakeup' perf bench sched messaging # Running 'sched/messaging' benchmark: # 20 sender and receiver processes per group # 10 groups == 400 processes run Total time: 0.383 [sec] [ perf record: Woken up 8 times to write data ] 0x42b890 [0]: failed to process type: -1765585640 [ perf record: Captured and wrote 4.825 MB perf.data (29669 samples) ] # perf report --stdio 0x42b890 [0]: failed to process type: -1765585640 The reason for the corruption are some of the scheduling tracepoints, that have __perf_task dfined and thus allow to store data to another cpu ring buffer: sched_waking sched_wakeup sched_wakeup_new sched_stat_wait sched_stat_sleep sched_stat_iowait sched_stat_blocked The perf_tp_event function first store samples for current cpu related events defined for tracepoint: hlist_for_each_entry_rcu(event, head, hlist_entry) perf_swevent_event(event, count, &data, regs); And then iterates events of the 'task' and store the sample for any task's event that passes tracepoint checks: ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (event->attr.type != PERF_TYPE_TRACEPOINT) continue; if (event->attr.config != entry->type) continue; perf_swevent_event(event, count, &data, regs); } Above code can race with same code running on another cpu, ending up with 2 cpus trying to store under the same ring buffer, which is specifically not allowed. This patch prevents the problem, by allowing only events with the same current cpu to receive the event. NOTE: this requires the use of (per-task-)per-cpu buffers for this feature to work; perf-record does this. Signed-off-by: Jiri Olsa [peterz: small edits to Changelog] Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Andrew Vagin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Fixes: e6dab5ffab59 ("perf/trace: Add ability to set a target task for events") Link: http://lkml.kernel.org/r/20180923161343.GB15054@krava Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index dfb1d951789e..5a97f34bc14c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8314,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, goto unlock; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (event->cpu != smp_processor_id()) + continue; if (event->attr.type != PERF_TYPE_TRACEPOINT) continue; if (event->attr.config != entry->type) -- cgit v1.2.3 From a4739eca4456e3d140cc656c5331d42b7465f91d Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 21 Sep 2018 23:18:56 +0530 Subject: sched/numa: Stop multiple tasks from moving to the CPU at the same time Task migration under NUMA balancing can happen in parallel. More than one task might choose to migrate to the same CPU at the same time. This can result in: - During task swap, choosing a task that was not part of the evaluation. - During task swap, task which just got moved into its preferred node, moving to a completely different node. - During task swap, task failing to move to the preferred node, will have to wait an extra interval for the next migrate opportunity. 
- During task movement, multiple task movements can cause load imbalance. This problem is more likely if there are more cores per node or more nodes in the system. Use a per run-queue variable to check if NUMA-balance is active on the run-queue. Specjbb2005 results (8 warehouses) Higher bops are better 2 Socket - 2 Node Haswell - X86 JVMS Prev Current %Change 4 200194 203353 1.57797 1 311331 328205 5.41995 2 Socket - 4 Node Power8 - PowerNV JVMS Prev Current %Change 1 197654 214384 8.46429 2 Socket - 2 Node Power9 - PowerNV JVMS Prev Current %Change 4 192605 188553 -2.10379 1 213402 196273 -8.02664 4 Socket - 4 Node Power7 - PowerVM JVMS Prev Current %Change 8 52227.1 57581.2 10.2516 1 102529 103468 0.915838 There is a regression on power 9 box. If we look at the details, that box has a sudden jump in cache-misses with this patch. All other parameters seem to be pointing towards NUMA consolidation. perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 13,345,784 13,941,377 migrations 1,127,820 1,157,323 faults 374,736 382,175 cache-misses 55,132,054,603 54,993,823,500 sched:sched_move_numa 1,923 2,005 sched:sched_stick_numa 52 14 sched:sched_swap_numa 595 529 migrate:mm_migrate_pages 1,932 1,573 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 60605 67099 numa_hint_faults_local 51804 58456 numa_hit 239945 240416 numa_huge_pte_updates 14 18 numa_interleave 60 65 numa_local 239865 240339 numa_other 80 77 numa_pages_migrated 1931 1574 numa_pte_updates 67823 77182 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 3,016,467 3,176,453 migrations 37,326 30,238 faults 115,342 87,869 cache-misses 11,692,155,554 12,544,479,391 sched:sched_move_numa 965 23 sched:sched_stick_numa 8 0 sched:sched_swap_numa 35 6 migrate:mm_migrate_pages 1,168 10 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 16286 236 numa_hint_faults_local 11863 201 numa_hit 112482 72293 numa_huge_pte_updates 33 0 numa_interleave 20 26 numa_local 112419 72233 numa_other 63 60 numa_pages_migrated 1144 8 numa_pte_updates 32859 0 perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 8,629,724 8,478,820 migrations 221,052 171,323 faults 308,661 307,499 cache-misses 135,574,913 240,353,599 sched:sched_move_numa 147 214 sched:sched_stick_numa 0 0 sched:sched_swap_numa 2 4 migrate:mm_migrate_pages 64 89 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 11481 5301 numa_hint_faults_local 10968 4745 numa_hit 89773 92943 numa_huge_pte_updates 0 0 numa_interleave 1116 899 numa_local 89220 92345 numa_other 553 598 numa_pages_migrated 62 88 numa_pte_updates 11694 5505 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 2,272,887 2,066,172 migrations 12,206 11,076 faults 163,704 149,544 cache-misses 4,801,186 10,398,067 sched:sched_move_numa 44 43 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 0 migrate:mm_migrate_pages 17 6 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 2261 3552 numa_hint_faults_local 1993 3347 numa_hit 25726 25611 numa_huge_pte_updates 0 0 numa_interleave 239 213 numa_local 25498 25583 numa_other 228 28 numa_pages_migrated 17 6 numa_pte_updates 2266 3535 perf stats 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 117,980,962 99,358,136 
migrations 3,950,220 4,041,607 faults 736,979 749,653 cache-misses 224,976,072,879 225,562,543,251 sched:sched_move_numa 504 771 sched:sched_stick_numa 50 14 sched:sched_swap_numa 239 204 migrate:mm_migrate_pages 1,260 1,180 vmstat 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 18293 27409 numa_hint_faults_local 11969 20677 numa_hit 240854 239988 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 240851 239983 numa_other 3 5 numa_pages_migrated 1190 1016 numa_pte_updates 18106 27916 perf stats 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 61,053,158 60,899,307 migrations 551,586 544,668 faults 244,174 270,834 cache-misses 74,326,766,973 74,543,455,635 sched:sched_move_numa 344 735 sched:sched_stick_numa 24 25 sched:sched_swap_numa 140 174 migrate:mm_migrate_pages 568 816 vmstat 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 6461 11059 numa_hint_faults_local 2283 4733 numa_hit 35661 41384 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 35661 41383 numa_other 0 1 numa_pages_migrated 568 815 numa_pte_updates 6518 11323 Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Rik van Riel Acked-by: Mel Gorman Cc: Jirka Hladky Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1537552141-27815-2-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 22 ++++++++++++++++++++++ kernel/sched/sched.h | 1 + 2 files changed, 23 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f808ddf2a868..3b0b75de1141 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1514,6 +1514,21 @@ struct task_numa_env { static void task_numa_assign(struct task_numa_env *env, struct task_struct *p, long imp) { + struct rq *rq = cpu_rq(env->dst_cpu); + + /* Bail out if run-queue part of active NUMA balance. */ + if (xchg(&rq->numa_migrate_on, 1)) + return; + + /* + * Clear previous best_cpu/rq numa-migrate flag, since task now + * found a better CPU to move/swap. 
+ */ + if (env->best_cpu != -1) { + rq = cpu_rq(env->best_cpu); + WRITE_ONCE(rq->numa_migrate_on, 0); + } + if (env->best_task) put_task_struct(env->best_task); if (p) @@ -1569,6 +1584,9 @@ static void task_numa_compare(struct task_numa_env *env, long moveimp = imp; int dist = env->dist; + if (READ_ONCE(dst_rq->numa_migrate_on)) + return; + rcu_read_lock(); cur = task_rcu_dereference(&dst_rq->curr); if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) @@ -1710,6 +1728,7 @@ static int task_numa_migrate(struct task_struct *p) .best_cpu = -1, }; struct sched_domain *sd; + struct rq *best_rq; unsigned long taskweight, groupweight; int nid, ret, dist; long taskimp, groupimp; @@ -1811,14 +1830,17 @@ static int task_numa_migrate(struct task_struct *p) */ p->numa_scan_period = task_scan_start(p); + best_rq = cpu_rq(env.best_cpu); if (env.best_task == NULL) { ret = migrate_task_to(p, env.best_cpu); + WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); return ret; } ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); + WRITE_ONCE(best_rq->numa_migrate_on, 0); if (ret != 0) trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4a2e8cae63c4..0b9161241bda 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -783,6 +783,7 @@ struct rq { #ifdef CONFIG_NUMA_BALANCING unsigned int nr_numa_running; unsigned int nr_preferred_running; + unsigned int numa_migrate_on; #endif #define CPU_LOAD_IDX_MAX 5 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; -- cgit v1.2.3 From 1327237a5978b00bcc665c33046c9bae75da1154 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 21 Sep 2018 23:18:57 +0530 Subject: sched/numa: Pass destination CPU as a parameter to migrate_task_rq This additional parameter (new_cpu) is used later for identifying if task migration is across nodes. No functional change. Specjbb2005 results (8 warehouses) Higher bops are better 2 Socket - 2 Node Haswell - X86 JVMS Prev Current %Change 4 203353 200668 -1.32036 1 328205 321791 -1.95427 2 Socket - 4 Node Power8 - PowerNV JVMS Prev Current %Change 1 214384 204848 -4.44809 2 Socket - 2 Node Power9 - PowerNV JVMS Prev Current %Change 4 188553 188098 -0.241311 1 196273 200351 2.07772 4 Socket - 4 Node Power7 - PowerVM JVMS Prev Current %Change 8 57581.2 58145.9 0.980702 1 103468 103798 0.318939 Brings out the variance between different specjbb2005 runs. Some events stats before and after applying the patch. 
perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 13,941,377 13,912,183 migrations 1,157,323 1,155,931 faults 382,175 367,139 cache-misses 54,993,823,500 54,240,196,814 sched:sched_move_numa 2,005 1,571 sched:sched_stick_numa 14 9 sched:sched_swap_numa 529 463 migrate:mm_migrate_pages 1,573 703 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 67099 50155 numa_hint_faults_local 58456 45264 numa_hit 240416 239652 numa_huge_pte_updates 18 36 numa_interleave 65 68 numa_local 240339 239576 numa_other 77 76 numa_pages_migrated 1574 680 numa_pte_updates 77182 71146 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 3,176,453 3,156,720 migrations 30,238 30,354 faults 87,869 97,261 cache-misses 12,544,479,391 12,400,026,826 sched:sched_move_numa 23 4 sched:sched_stick_numa 0 0 sched:sched_swap_numa 6 1 migrate:mm_migrate_pages 10 20 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 236 272 numa_hint_faults_local 201 186 numa_hit 72293 71362 numa_huge_pte_updates 0 0 numa_interleave 26 23 numa_local 72233 71299 numa_other 60 63 numa_pages_migrated 8 2 numa_pte_updates 0 0 perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 8,478,820 8,606,824 migrations 171,323 155,352 faults 307,499 301,409 cache-misses 240,353,599 157,759,224 sched:sched_move_numa 214 168 sched:sched_stick_numa 0 0 sched:sched_swap_numa 4 3 migrate:mm_migrate_pages 89 125 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 5301 4650 numa_hint_faults_local 4745 3946 numa_hit 92943 90489 numa_huge_pte_updates 0 0 numa_interleave 899 892 numa_local 92345 90034 numa_other 598 455 numa_pages_migrated 88 124 numa_pte_updates 5505 4818 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 2,066,172 2,113,167 migrations 11,076 10,533 faults 149,544 142,727 cache-misses 10,398,067 5,594,192 sched:sched_move_numa 43 10 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 0 migrate:mm_migrate_pages 6 6 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 3552 744 numa_hint_faults_local 3347 584 numa_hit 25611 25551 numa_huge_pte_updates 0 0 numa_interleave 213 263 numa_local 25583 25302 numa_other 28 249 numa_pages_migrated 6 6 numa_pte_updates 3535 744 perf stats 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 99,358,136 101,227,352 migrations 4,041,607 4,151,829 faults 749,653 745,233 cache-misses 225,562,543,251 224,669,561,766 sched:sched_move_numa 771 617 sched:sched_stick_numa 14 2 sched:sched_swap_numa 204 187 migrate:mm_migrate_pages 1,180 316 vmstat 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 27409 24195 numa_hint_faults_local 20677 21639 numa_hit 239988 238331 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 239983 238331 numa_other 5 0 numa_pages_migrated 1016 204 numa_pte_updates 27916 24561 perf stats 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 60,899,307 62,738,978 migrations 544,668 562,702 faults 270,834 228,465 cache-misses 74,543,455,635 75,778,067,952 sched:sched_move_numa 735 648 sched:sched_stick_numa 25 13 sched:sched_swap_numa 174 137 migrate:mm_migrate_pages 816 733 vmstat 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event 
Before After numa_hint_faults 11059 10281 numa_hint_faults_local 4733 3242 numa_hit 41384 36338 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 41383 36338 numa_other 1 0 numa_pages_migrated 815 706 numa_pte_updates 11323 10176 Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Jirka Hladky Cc: Linus Torvalds Cc: Mel Gorman Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1537552141-27815-3-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 +- kernel/sched/deadline.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 625bc9897f62..ad97f3ba5ec5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) if (task_cpu(p) != new_cpu) { if (p->sched_class->migrate_task_rq) - p->sched_class->migrate_task_rq(p); + p->sched_class->migrate_task_rq(p, new_cpu); p->se.nr_migrations++; rseq_migrate(p); perf_event_task_migrate(p); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 997ea7b839fa..91e4202b0634 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1607,7 +1607,7 @@ out: return cpu; } -static void migrate_task_rq_dl(struct task_struct *p) +static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) { struct rq *rq; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3b0b75de1141..bc768156239f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6297,7 +6297,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se); * cfs_rq_of(p) references at time of call are still valid and identify the * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. */ -static void migrate_task_rq_fair(struct task_struct *p) +static void migrate_task_rq_fair(struct task_struct *p, int new_cpu __maybe_unused) { /* * As blocked tasks retain absolute vruntime the migration needs to diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0b9161241bda..455fa330de04 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1524,7 +1524,7 @@ struct sched_class { #ifdef CONFIG_SMP int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); - void (*migrate_task_rq)(struct task_struct *p); + void (*migrate_task_rq)(struct task_struct *p, int new_cpu); void (*task_woken)(struct rq *this_rq, struct task_struct *task); -- cgit v1.2.3 From 3f9672baaa70fc62765857f13f007feb01f9ad33 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 21 Sep 2018 23:18:58 +0530 Subject: sched/numa: Reset scan rate whenever task moves across nodes Currently task scan rate is reset when NUMA balancer migrates the task to a different node. If NUMA balancer initiates a swap, reset is only applicable to the task that initiates the swap. Similarly no scan rate reset is done if the task is migrated across nodes by traditional load balancer. Instead move the scan reset to the migrate_task_rq. This ensures the task moved out of its preferred node, either gets back to its preferred node quickly or finds a new preferred node. Doing so, would be fair to all tasks migrating across nodes. 
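The hook added to the migration path is small; condensed from the fair.c hunk further down, with comments added for clarity (same logic as the patch, nothing extra):

	static void update_scan_period(struct task_struct *p, int new_cpu)
	{
		int src_nid = cpu_to_node(task_cpu(p));
		int dst_nid = cpu_to_node(new_cpu);

		/* ignore kernel threads, tasks without NUMA data and exiting tasks */
		if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
			return;

		/* only a cross-node move restarts the scan period */
		if (src_nid != dst_nid)
			p->numa_scan_period = task_scan_start(p);
	}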
Specjbb2005 results (8 warehouses) Higher bops are better 2 Socket - 2 Node Haswell - X86 JVMS Prev Current %Change 4 200668 203370 1.3465 1 321791 328431 2.06345 2 Socket - 4 Node Power8 - PowerNV JVMS Prev Current %Change 1 204848 206070 0.59654 2 Socket - 2 Node Power9 - PowerNV JVMS Prev Current %Change 4 188098 188386 0.153112 1 200351 201566 0.606436 4 Socket - 4 Node Power7 - PowerVM JVMS Prev Current %Change 8 58145.9 59157.4 1.73959 1 103798 105495 1.63491 Some events stats before and after applying the patch. perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 13,912,183 13,825,492 migrations 1,155,931 1,152,509 faults 367,139 371,948 cache-misses 54,240,196,814 55,654,206,041 sched:sched_move_numa 1,571 1,856 sched:sched_stick_numa 9 4 sched:sched_swap_numa 463 428 migrate:mm_migrate_pages 703 898 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 50155 57146 numa_hint_faults_local 45264 51612 numa_hit 239652 238164 numa_huge_pte_updates 36 16 numa_interleave 68 63 numa_local 239576 238085 numa_other 76 79 numa_pages_migrated 680 883 numa_pte_updates 71146 67540 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 3,156,720 3,288,525 migrations 30,354 38,652 faults 97,261 111,678 cache-misses 12,400,026,826 12,111,197,376 sched:sched_move_numa 4 900 sched:sched_stick_numa 0 0 sched:sched_swap_numa 1 5 migrate:mm_migrate_pages 20 714 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 272 18572 numa_hint_faults_local 186 14850 numa_hit 71362 73197 numa_huge_pte_updates 0 11 numa_interleave 23 25 numa_local 71299 73138 numa_other 63 59 numa_pages_migrated 2 712 numa_pte_updates 0 24021 perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 8,606,824 8,451,543 migrations 155,352 202,804 faults 301,409 310,024 cache-misses 157,759,224 253,522,507 sched:sched_move_numa 168 213 sched:sched_stick_numa 0 0 sched:sched_swap_numa 3 2 migrate:mm_migrate_pages 125 88 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 4650 11830 numa_hint_faults_local 3946 11301 numa_hit 90489 90038 numa_huge_pte_updates 0 0 numa_interleave 892 855 numa_local 90034 89796 numa_other 455 242 numa_pages_migrated 124 88 numa_pte_updates 4818 12039 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 2,113,167 2,049,153 migrations 10,533 11,405 faults 142,727 162,309 cache-misses 5,594,192 7,203,343 sched:sched_move_numa 10 22 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 0 migrate:mm_migrate_pages 6 1 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 744 1693 numa_hint_faults_local 584 1669 numa_hit 25551 25177 numa_huge_pte_updates 0 0 numa_interleave 263 194 numa_local 25302 24993 numa_other 249 184 numa_pages_migrated 6 1 numa_pte_updates 744 1577 perf stats 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 101,227,352 94,515,937 migrations 4,151,829 4,203,554 faults 745,233 832,697 cache-misses 224,669,561,766 226,248,698,331 sched:sched_move_numa 617 1,730 sched:sched_stick_numa 2 14 sched:sched_swap_numa 187 432 migrate:mm_migrate_pages 316 1,398 vmstat 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 24195 80079 numa_hint_faults_local 21639 68620 numa_hit 238331 241187 
numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 238331 241186 numa_other 0 1 numa_pages_migrated 204 1347 numa_pte_updates 24561 80729 perf stats 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 62,738,978 63,704,961 migrations 562,702 573,404 faults 228,465 230,878 cache-misses 75,778,067,952 76,568,222,781 sched:sched_move_numa 648 509 sched:sched_stick_numa 13 31 sched:sched_swap_numa 137 182 migrate:mm_migrate_pages 733 541 vmstat 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 10281 8501 numa_hint_faults_local 3242 2960 numa_hit 36338 35526 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 36338 35526 numa_other 0 0 numa_pages_migrated 706 539 numa_pte_updates 10176 8433 Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Jirka Hladky Cc: Linus Torvalds Cc: Mel Gorman Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1537552141-27815-4-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bc768156239f..5cbfb3068bc6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1824,12 +1824,6 @@ static int task_numa_migrate(struct task_struct *p) if (env.best_cpu == -1) return -EAGAIN; - /* - * Reset the scan period if the task is being rescheduled on an - * alternative node to recheck if the tasks is now properly placed. - */ - p->numa_scan_period = task_scan_start(p); - best_rq = cpu_rq(env.best_cpu); if (env.best_task == NULL) { ret = migrate_task_to(p, env.best_cpu); @@ -2618,6 +2612,18 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) } } +static void update_scan_period(struct task_struct *p, int new_cpu) +{ + int src_nid = cpu_to_node(task_cpu(p)); + int dst_nid = cpu_to_node(new_cpu); + + if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) + return; + + if (src_nid != dst_nid) + p->numa_scan_period = task_scan_start(p); +} + #else static void task_tick_numa(struct rq *rq, struct task_struct *curr) { @@ -2631,6 +2637,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) { } +static inline void update_scan_period(struct task_struct *p, int new_cpu) +{ +} + #endif /* CONFIG_NUMA_BALANCING */ static void @@ -6297,7 +6307,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se); * cfs_rq_of(p) references at time of call are still valid and identify the * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. */ -static void migrate_task_rq_fair(struct task_struct *p, int new_cpu __maybe_unused) +static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) { /* * As blocked tasks retain absolute vruntime the migration needs to @@ -6350,6 +6360,8 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu __maybe_unus /* We have migrated, no longer consider this task hot */ p->se.exec_start = 0; + + update_scan_period(p, new_cpu); } static void task_dead_fair(struct task_struct *p) -- cgit v1.2.3 From 05cbdf4f5c191ff378c47bbf66d7230beb725bdb Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 21 Sep 2018 23:18:59 +0530 Subject: sched/numa: Limit the conditions where scan period is reset migrate_task_rq_fair() resets the scan rate for NUMA balancing on every cross-node migration. 
In the event of excessive load balancing due to saturation, this may result in the scan rate being pegged at maximum and further overloading the machine. This patch only resets the scan if NUMA balancing is active, a preferred node has been selected and the task is being migrated from the preferred node as these are the most harmful. For example, a migration to the preferred node does not justify a faster scan rate. Similarly, a migration between two nodes that are not preferred is probably bouncing due to over-saturation of the machine. In that case, scanning faster and trapping more NUMA faults will further overload the machine. Specjbb2005 results (8 warehouses) Higher bops are better 2 Socket - 2 Node Haswell - X86 JVMS Prev Current %Change 4 203370 205332 0.964744 1 328431 319785 -2.63252 2 Socket - 4 Node Power8 - PowerNV JVMS Prev Current %Change 1 206070 206585 0.249915 2 Socket - 2 Node Power9 - PowerNV JVMS Prev Current %Change 4 188386 189162 0.41192 1 201566 213760 6.04963 4 Socket - 4 Node Power7 - PowerVM JVMS Prev Current %Change 8 59157.4 58736.8 -0.710985 1 105495 105419 -0.0720413 Some events stats before and after applying the patch. perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 13,825,492 14,285,708 migrations 1,152,509 1,180,621 faults 371,948 339,114 cache-misses 55,654,206,041 55,205,631,894 sched:sched_move_numa 1,856 843 sched:sched_stick_numa 4 6 sched:sched_swap_numa 428 219 migrate:mm_migrate_pages 898 365 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 57146 26907 numa_hint_faults_local 51612 24279 numa_hit 238164 239771 numa_huge_pte_updates 16 0 numa_interleave 63 68 numa_local 238085 239688 numa_other 79 83 numa_pages_migrated 883 363 numa_pte_updates 67540 27415 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 3,288,525 3,202,779 migrations 38,652 37,186 faults 111,678 106,076 cache-misses 12,111,197,376 12,024,873,744 sched:sched_move_numa 900 931 sched:sched_stick_numa 0 0 sched:sched_swap_numa 5 1 migrate:mm_migrate_pages 714 637 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 18572 17409 numa_hint_faults_local 14850 14367 numa_hit 73197 73953 numa_huge_pte_updates 11 20 numa_interleave 25 25 numa_local 73138 73892 numa_other 59 61 numa_pages_migrated 712 668 numa_pte_updates 24021 27276 perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 8,451,543 8,474,013 migrations 202,804 254,934 faults 310,024 320,506 cache-misses 253,522,507 110,580,458 sched:sched_move_numa 213 725 sched:sched_stick_numa 0 0 sched:sched_swap_numa 2 7 migrate:mm_migrate_pages 88 145 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 11830 22797 numa_hint_faults_local 11301 21539 numa_hit 90038 89308 numa_huge_pte_updates 0 0 numa_interleave 855 865 numa_local 89796 88955 numa_other 242 353 numa_pages_migrated 88 149 numa_pte_updates 12039 22930 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 2,049,153 2,195,628 migrations 11,405 11,179 faults 162,309 149,656 cache-misses 7,203,343 8,117,515 sched:sched_move_numa 22 49 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 0 migrate:mm_migrate_pages 1 5 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 1693 3577 numa_hint_faults_local 1669 3476 numa_hit 25177 26142 
numa_huge_pte_updates 0 0 numa_interleave 194 358 numa_local 24993 26042 numa_other 184 100 numa_pages_migrated 1 5 numa_pte_updates 1577 3587 perf stats 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 94,515,937 100,602,296 migrations 4,203,554 4,135,630 faults 832,697 789,256 cache-misses 226,248,698,331 226,160,621,058 sched:sched_move_numa 1,730 1,366 sched:sched_stick_numa 14 16 sched:sched_swap_numa 432 374 migrate:mm_migrate_pages 1,398 1,350 vmstat 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 80079 47857 numa_hint_faults_local 68620 39768 numa_hit 241187 240165 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 241186 240165 numa_other 1 0 numa_pages_migrated 1347 1224 numa_pte_updates 80729 48354 perf stats 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 63,704,961 58,515,496 migrations 573,404 564,845 faults 230,878 245,807 cache-misses 76,568,222,781 73,603,757,976 sched:sched_move_numa 509 996 sched:sched_stick_numa 31 10 sched:sched_swap_numa 182 193 migrate:mm_migrate_pages 541 646 vmstat 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 8501 13422 numa_hint_faults_local 2960 5619 numa_hit 35526 36118 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 35526 36116 numa_other 0 2 numa_pages_migrated 539 616 numa_pte_updates 8433 13374 Signed-off-by: Mel Gorman Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Jirka Hladky Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1537552141-27815-5-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5cbfb3068bc6..3529bf61826b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2617,11 +2617,32 @@ static void update_scan_period(struct task_struct *p, int new_cpu) int src_nid = cpu_to_node(task_cpu(p)); int dst_nid = cpu_to_node(new_cpu); + if (!static_branch_likely(&sched_numa_balancing)) + return; + if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) return; - if (src_nid != dst_nid) - p->numa_scan_period = task_scan_start(p); + if (src_nid == dst_nid) + return; + + /* + * Allow resets if faults have been trapped before one scan + * has completed. This is most likely due to a new task that + * is pulled cross-node due to wakeups or load balancing. + */ + if (p->numa_scan_seq) { + /* + * Avoid scan adjustments if moving to the preferred + * node or if the task was not previously running on + * the preferred node. + */ + if (dst_nid == p->numa_preferred_nid || + (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid)) + return; + } + + p->numa_scan_period = task_scan_start(p); } #else -- cgit v1.2.3 From 6fd98e775f24fd41520928d345f5db3ff52bb35d Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 21 Sep 2018 23:19:01 +0530 Subject: sched/numa: Avoid task migration for small NUMA improvement If NUMA improvement from the task migration is going to be very minimal, then avoid task migration. 
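A standalone sketch of the filter this patch adds (the SMALLIMP constant and the comparison mirror the hunk below; the best_imp values in main() are illustrative only):

/* Model of the "small improvement" filter: skip the migration when the
 * NUMA importance gain is tiny, since ping-ponging tasks mostly costs
 * cache misses.  Maximum importance is 2 * 999 = 1998, SMALLIMP = 30.
 */
#include <stdbool.h>
#include <stdio.h>

#define SMALLIMP	30

static bool worth_migrating(long imp, long best_imp)
{
	if (imp < SMALLIMP || imp <= best_imp + SMALLIMP / 2)
		return false;	/* gain too small to justify a move */
	return true;
}

int main(void)
{
	printf("imp=25,  best=0  -> %d\n", worth_migrating(25, 0));	/* 0: below SMALLIMP */
	printf("imp=40,  best=30 -> %d\n", worth_migrating(40, 30));	/* 0: not enough better */
	printf("imp=120, best=30 -> %d\n", worth_migrating(120, 30));	/* 1: clear win */
	return 0;
}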
Specjbb2005 results (8 warehouses) Higher bops are better 2 Socket - 2 Node Haswell - X86 JVMS Prev Current %Change 4 198512 205910 3.72673 1 313559 318491 1.57291 2 Socket - 4 Node Power8 - PowerNV JVMS Prev Current %Change 8 74761.9 74935.9 0.232739 1 214874 226796 5.54837 2 Socket - 2 Node Power9 - PowerNV JVMS Prev Current %Change 4 180536 189780 5.12031 1 210281 205695 -2.18089 4 Socket - 4 Node Power7 - PowerVM JVMS Prev Current %Change 8 56511.4 60370 6.828 1 104899 108100 3.05151 1/7 cases is regressing, if we look at events migrate_pages seem to vary the most especially in the regressing case. Also some amount of variance is expected between different runs of Specjbb2005. Some events stats before and after applying the patch. perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 13,818,546 13,801,554 migrations 1,149,960 1,151,541 faults 385,583 433,246 cache-misses 55,259,546,768 55,168,691,835 sched:sched_move_numa 2,257 2,551 sched:sched_stick_numa 9 24 sched:sched_swap_numa 512 904 migrate:mm_migrate_pages 2,225 1,571 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 72692 113682 numa_hint_faults_local 62270 102163 numa_hit 238762 240181 numa_huge_pte_updates 48 36 numa_interleave 75 64 numa_local 238676 240103 numa_other 86 78 numa_pages_migrated 2225 1564 numa_pte_updates 98557 134080 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After cs 3,173,490 3,079,150 migrations 36,966 31,455 faults 108,776 99,081 cache-misses 12,200,075,320 11,588,126,740 sched:sched_move_numa 1,264 1 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 0 migrate:mm_migrate_pages 899 36 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Haswell - X86 Event Before After numa_hint_faults 21109 430 numa_hint_faults_local 17120 77 numa_hit 72934 71277 numa_huge_pte_updates 42 0 numa_interleave 33 22 numa_local 72866 71218 numa_other 68 59 numa_pages_migrated 915 23 numa_pte_updates 42326 0 perf stats 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 8,312,022 8,707,565 migrations 231,705 171,342 faults 310,242 310,820 cache-misses 402,324,573 136,115,400 sched:sched_move_numa 193 215 sched:sched_stick_numa 0 6 sched:sched_swap_numa 3 24 migrate:mm_migrate_pages 93 162 vmstat 8th warehouse Multi JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 11838 8985 numa_hint_faults_local 11216 8154 numa_hit 90689 93819 numa_huge_pte_updates 0 0 numa_interleave 1579 882 numa_local 89634 93496 numa_other 1055 323 numa_pages_migrated 92 169 numa_pte_updates 12109 9217 perf stats 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After cs 2,170,481 2,152,072 migrations 10,126 10,704 faults 160,962 164,376 cache-misses 10,834,845 3,818,437 sched:sched_move_numa 10 16 sched:sched_stick_numa 0 0 sched:sched_swap_numa 0 7 migrate:mm_migrate_pages 2 199 vmstat 8th warehouse Single JVM 2 Socket - 2 Node Power9 - PowerNV Event Before After numa_hint_faults 403 2248 numa_hint_faults_local 358 1666 numa_hit 25898 25704 numa_huge_pte_updates 0 0 numa_interleave 207 200 numa_local 25860 25679 numa_other 38 25 numa_pages_migrated 2 197 numa_pte_updates 400 2234 perf stats 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 110,339,633 93,330,595 migrations 4,139,812 4,122,061 faults 863,622 865,979 cache-misses 231,838,045,660 225,395,083,479 sched:sched_move_numa 2,196 2,372 sched:sched_stick_numa 33 24 
sched:sched_swap_numa 544 769 migrate:mm_migrate_pages 2,469 1,677 vmstat 8th warehouse Multi JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 85748 91638 numa_hint_faults_local 66831 78096 numa_hit 242213 242225 numa_huge_pte_updates 0 0 numa_interleave 0 2 numa_local 242211 242219 numa_other 2 6 numa_pages_migrated 2376 1515 numa_pte_updates 86233 92274 perf stats 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After cs 59,331,057 51,487,271 migrations 552,019 537,170 faults 266,586 256,921 cache-misses 73,796,312,990 70,073,831,187 sched:sched_move_numa 981 576 sched:sched_stick_numa 54 24 sched:sched_swap_numa 286 327 migrate:mm_migrate_pages 713 726 vmstat 8th warehouse Single JVM 4 Socket - 4 Node Power7 - PowerVM Event Before After numa_hint_faults 14807 12000 numa_hint_faults_local 5738 5024 numa_hit 36230 36470 numa_huge_pte_updates 0 0 numa_interleave 0 0 numa_local 36228 36465 numa_other 2 5 numa_pages_migrated 703 726 numa_pte_updates 14742 11930 Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Jirka Hladky Cc: Linus Torvalds Cc: Mel Gorman Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1537552141-27815-7-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3529bf61826b..25c7c7e09cbd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1567,6 +1567,13 @@ static bool load_too_imbalanced(long src_load, long dst_load, return (imb > old_imb); } +/* + * Maximum NUMA importance can be 1998 (2*999); + * SMALLIMP @ 30 would be close to 1998/64. + * Used to deter task migration. + */ +#define SMALLIMP 30 + /* * This checks if the overall compute and NUMA accesses of the system would * be improved if the source tasks was migrated to the target dst_cpu taking @@ -1600,7 +1607,7 @@ static void task_numa_compare(struct task_numa_env *env, goto unlock; if (!cur) { - if (maymove || imp > env->best_imp) + if (maymove && moveimp >= env->best_imp) goto assign; else goto unlock; @@ -1643,15 +1650,21 @@ static void task_numa_compare(struct task_numa_env *env, task_weight(cur, env->dst_nid, dist); } - if (imp <= env->best_imp) - goto unlock; - if (maymove && moveimp > imp && moveimp > env->best_imp) { - imp = moveimp - 1; + imp = moveimp; cur = NULL; goto assign; } + /* + * If the NUMA importance is less than SMALLIMP, + * task migration might only result in ping pong + * of tasks and also hurt performance due to cache + * misses. + */ + if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) + goto unlock; + /* * In the overloaded case, try and keep the load balanced. */ -- cgit v1.2.3 From 11d4afd4ff667f9b6178ee8c142c36cb78bd84db Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 25 Sep 2018 11:17:42 +0200 Subject: sched/pelt: Fix warning and clean up IRQ PELT config Create a config for enabling irq load tracking in the scheduler. irq load tracking is useful only when irq or paravirtual time is accounted but it's only possible with SMP for now. 
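A standalone sketch of the resulting pattern (the CONFIG_* defines are hand-set here only so the file builds outside Kconfig; in the kernel HAVE_SCHED_AVG_IRQ is computed by Kconfig from IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING plus SMP):

/* Collect the "irq PELT is available" condition behind one symbol instead
 * of repeating the compound #if in every user.
 */
#include <stdio.h>

#define CONFIG_IRQ_TIME_ACCOUNTING 1
#define CONFIG_SMP 1

/* What Kconfig computes for us: */
#if (defined(CONFIG_IRQ_TIME_ACCOUNTING) || \
     defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)) && defined(CONFIG_SMP)
#define CONFIG_HAVE_SCHED_AVG_IRQ 1
#endif

static unsigned long cpu_util_irq_model(unsigned long avg_irq_util)
{
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	return avg_irq_util;	/* irq PELT signal is tracked */
#else
	return 0;		/* no irq tracking: report zero utilization */
#endif
}

int main(void)
{
	printf("cpu_util_irq: %lu\n", cpu_util_irq_model(128));
	return 0;
}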
Also use __maybe_unused to remove the compilation warning in update_rq_clock_task() that has been introduced by: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()") Suggested-by: Ingo Molnar Reported-by: Dou Liyang Reported-by: Miguel Ojeda Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bp@alien8.de Cc: dou_liyang@163.com Fixes: 2e62c4743adc ("sched/fair: Remove #ifdefs from scale_rt_capacity()") Link: http://lkml.kernel.org/r/1537867062-27285-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- init/Kconfig | 5 +++++ kernel/sched/core.c | 7 +++---- kernel/sched/fair.c | 2 +- kernel/sched/pelt.c | 2 +- kernel/sched/pelt.h | 2 +- kernel/sched/sched.h | 5 ++--- 6 files changed, 13 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/init/Kconfig b/init/Kconfig index 1e234e2f1cba..317d5ccb5191 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING If in doubt, say N here. +config HAVE_SCHED_AVG_IRQ + def_bool y + depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING + depends on SMP + config BSD_PROCESS_ACCT bool "BSD Process Accounting" depends on MULTIUSER diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ad97f3ba5ec5..f2caf1bae4a3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) * In theory, the compile should just see 0 here, and optimize out the call * to sched_rt_avg_update. But I don't trust it... */ -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - s64 steal = 0, irq_delta = 0; -#endif + s64 __maybe_unused steal = 0, irq_delta = 0; + #ifdef CONFIG_IRQ_TIME_ACCOUNTING irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; @@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) rq->clock_task += delta; -#ifdef HAVE_SCHED_AVG_IRQ +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) update_irq_load_avg(rq, irq_delta + steal); #endif diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1d92ed2eca8b..d59307ecd67d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7317,7 +7317,7 @@ static inline bool others_have_blocked(struct rq *rq) if (READ_ONCE(rq->avg_dl.util_avg)) return true; -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ if (READ_ONCE(rq->avg_irq.util_avg)) return true; #endif diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 35475c0c5419..48a126486435 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) return 0; } -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ /* * irq: * diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index d2894db28955..7e56b489ff32 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq); int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ int update_irq_load_avg(struct rq *rq, u64 running); #else static inline int diff --git 
a/kernel/sched/sched.h b/kernel/sched/sched.h index 632804fa0b12..798b1afd5092 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -862,8 +862,7 @@ struct rq { struct sched_avg avg_rt; struct sched_avg avg_dl; -#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) -#define HAVE_SCHED_AVG_IRQ +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ struct sched_avg avg_irq; #endif u64 idle_stamp; @@ -2223,7 +2222,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq) } #endif -#ifdef HAVE_SCHED_AVG_IRQ +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ static inline unsigned long cpu_util_irq(struct rq *rq) { return rq->avg_irq.util_avg; -- cgit v1.2.3 From fdf5f315d5cfaefb7bb8a62ec4bf37b9891837aa Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Thu, 9 Aug 2018 14:57:53 +0100 Subject: sched/fair: Disable LB_BIAS by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LB_BIAS allows the adjustment on how conservative load should be balanced. The rq->cpu_load[idx] array is used for this functionality. It contains weighted CPU load decayed average values over different intervals (idx = 1..4). Idx = 0 is the weighted CPU load itself. The values are updated during scheduler_tick, before idle balance and at nohz exit. There are 5 different types of idx's per sched domain (sd). Each of them is used to index into the rq->cpu_load[idx] array in a specific scenario (busy, idle and newidle for load balancing, forkexec for wake-up slow-path load balancing and wake for affine wakeup based on weight). Only the sd idx's for busy and idle load balancing are set to 2,3 or 1,2 respectively. All the other sd idx's are set to 0. Conservative load balancing is achieved for sd idx's >= 1 by using the min/max (source_load()/target_load()) value between the current weighted CPU load and the rq->cpu_load[sd idx -1] for the busiest(idlest)/local CPU load in load balancing or vice versa in the wake-up slow-path load balancing. There is no conservative balancing for sd idx = 0 since only current weighted CPU load is used in this case. It is very likely that LB_BIAS' influence on load balancing can be neglected (see test results below). This is further supported by: (1) Weighted CPU load today is by itself a decayed average value (PELT) (cfs_rq->avg->runnable_load_avg) and not the instantaneous load (rq->load.weight) it was when LB_BIAS was introduced. (2) Sd imbalance_pct is used for CPU_NEWLY_IDLE and CPU_NOT_IDLE (relate to sd's newidle and busy idx) in find_busiest_group() when comparing busiest and local avg load to make load balancing even more conservative. (3) The sd forkexec and newidle idx are always set to 0 so there is no adjustment on how conservatively load balancing is done here. (4) Affine wakeup based on weight (wake_affine_weight()) will not be impacted since the sd wake idx is always set to 0. Let's disable LB_BIAS by default for a few kernel releases to make sure that no workload and no scheduler topology is affected. The benefit of being able to remove the LB_BIAS dependency from source_load() and target_load() is that the entire rq->cpu_load[idx] code could be removed in this case. It is really hard to say if there is no regression w/o testing this with a lot of different workloads on a lot of different platforms, especially NUMA machines. The following 104 LKP (Linux Kernel Performance) tests were run by the 0-Day guys mostly on multi-socket hosts with a larger number of logical cpus (88, 192). 
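As a rough standalone model of the mechanism LB_BIAS gates (not the kernel implementation; the rq layout and load numbers are illustrative), source_load() and target_load() pick the conservative side of the current weighted load versus the decayed cpu_load[] history:

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

struct rq_model {
	unsigned long weighted_cpuload;			/* current load */
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];	/* decayed history */
};

static int lb_bias = 1;	/* models sched_feat(LB_BIAS), now default false */

static unsigned long source_load(struct rq_model *rq, int idx)
{
	unsigned long total = rq->weighted_cpuload;

	if (idx == 0 || !lb_bias)
		return total;
	/* busiest CPU: take the min, so balancing pulls less aggressively */
	return total < rq->cpu_load[idx - 1] ? total : rq->cpu_load[idx - 1];
}

static unsigned long target_load(struct rq_model *rq, int idx)
{
	unsigned long total = rq->weighted_cpuload;

	if (idx == 0 || !lb_bias)
		return total;
	/* local CPU: take the max, again the conservative direction */
	return total > rq->cpu_load[idx - 1] ? total : rq->cpu_load[idx - 1];
}

int main(void)
{
	struct rq_model rq = {
		.weighted_cpuload = 800,
		.cpu_load = { 800, 600, 500, 450, 400 },
	};

	printf("biased:   src=%lu dst=%lu\n", source_load(&rq, 2), target_load(&rq, 2));
	lb_bias = 0;
	printf("unbiased: src=%lu dst=%lu\n", source_load(&rq, 2), target_load(&rq, 2));
	return 0;
}

With LB_BIAS disabled both helpers collapse to the current weighted load, which is what makes the cpu_load[] array a candidate for removal later.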
The base for the test was commit b3dae109fa89 ("sched/swait: Rename to exclusive") (tip/sched/core v4.18-rc1). Only 2 out of the 104 tests had a significant change in one of the metrics (fsmark/1x-1t-1HDD-btrfs-nfsv4-4M-60G-NoSync-performance +7% files_per_sec, unixbench/300s-100%-syscall-performance -11% score). Tests which showed a change in one of the metrics are marked with a '*' and this change is listed as well. (a) lkp-bdw-ep3: 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz 64G dd-write/10m-1HDD-cfq-btrfs-100dd-performance fsmark/1x-1t-1HDD-xfs-nfsv4-4M-60G-NoSync-performance * fsmark/1x-1t-1HDD-btrfs-nfsv4-4M-60G-NoSync-performance 7.50 7% 8.00 ± 6% fsmark.files_per_sec fsmark/1x-1t-1HDD-btrfs-nfsv4-4M-60G-fsyncBeforeClose-performance fsmark/1x-1t-1HDD-btrfs-4M-60G-NoSync-performance fsmark/1x-1t-1HDD-btrfs-4M-60G-fsyncBeforeClose-performance kbuild/300s-50%-vmlinux_prereq-performance kbuild/300s-200%-vmlinux_prereq-performance kbuild/300s-50%-vmlinux_prereq-performance-1HDD-ext4 kbuild/300s-200%-vmlinux_prereq-performance-1HDD-ext4 (b) lkp-skl-4sp1: 192 threads Intel(R) Xeon(R) Platinum 8160 768G dbench/100%-performance ebizzy/200%-100x-10s-performance hackbench/1600%-process-pipe-performance iperf/300s-cs-localhost-tcp-performance iperf/300s-cs-localhost-udp-performance perf-bench-numa-mem/2t-300M-performance perf-bench-sched-pipe/10000000ops-process-performance perf-bench-sched-pipe/10000000ops-threads-performance schbench/2-16-300-30000-30000-performance tbench/100%-cs-localhost-performance (c) lkp-bdw-ep6: 88 threads Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz 128G stress-ng/100%-60s-pipe-performance unixbench/300s-1-whetstone-double-performance unixbench/300s-1-shell1-performance unixbench/300s-1-shell8-performance unixbench/300s-1-pipe-performance * unixbench/300s-1-context1-performance 312 315 unixbench.score unixbench/300s-1-spawn-performance unixbench/300s-1-syscall-performance unixbench/300s-1-dhry2reg-performance unixbench/300s-1-fstime-performance unixbench/300s-1-fsbuffer-performance unixbench/300s-1-fsdisk-performance unixbench/300s-100%-whetstone-double-performance unixbench/300s-100%-shell1-performance unixbench/300s-100%-shell8-performance unixbench/300s-100%-pipe-performance unixbench/300s-100%-context1-performance unixbench/300s-100%-spawn-performance * unixbench/300s-100%-syscall-performance 3571 ± 3% -11% 3183 ± 4% unixbench.score unixbench/300s-100%-dhry2reg-performance unixbench/300s-100%-fstime-performance unixbench/300s-100%-fsbuffer-performance unixbench/300s-100%-fsdisk-performance unixbench/300s-1-execl-performance unixbench/300s-100%-execl-performance * will-it-scale/brk1-performance 365004 360387 will-it-scale.per_thread_ops * will-it-scale/dup1-performance 432401 437596 will-it-scale.per_thread_ops will-it-scale/eventfd1-performance will-it-scale/futex1-performance will-it-scale/futex2-performance will-it-scale/futex3-performance will-it-scale/futex4-performance will-it-scale/getppid1-performance will-it-scale/lock1-performance will-it-scale/lseek1-performance will-it-scale/lseek2-performance * will-it-scale/malloc1-performance 47025 45817 will-it-scale.per_thread_ops 77499 76529 will-it-scale.per_process_ops will-it-scale/malloc2-performance * will-it-scale/mmap1-performance 123399 120815 will-it-scale.per_thread_ops 152219 149833 will-it-scale.per_process_ops * will-it-scale/mmap2-performance 107327 104714 will-it-scale.per_thread_ops 136405 133765 will-it-scale.per_process_ops will-it-scale/open1-performance * will-it-scale/open2-performance 
171570 168805 will-it-scale.per_thread_ops 532644 526202 will-it-scale.per_process_ops will-it-scale/page_fault1-performance will-it-scale/page_fault2-performance will-it-scale/page_fault3-performance will-it-scale/pipe1-performance will-it-scale/poll1-performance * will-it-scale/poll2-performance 176134 172848 will-it-scale.per_thread_ops 281361 275053 will-it-scale.per_process_ops will-it-scale/posix_semaphore1-performance will-it-scale/pread1-performance will-it-scale/pread2-performance will-it-scale/pread3-performance will-it-scale/pthread_mutex1-performance will-it-scale/pthread_mutex2-performance will-it-scale/pwrite1-performance will-it-scale/pwrite2-performance will-it-scale/pwrite3-performance * will-it-scale/read1-performance 1190563 1174833 will-it-scale.per_thread_ops * will-it-scale/read2-performance 1105369 1080427 will-it-scale.per_thread_ops will-it-scale/readseek1-performance * will-it-scale/readseek2-performance 261818 259040 will-it-scale.per_thread_ops will-it-scale/readseek3-performance * will-it-scale/sched_yield-performance 2408059 2382034 will-it-scale.per_thread_ops will-it-scale/signal1-performance will-it-scale/unix1-performance will-it-scale/unlink1-performance will-it-scale/unlink2-performance * will-it-scale/write1-performance 976701 961588 will-it-scale.per_thread_ops * will-it-scale/writeseek1-performance 831898 822448 will-it-scale.per_thread_ops * will-it-scale/writeseek2-performance 228248 225065 will-it-scale.per_thread_ops * will-it-scale/writeseek3-performance 226670 224058 will-it-scale.per_thread_ops will-it-scale/context_switch1-performance aim7/performance-fork_test-2000 * aim7/performance-brk_test-3000 74869 76676 aim7.jobs-per-min aim7/performance-disk_cp-3000 aim7/performance-disk_rd-3000 aim7/performance-sieve-3000 aim7/performance-page_test-3000 aim7/performance-creat-clo-3000 aim7/performance-mem_rtns_1-8000 aim7/performance-disk_wrt-8000 aim7/performance-pipe_cpy-8000 aim7/performance-ram_copy-8000 (d) lkp-avoton3: 8 threads Intel(R) Atom(TM) CPU C2750 @ 2.40GHz 16G netperf/ipv4-900s-200%-cs-localhost-TCP_STREAM-performance Signed-off-by: Dietmar Eggemann Signed-off-by: Peter Zijlstra (Intel) Cc: Fengguang Wu Cc: Li Zhijian Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180809135753.21077-1-dietmar.eggemann@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/features.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 85ae8488039c..858589b83377 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -39,7 +39,7 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true) SCHED_FEAT(HRTICK, false) SCHED_FEAT(DOUBLE_TICK, false) -SCHED_FEAT(LB_BIAS, true) +SCHED_FEAT(LB_BIAS, false) /* * Decrement CPU capacity based on time not spent running tasks -- cgit v1.2.3 From 4a465e3ebbc8004ce4f7f08f6022ee8315a94edf Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Fri, 3 Aug 2018 15:05:38 +0100 Subject: sched/fair: Remove setting task's se->runnable_weight during PELT update A CFS (SCHED_OTHER, SCHED_BATCH or SCHED_IDLE policy) task's se->runnable_weight must always be in sync with its se->load.weight. se->runnable_weight is set to se->load.weight when the task is forked (init_entity_runnable_average()) or reniced (reweight_entity()). 
There are two cases in set_load_weight() which since they currently only set se->load.weight could lead to a situation in which se->load.weight is different to se->runnable_weight for a CFS task: (1) A task switches to SCHED_IDLE. (2) A SCHED_FIFO, SCHED_RR or SCHED_DEADLINE task which has been reniced (during which only its static priority gets set) switches to SCHED_OTHER or SCHED_BATCH. Set se->runnable_weight to se->load.weight in these two cases to prevent this. This eliminates the need to explicitly set it to se->load.weight during PELT updates in the CFS scheduler fastpath. Signed-off-by: Dietmar Eggemann Signed-off-by: Peter Zijlstra (Intel) Cc: Joel Fernandes Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Patrick Bellasi Cc: Peter Zijlstra Cc: Quentin Perret Cc: Thomas Gleixner Cc: Vincent Guittot Link: http://lkml.kernel.org/r/20180803140538.1178-1-dietmar.eggemann@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 ++ kernel/sched/pelt.c | 6 ------ 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f2caf1bae4a3..56b3c1781276 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -700,6 +700,7 @@ static void set_load_weight(struct task_struct *p, bool update_load) if (idle_policy(p->policy)) { load->weight = scale_load(WEIGHT_IDLEPRIO); load->inv_weight = WMULT_IDLEPRIO; + p->se.runnable_weight = load->weight; return; } @@ -712,6 +713,7 @@ static void set_load_weight(struct task_struct *p, bool update_load) } else { load->weight = scale_load(sched_prio_to_weight[prio]); load->inv_weight = sched_prio_to_wmult[prio]; + p->se.runnable_weight = load->weight; } } diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 48a126486435..90fb5bc12ad4 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -269,9 +269,6 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se) { - if (entity_is_task(se)) - se->runnable_weight = se->load.weight; - if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) { ___update_load_avg(&se->avg, se_weight(se), se_runnable(se)); return 1; @@ -282,9 +279,6 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se) int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se) { - if (entity_is_task(se)) - se->runnable_weight = se->load.weight; - if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq, cfs_rq->curr == se)) { -- cgit v1.2.3 From 37355bdc5a129899f6b245900a8eb944a092f7fd Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 1 Oct 2018 11:05:25 +0100 Subject: sched/numa: Migrate pages to local nodes quicker early in the lifetime of a task Automatic NUMA Balancing uses a multi-stage pass to decide whether a page should migrate to a local node. This filter avoids excessive ping-ponging if a page is shared or used by threads that migrate cross-node frequently. Threads inherit both page tables and the preferred node ID from the parent. This means that threads can trigger hinting faults earlier than a new task which delays scanning for a number of seconds. As it can be load balanced very early in its lifetime there can be an unnecessary delay before it starts migrating thread-local data. This patch migrates private pages faster early in the lifetime of a thread using the sequence counter as an identifier of new tasks. 
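A standalone model of the early-migration filter added below (the "private fault" flag stands in for the cpupid checks in the hunk; field values are illustrative):

/* While a task is young, i.e. it has no preferred node yet or fewer than
 * two full scan passes have completed (numa_scan_seq <= 4), a first or
 * private fault is migrated immediately instead of waiting for the
 * multi-stage two-pass filter.
 */
#include <stdbool.h>
#include <stdio.h>

struct task_model {
	int numa_preferred_nid;		/* -1 while unset */
	unsigned int numa_scan_seq;	/* completed NUMA scan passes */
};

static bool migrate_immediately(const struct task_model *p, bool fault_is_private)
{
	bool task_is_new = p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4;

	return task_is_new && fault_is_private;
}

int main(void)
{
	struct task_model young = { .numa_preferred_nid = -1, .numa_scan_seq = 1 };
	struct task_model old   = { .numa_preferred_nid = 2,  .numa_scan_seq = 40 };

	printf("young task, private fault: %d\n", migrate_immediately(&young, true));
	printf("old task,   private fault: %d\n", migrate_immediately(&old, true));
	return 0;
}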
With this patch applied, STREAM performance is the same as 4.17 even though processes are not spread cross-node prematurely. Other workloads showed a mix of minor gains and losses. This is somewhat expected most workloads are not very sensitive to the starting conditions of a process. 4.19.0-rc5 4.19.0-rc5 4.17.0 numab-v1r1 fastmigrate-v1r1 vanilla MB/sec copy 43298.52 ( 0.00%) 47335.46 ( 9.32%) 47219.24 ( 9.06%) MB/sec scale 30115.06 ( 0.00%) 32568.12 ( 8.15%) 32527.56 ( 8.01%) MB/sec add 32825.12 ( 0.00%) 36078.94 ( 9.91%) 35928.02 ( 9.45%) MB/sec triad 32549.52 ( 0.00%) 35935.94 ( 10.40%) 35969.88 ( 10.51%) Signed-off-by: Mel Gorman Reviewed-by: Rik van Riel Acked-by: Peter Zijlstra Cc: Jirka Hladky Cc: Linus Torvalds Cc: Linux-MM Cc: Srikar Dronamraju Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181001100525.29789-3-mgorman@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 25c7c7e09cbd..7fc4a371bdd2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, int last_cpupid, this_cpupid; this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); + last_cpupid = page_cpupid_xchg_last(page, this_cpupid); + + /* + * Allow first faults or private faults to migrate immediately early in + * the lifetime of a task. The magic number 4 is based on waiting for + * two full passes of the "multi-stage node selection" test that is + * executed below. + */ + if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) && + (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) + return true; /* * Multi-stage node selection is used in conjunction with a periodic @@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, * This quadric squishes small probabilities, making it less likely we * act on an unlikely task<->page relation. */ - last_cpupid = page_cpupid_xchg_last(page, this_cpupid); if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != dst_nid) return false; -- cgit v1.2.3 From b0584ea66d73919cbf5878a3420a837f06ab8396 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 2 Oct 2018 02:41:53 +0000 Subject: bpf: don't accept cgroup local storage with zero value size Explicitly forbid creating cgroup local storage maps with zero value size, as it makes no sense and might even cause a panic. 
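A minimal user-space reproducer sketch, assuming a 4.19-era uapi header and with error handling trimmed: creating such a map through the raw bpf(2) syscall should now fail with EINVAL instead of succeeding.

/* Try to create a cgroup local storage map with value_size == 0. */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_CGROUP_STORAGE;
	attr.key_size    = sizeof(struct bpf_cgroup_storage_key);
	attr.value_size  = 0;	/* nonsensical: no room for any data */
	attr.max_entries = 0;	/* cgroup storage does not use max_entries */

	if (syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0)
		perror("BPF_MAP_CREATE");	/* expected: Invalid argument */
	return 0;
}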
Reported-by: syzbot+18628320d3b14a5c459c@syzkaller.appspotmail.com Signed-off-by: Roman Gushchin Cc: Alexei Starovoitov Cc: Daniel Borkmann Signed-off-by: Daniel Borkmann --- kernel/bpf/local_storage.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 94126cbffc88..830d7f095748 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) return ERR_PTR(-EINVAL); + if (attr->value_size == 0) + return ERR_PTR(-EINVAL); + if (attr->value_size > PAGE_SIZE) return ERR_PTR(-E2BIG); -- cgit v1.2.3 From f92b070f2dc89a8ff1a0cc8b608e20abef894c7d Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 13 Sep 2018 14:34:06 +0200 Subject: printk: Do not miss new messages when replaying the log The variable "exclusive_console" is used to reply all existing messages on a newly registered console. It is cleared when all messages are out. The problem is that new messages might appear in the meantime. These are then visible only on the exclusive console. The obvious solution is to clear "exclusive_console" after we replay all messages that were already proceed before we started the reply. Reported-by: Sergey Senozhatsky Link: http://lkml.kernel.org/r/20180913123406.14378-1-pmladek@suse.com To: Steven Rostedt Cc: Peter Zijlstra Cc: Sergey Senozhatsky Cc: linux-kernel@vger.kernel.org Acked-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 9bf5404397e0..cfaa211a8b54 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -423,6 +423,7 @@ static u32 log_next_idx; /* the next printk record to write to the console */ static u64 console_seq; static u32 console_idx; +static u64 exclusive_console_stop_seq; /* the next printk record to read after the last 'clear' command */ static u64 clear_seq; @@ -2009,6 +2010,7 @@ static u64 syslog_seq; static u32 syslog_idx; static u64 console_seq; static u32 console_idx; +static u64 exclusive_console_stop_seq; static u64 log_first_seq; static u32 log_first_idx; static u64 log_next_seq; @@ -2376,6 +2378,12 @@ skip: goto skip; } + /* Output to all consoles once old messages replayed. */ + if (unlikely(exclusive_console && + console_seq >= exclusive_console_stop_seq)) { + exclusive_console = NULL; + } + len += msg_print_text(msg, console_msg_format & MSG_FORMAT_SYSLOG, text + len, @@ -2418,10 +2426,6 @@ skip: console_locked = 0; - /* Release the exclusive_console once it is used */ - if (unlikely(exclusive_console)) - exclusive_console = NULL; - raw_spin_unlock(&logbuf_lock); up_console_sem(); @@ -2706,6 +2710,7 @@ void register_console(struct console *newcon) * the already-registered consoles. 
*/ exclusive_console = newcon; + exclusive_console_stop_seq = console_seq; } console_unlock(); console_sysfs_notify(); -- cgit v1.2.3 From 884e370ea88c109a3b982f4eb9ecd82510a3a1fe Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Fri, 28 Sep 2018 18:53:04 +0900 Subject: printk: CON_PRINTBUFFER console registration is a bit racy CON_PRINTBUFFER console registration requires us to do several preparation steps: - Rollback console_seq to replay logbuf messages which were already seen on other consoles; - Set exclusive_console flag so console_unlock() will ->write() logbuf messages only to the exclusive_console driver. The way we do it, however, is a bit racy logbuf_lock_irqsave(flags); console_seq = syslog_seq; console_idx = syslog_idx; logbuf_unlock_irqrestore(flags); << preemption enabled << irqs enabled exclusive_console = newcon; console_unlock(); We rollback console_seq under logbuf_lock with IRQs disabled, but we set exclusive_console with local IRQs enabled and logbuf unlocked. If the system oops-es or panic-s before we set exclusive_console - and given that we have IRQs and preemption enabled there is such a possibility - we will re-play all logbuf messages to every registered console, which may be a bit annoying and time consuming. Move exclusive_console assignment to the same IRQs-disabled and logbuf_lock-protected section where we rollback console_seq. Link: http://lkml.kernel.org/r/20180928095304.9972-1-sergey.senozhatsky@gmail.com To: Steven Rostedt Cc: Sergey Senozhatsky Cc: linux-kernel@vger.kernel.org Signed-off-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index cfaa211a8b54..c5b568c2d167 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2703,14 +2703,18 @@ void register_console(struct console *newcon) logbuf_lock_irqsave(flags); console_seq = syslog_seq; console_idx = syslog_idx; - logbuf_unlock_irqrestore(flags); /* * We're about to replay the log buffer. Only do this to the * just-registered console to avoid excessive message spam to * the already-registered consoles. + * + * Set exclusive_console with disabled interrupts to reduce + * race window with eventual console_flush_on_panic() that + * ignores console_lock. */ exclusive_console = newcon; exclusive_console_stop_seq = console_seq; + logbuf_unlock_irqrestore(flags); } console_unlock(); console_sysfs_notify(); -- cgit v1.2.3 From f3709f69b7c5cba6323cc03c29b64293b93be817 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:29 -0700 Subject: bpf: Add iterator for spilled registers Add this iterator for spilled registers, it concentrates the details of how to get the current frame's spilled registers into a single macro while clarifying the intention of the code which is calling the macro. 
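A standalone sketch of the iterator pattern (reduced stand-in types, not the real verifier structures): the macro yields either a pointer to the spilled register or NULL for every stack slot, so callers only need a single "if (!reg) continue;".

#include <stdio.h>

#define MAX_SLOTS 4

enum slot_type { SLOT_MISC, SLOT_SPILL };

struct reg_model { int id; };

struct frame_model {
	enum slot_type slot_type[MAX_SLOTS];
	struct reg_model spilled[MAX_SLOTS];
	int allocated_slots;
};

#define get_spilled_reg(slot, frame)				\
	(((slot) < (frame)->allocated_slots &&			\
	  (frame)->slot_type[slot] == SLOT_SPILL)		\
	 ? &(frame)->spilled[slot] : NULL)

#define for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = get_spilled_reg(iter, frame);	\
	     iter < (frame)->allocated_slots;			\
	     iter++, reg = get_spilled_reg(iter, frame))

int main(void)
{
	struct frame_model f = {
		.slot_type = { SLOT_SPILL, SLOT_MISC, SLOT_SPILL, SLOT_MISC },
		.spilled = { { 1 }, { 0 }, { 3 }, { 0 } },
		.allocated_slots = MAX_SLOTS,
	};
	struct reg_model *reg;
	int i;

	for_each_spilled_reg(i, &f, reg) {
		if (!reg)
			continue;	/* slot does not hold a spilled register */
		printf("slot %d holds spilled reg id %d\n", i, reg->id);
	}
	return 0;
}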
Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 11 +++++++++++ kernel/bpf/verifier.c | 16 +++++++--------- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b42b60a83e19..d0e7f97e8b60 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -131,6 +131,17 @@ struct bpf_verifier_state { u32 curframe; }; +#define bpf_get_spilled_reg(slot, frame) \ + (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ + (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ? &frame->stack[slot].spilled_ptr : NULL) + +/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ +#define bpf_for_each_spilled_reg(iter, frame, reg) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ + iter < frame->allocated_stack / BPF_REG_SIZE; \ + iter++, reg = bpf_get_spilled_reg(iter, frame)) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a8cc83a970d1..9c82d8f58085 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2252,10 +2252,9 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, if (reg_is_pkt_pointer_any(®s[i])) mark_reg_unknown(env, regs, i); - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; - reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } @@ -3395,10 +3394,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; - reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } @@ -3643,7 +3641,7 @@ static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; - struct bpf_reg_state *regs = state->regs; + struct bpf_reg_state *reg, *regs = state->regs; u32 id = regs[regno].id; int i, j; @@ -3652,8 +3650,8 @@ static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; - for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } -- cgit v1.2.3 From aad2eeaf46973a0968a75640cd1f8f1c650322a0 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:30 -0700 Subject: bpf: Simplify ptr_min_max_vals adjustment An upcoming commit will add another two pointer types that need very similar behaviour, so generalise this function now. 
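A standalone sketch of the shape the check takes after the refactor (the enum and strings are trimmed to the types this patch touches; the real hunk follows below). New pointer types only need another case label, and the error text comes from the canonical reg_type_str[] table:

#include <stdio.h>

enum reg_type { CONST_PTR_TO_MAP, PTR_TO_MAP_VALUE_OR_NULL, PTR_TO_PACKET_END, SCALAR_VALUE };

static const char * const reg_type_str[] = {
	[CONST_PTR_TO_MAP]	   = "map_ptr",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_PACKET_END]	   = "pkt_end",
	[SCALAR_VALUE]		   = "inv",
};

static int check_ptr_arith(int dst, enum reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE_OR_NULL:
		printf("R%d pointer arithmetic on %s prohibited, null-check it first\n",
		       dst, reg_type_str[type]);
		return -1;
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
		printf("R%d pointer arithmetic on %s prohibited\n",
		       dst, reg_type_str[type]);
		return -1;
	default:
		return 0;	/* arithmetic allowed on the remaining types */
	}
}

int main(void)
{
	check_ptr_arith(1, CONST_PTR_TO_MAP);
	check_ptr_arith(3, PTR_TO_PACKET_END);
	return check_ptr_arith(0, SCALAR_VALUE);
}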
Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 22 ++++++++++------------ tools/testing/selftests/bpf/test_verifier.c | 14 +++++++------- 2 files changed, 17 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9c82d8f58085..abf567200574 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2669,20 +2669,18 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } - if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", - dst); - return -EACCES; - } - if (ptr_reg->type == CONST_PTR_TO_MAP) { - verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", - dst); + switch (ptr_reg->type) { + case PTR_TO_MAP_VALUE_OR_NULL: + verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", + dst, reg_type_str[ptr_reg->type]); return -EACCES; - } - if (ptr_reg->type == PTR_TO_PACKET_END) { - verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", - dst); + case CONST_PTR_TO_MAP: + case PTR_TO_PACKET_END: + verbose(env, "R%d pointer arithmetic on %s prohibited\n", + dst, reg_type_str[ptr_reg->type]); return -EACCES; + default: + break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c7d25f23baf9..a90be44f61e0 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3638,7 +3638,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -4896,7 +4896,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -4917,7 +4917,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -4938,7 +4938,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 4 }, - .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL", + .errstr = "R4 pointer arithmetic on map_value_or_null", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, @@ -7253,7 +7253,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map_in_map = { 3 }, - .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited", + .errstr = "R1 pointer arithmetic on map_ptr prohibited", .result = REJECT, }, { @@ -8927,7 +8927,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, }, @@ -8946,7 +8946,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END", + .errstr = "R3 pointer arithmetic on pkt_end", .result = REJECT, .prog_type = 
BPF_PROG_TYPE_XDP, }, -- cgit v1.2.3 From 9d2be44a7f33d5ec4fbd3368317bcf5f404bb8f7 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:31 -0700 Subject: bpf: Reuse canonical string formatter for ctx errs The array "reg_type_str" provides canonical formatting of register types, however a couple of places would previously check whether a register represented the context and write the name "context" directly. An upcoming commit will add another pointer type to these statements, so to provide more accurate error messages in the verifier, update these error messages to use "reg_type_str" instead. Signed-off-by: Joe Stringer Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 7 +++---- tools/testing/selftests/bpf/test_verifier.c | 10 +++++----- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index abf567200574..8b4e70eeced2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1763,8 +1763,7 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", - insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? - "context" : "packet"); + insn->dst_reg, reg_type_str[insn->dst_reg]); return -EACCES; } @@ -4871,8 +4870,8 @@ static int do_check(struct bpf_verifier_env *env) return err; if (is_ctx_reg(env, insn->dst_reg)) { - verbose(env, "BPF_ST stores into R%d context is not allowed\n", - insn->dst_reg); + verbose(env, "BPF_ST stores into R%d %s is not allowed\n", + insn->dst_reg, reg_type_str[insn->dst_reg]); return -EACCES; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index a90be44f61e0..6e0b3f148cdb 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3276,7 +3276,7 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_ST stores into R1 context is not allowed", + .errstr = "BPF_ST stores into R1 inv is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -3288,7 +3288,7 @@ static struct bpf_test tests[] = { BPF_REG_0, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -5266,7 +5266,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R2 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", }, { "leak pointer into ctx 2", @@ -5281,7 +5281,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R10 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 context is not allowed", + .errstr = "BPF_XADD stores into R1 inv is not allowed", }, { "leak pointer into ctx 3", @@ -12230,7 +12230,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "BPF_XADD stores into R2 packet", + .errstr = "BPF_XADD stores into R2 ctx", .prog_type = BPF_PROG_TYPE_XDP, }, { -- cgit v1.2.3 From 840b9615d6e9d134178b4dd4f3c30aa30643a379 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:32 -0700 Subject: bpf: 
Generalize ptr_or_null regs check This check will be reused by an upcoming commit for conditional jump checks for sockets. Refactor it a bit to simplify the later commit. Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8b4e70eeced2..98b218bd46e8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -249,6 +249,11 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type) type == PTR_TO_PACKET_META; } +static bool reg_type_may_be_null(enum bpf_reg_type type) +{ + return type == PTR_TO_MAP_VALUE_OR_NULL; +} + /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", @@ -3599,12 +3604,10 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src, } } -static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, - bool is_null) +static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id, + bool is_null) { - struct bpf_reg_state *reg = ®s[regno]; - - if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { + if (reg_type_may_be_null(reg->type) && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. @@ -3617,11 +3620,13 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, } if (is_null) { reg->type = SCALAR_VALUE; - } else if (reg->map_ptr->inner_map_meta) { - reg->type = CONST_PTR_TO_MAP; - reg->map_ptr = reg->map_ptr->inner_map_meta; - } else { - reg->type = PTR_TO_MAP_VALUE; + } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { + if (reg->map_ptr->inner_map_meta) { + reg->type = CONST_PTR_TO_MAP; + reg->map_ptr = reg->map_ptr->inner_map_meta; + } else { + reg->type = PTR_TO_MAP_VALUE; + } } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances @@ -3634,8 +3639,8 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. 
*/ -static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, - bool is_null) +static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, + bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *reg, *regs = state->regs; @@ -3643,14 +3648,14 @@ static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, int i, j; for (i = 0; i < MAX_BPF_REG; i++) - mark_map_reg(regs, i, id, is_null); + mark_ptr_or_null_reg(®s[i], id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; - mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); + mark_ptr_or_null_reg(reg, id, is_null); } } } @@ -3852,12 +3857,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && - dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { - /* Mark all identical map registers in each branch as either + reg_type_may_be_null(dst_reg->type)) { + /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ - mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); - mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); + mark_ptr_or_null_regs(this_branch, insn->dst_reg, + opcode == BPF_JNE); + mark_ptr_or_null_regs(other_branch, insn->dst_reg, + opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { -- cgit v1.2.3 From c64b7983288e636356f7f5f652de4813e1cfedac Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:33 -0700 Subject: bpf: Add PTR_TO_SOCKET verifier type Teach the verifier a little bit about a new type of pointer, a PTR_TO_SOCKET. This pointer type is accessed from BPF through the 'struct bpf_sock' structure. 
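A standalone model of the typing rules involved (spirit only; the real checks live in the hunks below): a sock_or_null value must be NULL-checked before use, after which reads of bpf_sock fields are allowed and writes are rejected.

#include <stdbool.h>
#include <stdio.h>

enum reg_type { SCALAR_VALUE, PTR_TO_SOCKET, PTR_TO_SOCKET_OR_NULL };

static const char * const reg_type_str[] = {
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL]	= "sock_or_null",
};

/* Branch on "reg == NULL": refine the type on each side of the branch. */
static enum reg_type mark_ptr_or_null(enum reg_type type, bool is_null_branch)
{
	if (type != PTR_TO_SOCKET_OR_NULL)
		return type;
	return is_null_branch ? SCALAR_VALUE : PTR_TO_SOCKET;
}

static int check_sock_access(enum reg_type type, bool is_write)
{
	if (type != PTR_TO_SOCKET)
		return -1;	/* must NULL-check before touching the socket */
	if (is_write)
		return -1;	/* "cannot write into socket" */
	return 0;		/* read of a bpf_sock field is fine */
}

int main(void)
{
	enum reg_type sk = PTR_TO_SOCKET_OR_NULL;

	printf("before check: %s, read -> %d\n", reg_type_str[sk],
	       check_sock_access(sk, false));
	sk = mark_ptr_or_null(sk, false);	/* non-NULL branch taken */
	printf("after check:  %s, read -> %d, write -> %d\n", reg_type_str[sk],
	       check_sock_access(sk, false), check_sock_access(sk, true));
	return 0;
}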
Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 34 ++++++++++++ include/linux/bpf_verifier.h | 2 + kernel/bpf/verifier.c | 120 ++++++++++++++++++++++++++++++++++++++----- net/core/filter.c | 30 ++++++----- 4 files changed, 160 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 018299a595c8..027697b6a22f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -154,6 +154,7 @@ enum bpf_arg_type { ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ }; /* type of values returned from helper functions */ @@ -162,6 +163,7 @@ enum bpf_return_type { RET_VOID, /* function doesn't return anything */ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ + RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -213,6 +215,8 @@ enum bpf_reg_type { PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ + PTR_TO_SOCKET, /* reg points to struct bpf_sock */ + PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -343,6 +347,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void); typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, + u32 *target_size); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); @@ -836,4 +845,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#if defined(CONFIG_NET) +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); +#else +static inline bool bpf_sock_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + return false; +} +static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + return 0; +} +#endif + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d0e7f97e8b60..a411363098a5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -58,6 +58,8 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_SOCKET this is used to share which pointers retain the + * same reference to the socket, to determine proper reference freeing. 
*/ u32 id; /* For scalar types (SCALAR_VALUE), this represents our knowledge of diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 98b218bd46e8..f86386c9affd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -80,8 +80,8 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register - * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer - * types recognized by check_mem_access() function. + * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are + * four pointer types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. @@ -267,6 +267,8 @@ static const char * const reg_type_str[] = { [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", [PTR_TO_FLOW_KEYS] = "flow_keys", + [PTR_TO_SOCKET] = "sock", + [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", }; static char slot_type_char[] = { @@ -973,6 +975,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: return true; default: return false; @@ -1341,6 +1345,28 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, return 0; } +static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, + int size, enum bpf_access_type t) +{ + struct bpf_reg_state *regs = cur_regs(env); + struct bpf_reg_state *reg = ®s[regno]; + struct bpf_insn_access_aux info; + + if (reg->smin_value < 0) { + verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", + regno); + return -EACCES; + } + + if (!bpf_sock_is_valid_access(off, size, t, &info)) { + verbose(env, "invalid bpf_sock access off=%d size=%d\n", + off, size); + return -EACCES; + } + + return 0; +} + static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { @@ -1459,6 +1485,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, */ strict = true; break; + case PTR_TO_SOCKET: + pointer_desc = "sock "; + break; default: break; } @@ -1726,6 +1755,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_flow_keys_access(env, off, size); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_SOCKET) { + if (t == BPF_WRITE) { + verbose(env, "cannot write into socket\n"); + return -EACCES; + } + err = check_sock_access(env, regno, off, size, t); + if (!err && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -1948,6 +1985,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_ctx_reg(env, reg, regno); if (err < 0) return err; + } else if (arg_type == ARG_PTR_TO_SOCKET) { + expected_type = PTR_TO_SOCKET; + if (type != expected_type) + goto err_type; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. 
In case function allows for NULL to be @@ -2543,6 +2584,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; + } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; + regs[BPF_REG_0].id = ++env->id_gen; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); @@ -2680,6 +2725,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; @@ -3627,6 +3674,8 @@ static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id, } else { reg->type = PTR_TO_MAP_VALUE; } + } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { + reg->type = PTR_TO_SOCKET; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances @@ -4402,6 +4451,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: /* Only valid matches are exact, which memcmp() above * would have accepted */ @@ -4679,6 +4730,37 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return 0; } +/* Return true if it's OK to have the same insn return a different type. */ +static bool reg_type_mismatch_ok(enum bpf_reg_type type) +{ + switch (type) { + case PTR_TO_CTX: + case PTR_TO_SOCKET: + case PTR_TO_SOCKET_OR_NULL: + return false; + default: + return true; + } +} + +/* If an instruction was previously used with particular pointer types, then we + * need to be careful to avoid cases such as the below, where it may be ok + * for one branch accessing the pointer, but not ok for the other branch: + * + * R1 = sock_ptr + * goto X; + * ... + * R1 = some_other_valid_ptr; + * goto X; + * ... 
+ * R2 = *(u32 *)(R1 + 0); + */ +static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) +{ + return src != prev && (!reg_type_mismatch_ok(src) || + !reg_type_mismatch_ok(prev)); +} + static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; @@ -4811,9 +4893,7 @@ static int do_check(struct bpf_verifier_env *env) */ *prev_src_type = src_reg_type; - } else if (src_reg_type != *prev_src_type && - (src_reg_type == PTR_TO_CTX || - *prev_src_type == PTR_TO_CTX)) { + } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: @@ -4858,9 +4938,7 @@ static int do_check(struct bpf_verifier_env *env) if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; - } else if (dst_reg_type != *prev_dst_type && - (dst_reg_type == PTR_TO_CTX || - *prev_dst_type == PTR_TO_CTX)) { + } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } @@ -5286,8 +5364,10 @@ static void sanitize_dead_code(struct bpf_verifier_env *env) } } -/* convert load instructions that access fields of 'struct __sk_buff' - * into sequence of instructions that access fields of 'struct sk_buff' +/* convert load instructions that access fields of a context type into a + * sequence of instructions that access fields of the underlying structure: + * struct __sk_buff -> struct sk_buff + * struct bpf_sock_ops -> struct sock */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { @@ -5316,12 +5396,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } } - if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux)) + if (bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { + bpf_convert_ctx_access_t convert_ctx_access; + if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || @@ -5363,8 +5445,18 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } - if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) + switch (env->insn_aux_data[i + delta].ptr_type) { + case PTR_TO_CTX: + if (!ops->convert_ctx_access) + continue; + convert_ctx_access = ops->convert_ctx_access; + break; + case PTR_TO_SOCKET: + convert_ctx_access = bpf_sock_convert_ctx_access; + break; + default: continue; + } ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); @@ -5396,8 +5488,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) } target_size = 0; - cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, - &target_size); + cnt = convert_ctx_access(type, insn, insn_buf, env->prog, + &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); diff --git a/net/core/filter.c b/net/core/filter.c index 72db8afb7cb6..b2cb186252e4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5394,23 +5394,29 @@ static bool __sock_filter_check_size(int off, int size, return size == size_default; } -static bool sock_filter_is_valid_access(int off, int size, - enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info) +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info) { if 
(off < 0 || off >= sizeof(struct bpf_sock)) return false; if (off % size != 0) return false; - if (!__sock_filter_check_attach_type(off, type, - prog->expected_attach_type)) - return false; if (!__sock_filter_check_size(off, size, info)) return false; return true; } +static bool sock_filter_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (!bpf_sock_is_valid_access(off, size, type, info)) + return false; + return __sock_filter_check_attach_type(off, type, + prog->expected_attach_type); +} + static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog, int drop_verdict) { @@ -6122,10 +6128,10 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static u32 sock_filter_convert_ctx_access(enum bpf_access_type type, - const struct bpf_insn *si, - struct bpf_insn *insn_buf, - struct bpf_prog *prog, u32 *target_size) +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; @@ -7037,7 +7043,7 @@ const struct bpf_prog_ops lwt_seg6local_prog_ops = { const struct bpf_verifier_ops cg_sock_verifier_ops = { .get_func_proto = sock_filter_func_proto, .is_valid_access = sock_filter_is_valid_access, - .convert_ctx_access = sock_filter_convert_ctx_access, + .convert_ctx_access = bpf_sock_convert_ctx_access, }; const struct bpf_prog_ops cg_sock_prog_ops = { -- cgit v1.2.3 From 84dbf3507349696b505b6a500722538b0683e4ac Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:34 -0700 Subject: bpf: Macrofy stack state copy An upcoming commit will need very similar copy/realloc boilerplate, so refactor the existing stack copy/realloc functions into macros to simplify it. 
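As a rough standalone illustration of the technique (toy userspace types, not the verifier's own structures), a single macro can stamp out one copy_<name>_state() helper per backing array; this is what lets the follow-up reference-tracking patch reuse the boilerplate with one extra instantiation:

    #include <stdio.h>
    #include <string.h>

    struct slot { char bytes[8]; };

    struct toy_state {
            int allocated_stack;    /* bytes allocated for 'stack', multiple of 8 */
            struct slot *stack;
            int acquired_refs;      /* number of entries in 'refs' */
            int *refs;
    };

    /* Generate copy_<NAME>_state(): copy COUNT/SIZE elements of FIELD. */
    #define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)                     \
    static int copy_##NAME##_state(struct toy_state *dst,               \
                                   const struct toy_state *src)         \
    {                                                                   \
            if (!src->FIELD)                                            \
                    return 0;                                           \
            if (dst->COUNT < src->COUNT)                                \
                    return -1; /* caller must allocate enough first */  \
            memcpy(dst->FIELD, src->FIELD,                              \
                   sizeof(*src->FIELD) * (src->COUNT / SIZE));          \
            return 0;                                                   \
    }
    COPY_STATE_FN(stack, allocated_stack, stack, 8)   /* copy_stack_state() */
    COPY_STATE_FN(reference, acquired_refs, refs, 1)  /* copy_reference_state() */
    #undef COPY_STATE_FN

    int main(void)
    {
            int src_refs[2] = { 1, 2 }, dst_refs[2] = { 0, 0 };
            struct toy_state src = { .acquired_refs = 2, .refs = src_refs };
            struct toy_state dst = { .acquired_refs = 2, .refs = dst_refs };

            copy_stack_state(&dst, &src);        /* no-op: src.stack is NULL */
            if (!copy_reference_state(&dst, &src))
                    printf("refs copied: %d %d\n", dst.refs[0], dst.refs[1]);
            return 0;
    }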
Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 106 ++++++++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f86386c9affd..11e982381061 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -388,60 +388,74 @@ static void print_verifier_state(struct bpf_verifier_env *env, verbose(env, "\n"); } -static int copy_stack_state(struct bpf_func_state *dst, - const struct bpf_func_state *src) -{ - if (!src->stack) - return 0; - if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { - /* internal bug, make state invalid to reject the program */ - memset(dst, 0, sizeof(*dst)); - return -EFAULT; - } - memcpy(dst->stack, src->stack, - sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); - return 0; -} +#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ +static int copy_##NAME##_state(struct bpf_func_state *dst, \ + const struct bpf_func_state *src) \ +{ \ + if (!src->FIELD) \ + return 0; \ + if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ + /* internal bug, make state invalid to reject the program */ \ + memset(dst, 0, sizeof(*dst)); \ + return -EFAULT; \ + } \ + memcpy(dst->FIELD, src->FIELD, \ + sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ + return 0; \ +} +/* copy_stack_state() */ +COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) +#undef COPY_STATE_FN + +#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ +static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ + bool copy_old) \ +{ \ + u32 old_size = state->COUNT; \ + struct bpf_##NAME##_state *new_##FIELD; \ + int slot = size / SIZE; \ + \ + if (size <= old_size || !size) { \ + if (copy_old) \ + return 0; \ + state->COUNT = slot * SIZE; \ + if (!size && old_size) { \ + kfree(state->FIELD); \ + state->FIELD = NULL; \ + } \ + return 0; \ + } \ + new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ + GFP_KERNEL); \ + if (!new_##FIELD) \ + return -ENOMEM; \ + if (copy_old) { \ + if (state->FIELD) \ + memcpy(new_##FIELD, state->FIELD, \ + sizeof(*new_##FIELD) * (old_size / SIZE)); \ + memset(new_##FIELD + old_size / SIZE, 0, \ + sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ + } \ + state->COUNT = slot * SIZE; \ + kfree(state->FIELD); \ + state->FIELD = new_##FIELD; \ + return 0; \ +} +/* realloc_stack_state() */ +REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) +#undef REALLOC_STATE_FN /* do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_func_state() to grow the stack size. - * Note there is a non-zero parent pointer inside each reg of bpf_verifier_state - * which this function copies over. It points to corresponding reg in previous - * bpf_verifier_state which is never reallocated + * Note there is a non-zero 'parent' pointer inside bpf_verifier_state + * which realloc_stack_state() copies over. It points to previous + * bpf_verifier_state which is never reallocated. 
*/ static int realloc_func_state(struct bpf_func_state *state, int size, bool copy_old) { - u32 old_size = state->allocated_stack; - struct bpf_stack_state *new_stack; - int slot = size / BPF_REG_SIZE; - - if (size <= old_size || !size) { - if (copy_old) - return 0; - state->allocated_stack = slot * BPF_REG_SIZE; - if (!size && old_size) { - kfree(state->stack); - state->stack = NULL; - } - return 0; - } - new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), - GFP_KERNEL); - if (!new_stack) - return -ENOMEM; - if (copy_old) { - if (state->stack) - memcpy(new_stack, state->stack, - sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); - memset(new_stack + old_size / BPF_REG_SIZE, 0, - sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); - } - state->allocated_stack = slot * BPF_REG_SIZE; - kfree(state->stack); - state->stack = new_stack; - return 0; + return realloc_stack_state(state, size, copy_old); } static void free_func_state(struct bpf_func_state *state) -- cgit v1.2.3 From fd978bf7fd312581a7ca454a991f0ffb34c4204b Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:35 -0700 Subject: bpf: Add reference tracking to verifier Allow helper functions to acquire a reference and return it into a register. Specific pointer types such as the PTR_TO_SOCKET will implicitly represent such a reference. The verifier must ensure that these references are released exactly once in each path through the program. To achieve this, this commit assigns an id to the pointer and tracks it in the 'bpf_func_state', then when the function or program exits, verifies that all of the acquired references have been freed. When the pointer is passed to a function that frees the reference, it is removed from the 'bpf_func_state` and all existing copies of the pointer in registers are marked invalid. Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 24 +++- kernel/bpf/verifier.c | 306 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 308 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index a411363098a5..7b6fd2ab3263 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -104,6 +104,17 @@ struct bpf_stack_state { u8 slot_type[BPF_REG_SIZE]; }; +struct bpf_reference_state { + /* Track each reference created with a unique id, even if the same + * instruction creates the reference multiple times (eg, via CALL). + */ + int id; + /* Instruction where the allocation of this reference occurred. This + * is used purely to inform the user of a reference leak. + */ + int insn_idx; +}; + /* state of the program: * type of all registers and stack info */ @@ -121,7 +132,9 @@ struct bpf_func_state { */ u32 subprogno; - /* should be second to last. See copy_func_state() */ + /* The following fields should be last. 
See copy_func_state() */ + int acquired_refs; + struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; }; @@ -217,11 +230,16 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); -static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; - return cur->frame[cur->curframe]->regs; + return cur->frame[cur->curframe]; +} + +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +{ + return cur_func(env)->regs; } int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 11e982381061..cd0d8bc00bd1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1,5 +1,6 @@ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook + * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -140,6 +141,18 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. + * + * The following reference types represent a potential reference to a kernel + * resource which, after first being allocated, must be checked and freed by + * the BPF program: + * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET + * + * When the verifier sees a helper call return a reference type, it allocates a + * pointer id for the reference and stores it in the current function state. + * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into + * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type + * passes through a NULL-check conditional. For the branch wherein the state is + * changed to CONST_IMM, the verifier releases the reference. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ @@ -189,6 +202,7 @@ struct bpf_call_arg_meta { int access_size; s64 msize_smax_value; u64 msize_umax_value; + int ptr_id; }; static DEFINE_MUTEX(bpf_verifier_lock); @@ -251,7 +265,42 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type) static bool reg_type_may_be_null(enum bpf_reg_type type) { - return type == PTR_TO_MAP_VALUE_OR_NULL; + return type == PTR_TO_MAP_VALUE_OR_NULL || + type == PTR_TO_SOCKET_OR_NULL; +} + +static bool type_is_refcounted(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET; +} + +static bool type_is_refcounted_or_null(enum bpf_reg_type type) +{ + return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL; +} + +static bool reg_is_refcounted(const struct bpf_reg_state *reg) +{ + return type_is_refcounted(reg->type); +} + +static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) +{ + return type_is_refcounted_or_null(reg->type); +} + +static bool arg_type_is_refcounted(enum bpf_arg_type type) +{ + return type == ARG_PTR_TO_SOCKET; +} + +/* Determine whether the function releases some resources allocated by another + * function call. The first reference type argument will be assumed to be + * released by release_reference(). 
+ */ +static bool is_release_function(enum bpf_func_id func_id) +{ + return false; } /* string representation of 'enum bpf_reg_type' */ @@ -385,6 +434,12 @@ static void print_verifier_state(struct bpf_verifier_env *env, else verbose(env, "=%s", types_buf); } + if (state->acquired_refs && state->refs[0].id) { + verbose(env, " refs=%d", state->refs[0].id); + for (i = 1; i < state->acquired_refs; i++) + if (state->refs[i].id) + verbose(env, ",%d", state->refs[i].id); + } verbose(env, "\n"); } @@ -403,6 +458,8 @@ static int copy_##NAME##_state(struct bpf_func_state *dst, \ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ return 0; \ } +/* copy_reference_state() */ +COPY_STATE_FN(reference, acquired_refs, refs, 1) /* copy_stack_state() */ COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef COPY_STATE_FN @@ -441,6 +498,8 @@ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ state->FIELD = new_##FIELD; \ return 0; \ } +/* realloc_reference_state() */ +REALLOC_STATE_FN(reference, acquired_refs, refs, 1) /* realloc_stack_state() */ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) #undef REALLOC_STATE_FN @@ -452,16 +511,89 @@ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) * which realloc_stack_state() copies over. It points to previous * bpf_verifier_state which is never reallocated. */ -static int realloc_func_state(struct bpf_func_state *state, int size, - bool copy_old) +static int realloc_func_state(struct bpf_func_state *state, int stack_size, + int refs_size, bool copy_old) { - return realloc_stack_state(state, size, copy_old); + int err = realloc_reference_state(state, refs_size, copy_old); + if (err) + return err; + return realloc_stack_state(state, stack_size, copy_old); +} + +/* Acquire a pointer id from the env and update the state->refs to include + * this new pointer reference. + * On success, returns a valid pointer id to associate with the register + * On failure, returns a negative errno. + */ +static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) +{ + struct bpf_func_state *state = cur_func(env); + int new_ofs = state->acquired_refs; + int id, err; + + err = realloc_reference_state(state, state->acquired_refs + 1, true); + if (err) + return err; + id = ++env->id_gen; + state->refs[new_ofs].id = id; + state->refs[new_ofs].insn_idx = insn_idx; + + return id; +} + +/* release function corresponding to acquire_reference_state(). Idempotent. */ +static int __release_reference_state(struct bpf_func_state *state, int ptr_id) +{ + int i, last_idx; + + if (!ptr_id) + return -EFAULT; + + last_idx = state->acquired_refs - 1; + for (i = 0; i < state->acquired_refs; i++) { + if (state->refs[i].id == ptr_id) { + if (last_idx && i != last_idx) + memcpy(&state->refs[i], &state->refs[last_idx], + sizeof(*state->refs)); + memset(&state->refs[last_idx], 0, sizeof(*state->refs)); + state->acquired_refs--; + return 0; + } + } + return -EFAULT; +} + +/* variation on the above for cases where we expect that there must be an + * outstanding reference for the specified ptr_id. 
+ */ +static int release_reference_state(struct bpf_verifier_env *env, int ptr_id) +{ + struct bpf_func_state *state = cur_func(env); + int err; + + err = __release_reference_state(state, ptr_id); + if (WARN_ON_ONCE(err != 0)) + verbose(env, "verifier internal error: can't release reference\n"); + return err; +} + +static int transfer_reference_state(struct bpf_func_state *dst, + struct bpf_func_state *src) +{ + int err = realloc_reference_state(dst, src->acquired_refs, false); + if (err) + return err; + err = copy_reference_state(dst, src); + if (err) + return err; + return 0; } static void free_func_state(struct bpf_func_state *state) { if (!state) return; + kfree(state->refs); kfree(state->stack); kfree(state); } @@ -487,10 +619,14 @@ static int copy_func_state(struct bpf_func_state *dst, { int err; - err = realloc_func_state(dst, src->allocated_stack, false); + err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, + false); + if (err) + return err; + memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); + err = copy_reference_state(dst, src); if (err) return err; - memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack)); return copy_stack_state(dst, src); } @@ -1015,7 +1151,7 @@ static int check_stack_write(struct bpf_verifier_env *env, enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), - true); + state->acquired_refs, true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, @@ -1399,7 +1535,8 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; - return reg->type == PTR_TO_CTX; + return reg->type == PTR_TO_CTX || + reg->type == PTR_TO_SOCKET; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) @@ -2003,6 +2140,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_SOCKET; if (type != expected_type) goto err_type; + if (meta->ptr_id || !reg->id) { + verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", + meta->ptr_id, reg->id); + return -EFAULT; + } + meta->ptr_id = reg->id; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be @@ -2292,10 +2435,32 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn) return true; } +static bool check_refcount_ok(const struct bpf_func_proto *fn) +{ + int count = 0; + + if (arg_type_is_refcounted(fn->arg1_type)) + count++; + if (arg_type_is_refcounted(fn->arg2_type)) + count++; + if (arg_type_is_refcounted(fn->arg3_type)) + count++; + if (arg_type_is_refcounted(fn->arg4_type)) + count++; + if (arg_type_is_refcounted(fn->arg5_type)) + count++; + + /* We only support one arg being unreferenced at the moment, + * which is sufficient for the helper functions we have right now. + */ + return count <= 1; +} + static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && - check_arg_pair_ok(fn) ? 0 : -EINVAL; + check_arg_pair_ok(fn) && + check_refcount_ok(fn) ? 
0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] @@ -2328,12 +2493,45 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) __clear_all_pkt_pointers(env, vstate->frame[i]); } +static void release_reg_references(struct bpf_verifier_env *env, + struct bpf_func_state *state, int id) +{ + struct bpf_reg_state *regs = state->regs, *reg; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) + if (regs[i].id == id) + mark_reg_unknown(env, regs, i); + + bpf_for_each_spilled_reg(i, state, reg) { + if (!reg) + continue; + if (reg_is_refcounted(reg) && reg->id == id) + __mark_reg_unknown(reg); + } +} + +/* The pointer with the specified id has released its reference to kernel + * resources. Identify all copies of the same pointer and clear the reference. + */ +static int release_reference(struct bpf_verifier_env *env, + struct bpf_call_arg_meta *meta) +{ + struct bpf_verifier_state *vstate = env->cur_state; + int i; + + for (i = 0; i <= vstate->curframe; i++) + release_reg_references(env, vstate->frame[i], meta->ptr_id); + + return release_reference_state(env, meta->ptr_id); +} + static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; - int i, subprog, target_insn; + int i, err, subprog, target_insn; if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", @@ -2371,6 +2569,11 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, state->curframe + 1 /* frameno within this callchain */, subprog /* subprog number within this prog */); + /* Transfer references to the callee */ + err = transfer_reference_state(callee, caller); + if (err) + return err; + /* copy r1 - r5 args that callee can access. The copy includes parent * pointers, which connects us up to the liveness chain */ @@ -2403,6 +2606,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) struct bpf_verifier_state *state = env->cur_state; struct bpf_func_state *caller, *callee; struct bpf_reg_state *r0; + int err; callee = state->frame[state->curframe]; r0 = &callee->regs[BPF_REG_0]; @@ -2422,6 +2626,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; + /* Transfer references to the caller */ + err = transfer_reference_state(caller, callee); + if (err) + return err; + *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); @@ -2478,6 +2687,18 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, return 0; } +static int check_reference_leak(struct bpf_verifier_env *env) +{ + struct bpf_func_state *state = cur_func(env); + int i; + + for (i = 0; i < state->acquired_refs; i++) { + verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", + state->refs[i].id, state->refs[i].insn_idx); + } + return state->acquired_refs ? 
-EINVAL : 0; +} + static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; @@ -2556,6 +2777,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return err; } + if (func_id == BPF_FUNC_tail_call) { + err = check_reference_leak(env); + if (err) { + verbose(env, "tail_call would lead to reference leak\n"); + return err; + } + } else if (is_release_function(func_id)) { + err = release_reference(env, &meta); + if (err) + return err; + } + regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, @@ -2599,9 +2832,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { + int id = acquire_reference_state(env, insn_idx); + if (id < 0) + return id; mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; - regs[BPF_REG_0].id = ++env->id_gen; + regs[BPF_REG_0].id = id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); @@ -3665,7 +3901,8 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src, } } -static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id, +static void mark_ptr_or_null_reg(struct bpf_func_state *state, + struct bpf_reg_state *reg, u32 id, bool is_null) { if (reg_type_may_be_null(reg->type) && reg->id == id) { @@ -3691,11 +3928,13 @@ static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id, } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { reg->type = PTR_TO_SOCKET; } - /* We don't need id from this point onwards anymore, thus we - * should better reset it, so that state pruning has chances - * to take effect. - */ - reg->id = 0; + if (is_null || !reg_is_refcounted(reg)) { + /* We don't need id from this point onwards anymore, + * thus we should better reset it, so that state + * pruning has chances to take effect. + */ + reg->id = 0; + } } } @@ -3710,15 +3949,18 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, u32 id = regs[regno].id; int i, j; + if (reg_is_refcounted_or_null(®s[regno]) && is_null) + __release_reference_state(state, id); + for (i = 0; i < MAX_BPF_REG; i++) - mark_ptr_or_null_reg(®s[i], id, is_null); + mark_ptr_or_null_reg(state, ®s[i], id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; bpf_for_each_spilled_reg(i, state, reg) { if (!reg) continue; - mark_ptr_or_null_reg(reg, id, is_null); + mark_ptr_or_null_reg(state, reg, id, is_null); } } } @@ -4050,6 +4292,16 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; + /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as + * gen_ld_abs() may terminate the program at runtime, leading to + * reference leak. 
+ */ + err = check_reference_leak(env); + if (err) { + verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); + return err; + } + if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); @@ -4542,6 +4794,14 @@ static bool stacksafe(struct bpf_func_state *old, return true; } +static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) +{ + if (old->acquired_refs != cur->acquired_refs) + return false; + return !memcmp(old->refs, cur->refs, + sizeof(*old->refs) * old->acquired_refs); +} + /* compare two verifier states * * all states stored in state_list are known to be valid, since @@ -4587,6 +4847,9 @@ static bool func_states_equal(struct bpf_func_state *old, if (!stacksafe(old, cur, idmap)) goto out_free; + + if (!refsafe(old, cur)) + goto out_free; ret = true; out_free: kfree(idmap); @@ -4868,6 +5131,7 @@ static int do_check(struct bpf_verifier_env *env) regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; + if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) @@ -5032,6 +5296,10 @@ static int do_check(struct bpf_verifier_env *env) continue; } + err = check_reference_leak(env); + if (err) + return err; + /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. * Make sure that it's readable at this time -- cgit v1.2.3 From 6acc9b432e6714d72d7d77ec7c27f6f8358d0c71 Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Tue, 2 Oct 2018 13:35:36 -0700 Subject: bpf: Add helper to retrieve socket in BPF This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and bpf_sk_lookup_udp() which allows BPF programs to find out if there is a socket listening on this host, and returns a socket pointer which the BPF program can then access to determine, for instance, whether to forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on the socket, so when a BPF program makes use of this function, it must subsequently pass the returned pointer into the newly added sk_release() to return the reference. By way of example, the following pseudocode would filter inbound connections at XDP if there is no corresponding service listening for the traffic: struct bpf_sock_tuple tuple; struct bpf_sock_ops *sk; populate_tuple(ctx, &tuple); // Extract the 5tuple from the packet sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0); if (!sk) { // Couldn't find a socket listening for this traffic. Drop. return TC_ACT_SHOT; } bpf_sk_release(sk, 0); return TC_ACT_OK; Signed-off-by: Joe Stringer Acked-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 93 +++++++++++++++++- kernel/bpf/verifier.c | 8 +- net/core/filter.c | 151 ++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 93 +++++++++++++++++- tools/testing/selftests/bpf/bpf_helpers.h | 12 +++ 5 files changed, 354 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e2070d819e04..f9187b41dff6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2144,6 +2144,77 @@ union bpf_attr { * request in the skb. * Return * 0 on success, or a negative error in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. 
The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * int bpf_sk_release(struct bpf_sock *sk) + * Description + * Release the reference held by *sock*. *sock* must be a non-NULL + * pointer that was returned from bpf_sk_lookup_xxx\ (). + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2229,7 +2300,10 @@ union bpf_attr { FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), + FN(skb_ancestor_cgroup_id), \ + FN(sk_lookup_tcp), \ + FN(sk_lookup_udp), \ + FN(sk_release), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2399,6 +2473,23 @@ struct bpf_sock { */ }; +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cd0d8bc00bd1..73c81bef6ae8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -153,6 +153,12 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type * passes through a NULL-check conditional. For the branch wherein the state is * changed to CONST_IMM, the verifier releases the reference. + * + * For each helper function that allocates a reference, such as + * bpf_sk_lookup_tcp(), there is a corresponding release function, such as + * bpf_sk_release(). When a reference type passes into the release function, + * the verifier also releases the reference. If any unchecked or unreleased + * reference remains at the end of the program, the verifier rejects it. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ @@ -300,7 +306,7 @@ static bool arg_type_is_refcounted(enum bpf_arg_type type) */ static bool is_release_function(enum bpf_func_id func_id) { - return false; + return func_id == BPF_FUNC_sk_release; } /* string representation of 'enum bpf_reg_type' */ diff --git a/net/core/filter.c b/net/core/filter.c index b2cb186252e4..591c698bc517 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -58,13 +58,17 @@ #include #include #include +#include #include #include #include +#include +#include #include #include #include #include +#include #include #include #include @@ -4813,6 +4817,141 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { }; #endif /* CONFIG_IPV6_SEG6_BPF */ +struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, + struct sk_buff *skb, u8 family, u8 proto) +{ + int dif = skb->dev->ifindex; + bool refcounted = false; + struct sock *sk = NULL; + + if (family == AF_INET) { + __be32 src4 = tuple->ipv4.saddr; + __be32 dst4 = tuple->ipv4.daddr; + int sdif = inet_sdif(skb); + + if (proto == IPPROTO_TCP) + sk = __inet_lookup(net, &tcp_hashinfo, skb, 0, + src4, tuple->ipv4.sport, + dst4, tuple->ipv4.dport, + dif, sdif, &refcounted); + else + sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, + dst4, tuple->ipv4.dport, + dif, sdif, &udp_table, skb); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; + struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; + int sdif = inet6_sdif(skb); + + if (proto == IPPROTO_TCP) + sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0, + src6, tuple->ipv6.sport, + dst6, tuple->ipv6.dport, + dif, sdif, &refcounted); + else + sk = __udp6_lib_lookup(net, src6, tuple->ipv6.sport, + dst6, tuple->ipv6.dport, + dif, sdif, &udp_table, skb); +#endif + } + + if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { + WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); + sk = NULL; + } + return sk; +} + +/* bpf_sk_lookup performs the core lookup for different types of sockets, + * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. + * Returns the socket as an 'unsigned long' to simplify the casting in the + * callers to satisfy BPF_CALL declarations. + */ +static unsigned long +bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, + u8 proto, u64 netns_id, u64 flags) +{ + struct net *caller_net; + struct sock *sk = NULL; + u8 family = AF_UNSPEC; + struct net *net; + + family = len == sizeof(tuple->ipv4) ? 
AF_INET : AF_INET6; + if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) + goto out; + + if (skb->dev) + caller_net = dev_net(skb->dev); + else + caller_net = sock_net(skb->sk); + if (netns_id) { + net = get_net_ns_by_id(caller_net, netns_id); + if (unlikely(!net)) + goto out; + sk = sk_lookup(net, tuple, skb, family, proto); + put_net(net); + } else { + net = caller_net; + sk = sk_lookup(net, tuple, skb, family, proto); + } + + if (sk) + sk = sk_to_full_sk(sk); +out: + return (unsigned long) sk; +} + +BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); +} + +static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { + .func = bpf_sk_lookup_tcp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags); +} + +static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { + .func = bpf_sk_lookup_udp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_1(bpf_sk_release, struct sock *, sk) +{ + if (!sock_flag(sk, SOCK_RCU_FREE)) + sock_gen_put(sk); + return 0; +} + +static const struct bpf_func_proto bpf_sk_release_proto = { + .func = bpf_sk_release, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_SOCKET, +}; + bool bpf_helper_changes_pkt_data(void *func) { if (func == bpf_skb_vlan_push || @@ -5019,6 +5158,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_skb_ancestor_cgroup_id: return &bpf_skb_ancestor_cgroup_id_proto; #endif + case BPF_FUNC_sk_lookup_tcp: + return &bpf_sk_lookup_tcp_proto; + case BPF_FUNC_sk_lookup_udp: + return &bpf_sk_lookup_udp_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; default: return bpf_base_func_proto(func_id); } @@ -5119,6 +5264,12 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sk_redirect_hash_proto; case BPF_FUNC_get_local_storage: return &bpf_get_local_storage_proto; + case BPF_FUNC_sk_lookup_tcp: + return &bpf_sk_lookup_tcp_proto; + case BPF_FUNC_sk_lookup_udp: + return &bpf_sk_lookup_udp_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; default: return bpf_base_func_proto(func_id); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e2070d819e04..f9187b41dff6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2144,6 +2144,77 @@ union bpf_attr { * request in the skb. * Return * 0 on success, or a negative error in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). 
+ * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * int bpf_sk_release(struct bpf_sock *sk) + * Description + * Release the reference held by *sock*. *sock* must be a non-NULL + * pointer that was returned from bpf_sk_lookup_xxx\ (). + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2229,7 +2300,10 @@ union bpf_attr { FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), + FN(skb_ancestor_cgroup_id), \ + FN(sk_lookup_tcp), \ + FN(sk_lookup_udp), \ + FN(sk_release), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2399,6 +2473,23 @@ struct bpf_sock { */ }; +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. 
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index e4be7730222d..1d407b3494f9 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -143,6 +143,18 @@ static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) = (void *) BPF_FUNC_skb_cgroup_id; static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) = (void *) BPF_FUNC_skb_ancestor_cgroup_id; +static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx, + struct bpf_sock_tuple *tuple, + int size, unsigned int netns_id, + unsigned long long flags) = + (void *) BPF_FUNC_sk_lookup_tcp; +static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx, + struct bpf_sock_tuple *tuple, + int size, unsigned int netns_id, + unsigned long long flags) = + (void *) BPF_FUNC_sk_lookup_udp; +static int (*bpf_sk_release)(struct bpf_sock *sk) = + (void *) BPF_FUNC_sk_release; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions -- cgit v1.2.3 From 44318d5b07be7d7cfe718aa22ea3b2577361a0b5 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 2 Oct 2018 16:19:16 -0400 Subject: locking/lockdep: Remove add_chain_cache_classes() The inline function add_chain_cache_classes() is defined, but has no caller. Just remove it. Signed-off-by: Waiman Long Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1538511560-10090-2-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 70 ------------------------------------------------ 1 file changed, 70 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e406c5fdb41e..fa82d55279fe 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2148,76 +2148,6 @@ static int check_no_collision(struct task_struct *curr, return 1; } -/* - * This is for building a chain between just two different classes, - * instead of adding a new hlock upon current, which is done by - * add_chain_cache(). - * - * This can be called in any context with two classes, while - * add_chain_cache() must be done within the lock owener's context - * since it uses hlock which might be racy in another context. - */ -static inline int add_chain_cache_classes(unsigned int prev, - unsigned int next, - unsigned int irq_context, - u64 chain_key) -{ - struct hlist_head *hash_head = chainhashentry(chain_key); - struct lock_chain *chain; - - /* - * Allocate a new chain entry from the static array, and add - * it to the hash: - */ - - /* - * We might need to take the graph lock, ensure we've got IRQs - * disabled to make this an IRQ-safe lock.. for recursion reasons - * lockdep won't complain about its own locking errors. - */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return 0; - - if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { - if (!debug_locks_off_graph_unlock()) - return 0; - - print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); - dump_stack(); - return 0; - } - - chain = lock_chains + nr_lock_chains++; - chain->chain_key = chain_key; - chain->irq_context = irq_context; - chain->depth = 2; - if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { - chain->base = nr_chain_hlocks; - nr_chain_hlocks += chain->depth; - chain_hlocks[chain->base] = prev - 1; - chain_hlocks[chain->base + 1] = next -1; - } -#ifdef CONFIG_DEBUG_LOCKDEP - /* - * Important for check_no_collision(). 
- */ - else { - if (!debug_locks_off_graph_unlock()) - return 0; - - print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); - dump_stack(); - return 0; - } -#endif - - hlist_add_head_rcu(&chain->entry, hash_head); - debug_atomic_inc(chain_lookup_misses); - inc_chains(); - - return 1; -} - /* * Adds a dependency chain into chain hashtable. And must be called with * graph_lock held. -- cgit v1.2.3 From 8ee10862476ef8b9e81e5b521205fd5c620b4ffb Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 2 Oct 2018 16:19:17 -0400 Subject: locking/lockdep: Eliminate redundant IRQs check in __lock_acquire() The static __lock_acquire() function has only two callers: 1) lock_acquire() 2) reacquire_held_locks() In lock_acquire(), raw_local_irq_save() is called beforehand. So IRQs must have been disabled. So the check: DEBUG_LOCKS_WARN_ON(!irqs_disabled()) is kind of redundant in this case. So move the above check to reacquire_held_locks() to eliminate redundant code in the lock_acquire() path. Signed-off-by: Waiman Long Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1538511560-10090-3-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index fa82d55279fe..a5d7db558928 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3193,6 +3193,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read); /* * This gets called for every mutex_lock*()/spin_lock*() operation. * We maintain the dependency maps and validate the locking attempt: + * + * The callers must make sure that IRQs are disabled before calling it, + * otherwise we could get an interrupt which would want to take locks, + * which would end up in lockdep again. */ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, @@ -3210,14 +3214,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (unlikely(!debug_locks)) return 0; - /* - * Lockdep should run with IRQs disabled, otherwise we could - * get an interrupt which would want to take locks, which would - * end up in lockdep and have you got a head-ache already? - */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return 0; - if (!prove_locking || lock->key == &__lockdep_no_validate__) check = 0; @@ -3474,6 +3470,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, { struct held_lock *hlock; + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return 0; + for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, -- cgit v1.2.3 From ce52a18db45842f5b992851a552bd7f6acb2241b Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 2 Oct 2018 16:19:18 -0400 Subject: locking/lockdep: Add a faster path in __lock_release() When __lock_release() is called, the most likely unlock scenario is on the innermost lock in the chain. In this case, we can skip some of the checks and provide a faster path to completion. 
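
The shape of the optimisation can be sketched outside of lockdep. The toy code below only illustrates the idea in this commit message and is not the lockdep implementation: a per-task stack of held locks, where releasing the innermost entry merely shrinks the stack, while releasing an inner entry forces every entry above it to be shifted down and re-validated, which is the work reacquire_held_locks() does in the real code.

#define MAX_DEPTH 48

static void *held[MAX_DEPTH];
static int depth;

/* stand-in for re-running the dependency checks on held[idx] */
static int revalidate(int idx) { (void)idx; return 0; }

static int release(void *lock)
{
	int i;

	for (i = depth - 1; i >= 0; i--)
		if (held[i] == lock)
			break;
	if (i < 0)
		return -1;			/* not held at all */

	if (i == depth - 1) {			/* fast path: innermost lock */
		depth--;
		return 0;
	}

	/* slow path: close the gap and re-check everything that moved */
	for (; i < depth - 1; i++) {
		held[i] = held[i + 1];
		if (revalidate(i))
			return -1;
	}
	depth--;
	return 0;
}

The patch below adds exactly that early return for the innermost case.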
Signed-off-by: Waiman Long Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1538511560-10090-4-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index a5d7db558928..511d30f88bce 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3626,6 +3626,13 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) curr->lockdep_depth = i; curr->curr_chain_key = hlock->prev_chain_key; + /* + * The most likely case is when the unlock is on the innermost + * lock. In this case, we are done! + */ + if (i == depth-1) + return 1; + if (reacquire_held_locks(curr, depth, i + 1)) return 0; @@ -3633,10 +3640,14 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) * We had N bottles of beer on the wall, we drank one, but now * there's not N-1 bottles of beer left on the wall... */ - if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) - return 0; + DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1); - return 1; + /* + * Since reacquire_held_locks() would have called check_chain_key() + * indirectly via __lock_acquire(), we don't need to do it again + * on return. + */ + return 0; } static int __lock_is_held(const struct lockdep_map *lock, int read) -- cgit v1.2.3 From e4a02ed2aaf447fa849e3254bfdb3b9b01e1e520 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 2 Oct 2018 14:48:49 -0700 Subject: locking/ww_mutex: Fix runtime warning in the WW mutex selftest If CONFIG_WW_MUTEX_SELFTEST=y is enabled, booting an image in an arm64 virtual machine results in the following traceback if 8 CPUs are enabled: DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current) WARNING: CPU: 2 PID: 537 at kernel/locking/mutex.c:1033 __mutex_unlock_slowpath+0x1a8/0x2e0 ... Call trace: __mutex_unlock_slowpath() ww_mutex_unlock() test_cycle_work() process_one_work() worker_thread() kthread() ret_from_fork() If requesting b_mutex fails with -EDEADLK, the error variable is reassigned to the return value from calling ww_mutex_lock on a_mutex again. If this call fails, a_mutex is not locked. It is, however, unconditionally unlocked subsequently, causing the reported warning. Fix the problem by using two error variables. With this change, the selftest still fails as follows: cyclic deadlock not resolved, ret[7/8] = -35 However, the traceback is gone. 
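
The resulting acquire/backoff pattern, with one status variable per lock attempt, looks roughly as follows. This is a sketch against the generic ww_mutex API rather than a copy of the selftest; the class, function and parameter names are illustrative, and a and b are assumed to have been initialised with ww_mutex_init() against the same class.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

static int lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &demo_ww_class);
	ww_mutex_lock(a, &ctx);		/* first lock in this context cannot deadlock */

	err = ww_mutex_lock(b, &ctx);
	if (err == -EDEADLK) {
		/* back off: drop a, wait for b, then try a again */
		err = 0;
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		erra = ww_mutex_lock(a, &ctx);	/* may fail independently */
	}

	/* ... critical section, if neither attempt failed ... */

	if (!err)
		ww_mutex_unlock(b);
	if (!erra)
		ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);

	return err ?: erra;
}

Keeping err and erra separate is what guarantees that only mutexes which were actually acquired get unlocked before ww_acquire_fini(), which is precisely the invariant the warning above was flagging.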
Signed-off-by: Guenter Roeck Cc: Chris Wilson Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Fixes: d1b42b800e5d0 ("locking/ww_mutex: Add kselftests for resolving ww_mutex cyclic deadlocks") Link: http://lkml.kernel.org/r/1538516929-9734-1-git-send-email-linux@roeck-us.net Signed-off-by: Ingo Molnar --- kernel/locking/test-ww_mutex.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 0be047dbd897..65a3b7e55b9f 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work) { struct test_cycle *cycle = container_of(work, typeof(*cycle), work); struct ww_acquire_ctx ctx; - int err; + int err, erra = 0; ww_acquire_init(&ctx, &ww_class); ww_mutex_lock(&cycle->a_mutex, &ctx); @@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work) err = ww_mutex_lock(cycle->b_mutex, &ctx); if (err == -EDEADLK) { + err = 0; ww_mutex_unlock(&cycle->a_mutex); ww_mutex_lock_slow(cycle->b_mutex, &ctx); - err = ww_mutex_lock(&cycle->a_mutex, &ctx); + erra = ww_mutex_lock(&cycle->a_mutex, &ctx); } if (!err) ww_mutex_unlock(cycle->b_mutex); - ww_mutex_unlock(&cycle->a_mutex); + if (!erra) + ww_mutex_unlock(&cycle->a_mutex); ww_acquire_fini(&ctx); - cycle->result = err; + cycle->result = err ?: erra; } static int __test_cycle(unsigned int nthreads) -- cgit v1.2.3 From 018303a931a89b91dacd76140b8ebe51893dc5fe Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 18 Apr 2018 19:15:59 -0500 Subject: signal/sparc: Move EMT_TAGOVF into the generic siginfo.h When moving all of the architectures specific si_codes into siginfo.h, I apparently overlooked EMT_TAGOVF. Move it now. Remove the now redundant test in siginfo_layout for SIGEMT as now NSIGEMT is always defined. Signed-off-by: "Eric W. 
Biederman" --- arch/sparc/include/uapi/asm/siginfo.h | 6 ------ include/uapi/asm-generic/siginfo.h | 6 ++++++ kernel/signal.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h index e7049550ac82..6c820ea0813b 100644 --- a/arch/sparc/include/uapi/asm/siginfo.h +++ b/arch/sparc/include/uapi/asm/siginfo.h @@ -17,10 +17,4 @@ #define SI_NOINFO 32767 /* no information in siginfo_t */ -/* - * SIGEMT si_codes - */ -#define EMT_TAGOVF 1 /* tag overflow */ -#define NSIGEMT 1 - #endif /* _UAPI__SPARC_SIGINFO_H */ diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 80e2a7227205..1811b8101937 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -285,6 +285,12 @@ typedef struct siginfo { #define SYS_SECCOMP 1 /* seccomp triggered */ #define NSIGSYS 1 +/* + * SIGEMT si_codes + */ +#define EMT_TAGOVF 1 /* tag overflow */ +#define NSIGEMT 1 + /* * sigevent definitions * diff --git a/kernel/signal.c b/kernel/signal.c index e16278710b36..7b49c31d3fdb 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2856,7 +2856,7 @@ enum siginfo_layout siginfo_layout(int sig, int si_code) [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, [SIGBUS] = { NSIGBUS, SIL_FAULT }, [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, -#if defined(SIGEMT) && defined(NSIGEMT) +#if defined(SIGEMT) [SIGEMT] = { NSIGEMT, SIL_FAULT }, #endif [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, -- cgit v1.2.3 From e75dc036c445b91b8b2ad4e6c9b05f04b6be6d3f Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 25 Sep 2018 12:04:47 +0200 Subject: signal: Fail sigqueueinfo if si_signo != sig The kernel needs to validate that the contents of struct siginfo make sense as siginfo is copied into the kernel, so that the proper union members can be put in the appropriate locations. The field si_signo is a fundamental part of that validation. As such changing the contents of si_signo after the validation make no sense and can result in nonsense values in the kernel. As such simply fail if someone is silly enough to set si_signo out of sync with the signal number passed to sigqueueinfo. I don't expect a problem as glibc's sigqueue implementation sets "si_signo = sig" and CRIU just returns to the kernel what the kernel gave to it. If there is some application that calls sigqueueinfo directly that has a problem with this added sanity check we can revisit this when we see what kind of crazy that application is doing. Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 7b49c31d3fdb..e445b0a63faa 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3306,7 +3306,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) (task_pid_vnr(current) != pid)) return -EPERM; - info->si_signo = sig; + if (info->si_signo != sig) + return -EINVAL; /* POSIX.1b doesn't mention process groups. */ return kill_proc_info(sig, info, pid); @@ -3354,7 +3355,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) (task_pid_vnr(current) != pid)) return -EPERM; - info->si_signo = sig; + if (info->si_signo != sig) + return -EINVAL; return do_send_specific(tgid, pid, sig, info); } -- cgit v1.2.3 From f28380185193610c716a90ec9b9e696638a495ce Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Wed, 18 Apr 2018 17:48:49 -0500 Subject: signal: Remove the need for __ARCH_SI_PREABLE_SIZE and SI_PAD_SIZE Rework the defintion of struct siginfo so that the array padding struct siginfo to SI_MAX_SIZE can be placed in a union along side of the rest of the struct siginfo members. The result is that we no longer need the __ARCH_SI_PREAMBLE_SIZE or SI_PAD_SIZE definitions. Signed-off-by: "Eric W. Biederman" --- arch/alpha/include/uapi/asm/siginfo.h | 1 - arch/arm64/include/uapi/asm/Kbuild | 1 + arch/arm64/include/uapi/asm/siginfo.h | 24 ---- arch/ia64/include/uapi/asm/siginfo.h | 2 - arch/mips/include/uapi/asm/siginfo.h | 11 -- arch/parisc/include/uapi/asm/Kbuild | 1 + arch/parisc/include/uapi/asm/siginfo.h | 11 -- arch/powerpc/include/uapi/asm/Kbuild | 1 + arch/powerpc/include/uapi/asm/siginfo.h | 18 --- arch/riscv/include/uapi/asm/Kbuild | 1 + arch/riscv/include/uapi/asm/siginfo.h | 24 ---- arch/s390/include/uapi/asm/Kbuild | 1 + arch/s390/include/uapi/asm/siginfo.h | 17 --- arch/sparc/include/uapi/asm/siginfo.h | 1 - arch/x86/include/uapi/asm/siginfo.h | 2 - include/uapi/asm-generic/siginfo.h | 187 ++++++++++++++++---------------- kernel/signal.c | 3 - 17 files changed, 99 insertions(+), 207 deletions(-) delete mode 100644 arch/arm64/include/uapi/asm/siginfo.h delete mode 100644 arch/parisc/include/uapi/asm/siginfo.h delete mode 100644 arch/powerpc/include/uapi/asm/siginfo.h delete mode 100644 arch/riscv/include/uapi/asm/siginfo.h delete mode 100644 arch/s390/include/uapi/asm/siginfo.h (limited to 'kernel') diff --git a/arch/alpha/include/uapi/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h index db3f0138536f..6e1a2af2f962 100644 --- a/arch/alpha/include/uapi/asm/siginfo.h +++ b/arch/alpha/include/uapi/asm/siginfo.h @@ -2,7 +2,6 @@ #ifndef _ALPHA_SIGINFO_H #define _ALPHA_SIGINFO_H -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_TRAPNO #include diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 198afbf0688f..6c5adf458690 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -19,3 +19,4 @@ generic-y += swab.h generic-y += termbits.h generic-y += termios.h generic-y += types.h +generic-y += siginfo.h diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h deleted file mode 100644 index 574d12f86039..000000000000 --- a/arch/arm64/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -#ifndef __ASM_SIGINFO_H -#define __ASM_SIGINFO_H - -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) - -#include - -#endif diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index 52b5af424511..796af1ccaa7e 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -9,8 +9,6 @@ #define _UAPI_ASM_IA64_SIGINFO_H -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) - #include #define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 262504bd59a5..c34c7eef0a1c 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -14,17 +14,6 @@ #define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int)) #undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */ -/* - * Careful to keep union _sifields from shifting ... - */ -#if _MIPS_SZLONG == 32 -#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) -#elif _MIPS_SZLONG == 64 -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#else -#error _MIPS_SZLONG neither 32 nor 64 -#endif - #define __ARCH_HAS_SWAPPED_SIGINFO #include diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index 286ef5a5904b..adb5c64831c7 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += kvm_para.h generic-y += param.h generic-y += poll.h generic-y += resource.h +generic-y += siginfo.h diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h deleted file mode 100644 index 4a1062e05aaf..000000000000 --- a/arch/parisc/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _PARISC_SIGINFO_H -#define _PARISC_SIGINFO_H - -#if defined(__LP64__) -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include - -#endif diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index 1a6ed5919ffd..a658091a19f9 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += poll.h generic-y += resource.h generic-y += sockios.h generic-y += statfs.h +generic-y += siginfo.h diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h deleted file mode 100644 index 1d51d9b88221..000000000000 --- a/arch/powerpc/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -#ifndef _ASM_POWERPC_SIGINFO_H -#define _ASM_POWERPC_SIGINFO_H - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifdef __powerpc64__ -# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include - -#endif /* _ASM_POWERPC_SIGINFO_H */ diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild index 7e91f4850475..5511b9918131 100644 --- a/arch/riscv/include/uapi/asm/Kbuild +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -26,3 +26,4 @@ generic-y += swab.h generic-y += termbits.h generic-y += termios.h generic-y += types.h +generic-y += siginfo.h diff --git a/arch/riscv/include/uapi/asm/siginfo.h b/arch/riscv/include/uapi/asm/siginfo.h deleted file mode 100644 index f96849aac662..000000000000 --- a/arch/riscv/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * Copyright (C) 2016 SiFive, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ASM_SIGINFO_H -#define __ASM_SIGINFO_H - -#define __ARCH_SI_PREAMBLE_SIZE (__SIZEOF_POINTER__ == 4 ? 12 : 16) - -#include - -#endif diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index e364873e0d10..dc38a90cf091 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -18,3 +18,4 @@ generic-y += shmbuf.h generic-y += sockios.h generic-y += swab.h generic-y += termbits.h +generic-y += siginfo.h \ No newline at end of file diff --git a/arch/s390/include/uapi/asm/siginfo.h b/arch/s390/include/uapi/asm/siginfo.h deleted file mode 100644 index 6984820f2f1c..000000000000 --- a/arch/s390/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * S390 version - * - * Derived from "include/asm-i386/siginfo.h" - */ - -#ifndef _S390_SIGINFO_H -#define _S390_SIGINFO_H - -#ifdef __s390x__ -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include - -#endif diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h index 6c820ea0813b..68bdde4c2a2e 100644 --- a/arch/sparc/include/uapi/asm/siginfo.h +++ b/arch/sparc/include/uapi/asm/siginfo.h @@ -4,7 +4,6 @@ #if defined(__sparc__) && defined(__arch64__) -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_BAND_T int #endif /* defined(__sparc__) && defined(__arch64__) */ diff --git a/arch/x86/include/uapi/asm/siginfo.h b/arch/x86/include/uapi/asm/siginfo.h index b3d157957177..6642d8be40c4 100644 --- a/arch/x86/include/uapi/asm/siginfo.h +++ b/arch/x86/include/uapi/asm/siginfo.h @@ -7,8 +7,6 @@ typedef long long __kernel_si_clock_t __attribute__((aligned(4))); # define __ARCH_SI_CLOCK_T __kernel_si_clock_t # define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8))) -# else /* x86-64 */ -# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) # endif #endif diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 1811b8101937..cb3d6c267181 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -10,18 +10,7 @@ typedef union sigval { void __user *sival_ptr; } sigval_t; -/* - 
* This is the size (including padding) of the part of the - * struct siginfo that is before the union. - */ -#ifndef __ARCH_SI_PREAMBLE_SIZE -#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) -#endif - #define SI_MAX_SIZE 128 -#ifndef SI_PAD_SIZE -#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int)) -#endif /* * The default "si_band" type is "long", as specified by POSIX. @@ -40,96 +29,108 @@ typedef union sigval { #define __ARCH_SI_ATTRIBUTES #endif -typedef struct siginfo { - int si_signo; -#ifndef __ARCH_HAS_SWAPPED_SIGINFO - int si_errno; - int si_code; -#else - int si_code; - int si_errno; -#endif - - union { - int _pad[SI_PAD_SIZE]; - - /* kill() */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid32_t _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - __kernel_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid32_t _uid; /* sender's uid */ - sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - __kernel_pid_t _pid; /* which child */ - __kernel_uid32_t _uid; /* sender's uid */ - int _status; /* exit code */ - __ARCH_SI_CLOCK_T _utime; - __ARCH_SI_CLOCK_T _stime; - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ - struct { - void __user *_addr; /* faulting insn/memory ref. */ +union __sifields { + /* kill() */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + __kernel_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + sigval_t _sigval; /* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid32_t _uid; /* sender's uid */ + sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + __kernel_pid_t _pid; /* which child */ + __kernel_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + __ARCH_SI_CLOCK_T _utime; + __ARCH_SI_CLOCK_T _stime; + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + void __user *_addr; /* faulting insn/memory ref. */ #ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ + int _trapno; /* TRAP # which caused the signal */ #endif #ifdef __ia64__ - int _imm; /* immediate value for "break" */ - unsigned int _flags; /* see ia64 si_flags */ - unsigned long _isr; /* isr */ + int _imm; /* immediate value for "break" */ + unsigned int _flags; /* see ia64 si_flags */ + unsigned long _isr; /* isr */ #endif #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ sizeof(short) : __alignof__(void *)) - union { - /* - * used when si_code=BUS_MCEERR_AR or - * used when si_code=BUS_MCEERR_AO - */ - short _addr_lsb; /* LSB of the reported address */ - /* used when si_code=SEGV_BNDERR */ - struct { - char _dummy_bnd[__ADDR_BND_PKEY_PAD]; - void __user *_lower; - void __user *_upper; - } _addr_bnd; - /* used when si_code=SEGV_PKUERR */ - struct { - char _dummy_pkey[__ADDR_BND_PKEY_PAD]; - __u32 _pkey; - } _addr_pkey; - }; - } _sigfault; - - /* SIGPOLL */ - struct { - __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short _addr_lsb; /* LSB of the reported address */ + /* used when si_code=SEGV_BNDERR */ + struct { + char _dummy_bnd[__ADDR_BND_PKEY_PAD]; + void __user *_lower; + void __user *_upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + char _dummy_pkey[__ADDR_BND_PKEY_PAD]; + __u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + /* SIGSYS */ + struct { + void __user *_call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; +}; - /* SIGSYS */ - struct { - void __user *_call_addr; /* calling user insn */ - int _syscall; /* triggering system call number */ - unsigned int _arch; /* AUDIT_ARCH_* of syscall */ - } _sigsys; - } _sifields; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO +#define __SIGINFO \ +struct { \ + int si_signo; \ + int si_errno; \ + int si_code; \ + union __sifields _sifields; \ +} +#else +#define __SIGINFO \ +struct { \ + int si_signo; \ + int si_code; \ + int si_errno; \ + union __sifields _sifields; \ +} +#endif /* __ARCH_HAS_SWAPPED_SIGINFO */ + +typedef struct siginfo { + union { + __SIGINFO; + int _si_pad[SI_MAX_SIZE/sizeof(int)]; + }; } __ARCH_SI_ATTRIBUTES siginfo_t; /* diff --git a/kernel/signal.c b/kernel/signal.c index e445b0a63faa..debb485a76db 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3963,9 +3963,6 @@ __weak const char *arch_vma_name(struct vm_area_struct *vma) void __init signals_init(void) { - /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */ - BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE - != offsetof(struct siginfo, _sifields._pad)); BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); -- cgit v1.2.3 From 4cd2e0e70af6897ca2247fa1ffb1553ca16b4903 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 18 Apr 2018 17:30:19 -0500 Subject: signal: Introduce copy_siginfo_from_user and use it's return value In preparation for using a smaller version of siginfo in the kernel introduce copy_siginfo_from_user and use it when siginfo is copied from userspace. Make the pattern for using copy_siginfo_from_user and copy_siginfo_from_user32 to capture the return value and return that value on error. This is a necessary prerequisite for using a smaller siginfo in the kernel than the kernel exports to userspace. Signed-off-by: "Eric W. 
Biederman" --- include/linux/signal.h | 1 + kernel/ptrace.c | 12 +++++------- kernel/signal.c | 25 ++++++++++++++++--------- 3 files changed, 22 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/include/linux/signal.h b/include/linux/signal.h index 3d4cd5db30a9..de94c159bfb0 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -22,6 +22,7 @@ static inline void clear_siginfo(struct siginfo *info) } int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); +int copy_siginfo_from_user(struct siginfo *to, const struct siginfo __user *from); enum siginfo_layout { SIL_KILL, diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 45f77a1b9c97..a807ff5cc1a9 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -919,9 +919,8 @@ int ptrace_request(struct task_struct *child, long request, break; case PTRACE_SETSIGINFO: - if (copy_from_user(&siginfo, datavp, sizeof siginfo)) - ret = -EFAULT; - else + ret = copy_siginfo_from_user(&siginfo, datavp); + if (!ret) ret = ptrace_setsiginfo(child, &siginfo); break; @@ -1215,10 +1214,9 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, break; case PTRACE_SETSIGINFO: - if (copy_siginfo_from_user32( - &siginfo, (struct compat_siginfo __user *) datap)) - ret = -EFAULT; - else + ret = copy_siginfo_from_user32( + &siginfo, (struct compat_siginfo __user *) datap); + if (!ret) ret = ptrace_setsiginfo(child, &siginfo); break; #ifdef CONFIG_HAVE_ARCH_TRACEHOOK diff --git a/kernel/signal.c b/kernel/signal.c index debb485a76db..c0e289e62d77 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2896,6 +2896,13 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) return 0; } +int copy_siginfo_from_user(siginfo_t *to, const siginfo_t __user *from) +{ + if (copy_from_user(to, from, sizeof(struct siginfo))) + return -EFAULT; + return 0; +} + #ifdef CONFIG_COMPAT int copy_siginfo_to_user32(struct compat_siginfo __user *to, const struct siginfo *from) @@ -3323,8 +3330,9 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { siginfo_t info; - if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) - return -EFAULT; + int ret = copy_siginfo_from_user(&info, uinfo); + if (unlikely(ret)) + return ret; return do_rt_sigqueueinfo(pid, sig, &info); } @@ -3365,10 +3373,9 @@ SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { siginfo_t info; - - if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) - return -EFAULT; - + int ret = copy_siginfo_from_user(&info, uinfo); + if (unlikely(ret)) + return ret; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } @@ -3380,9 +3387,9 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, struct compat_siginfo __user *, uinfo) { siginfo_t info; - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; + int ret = copy_siginfo_from_user32(&info, uinfo); + if (unlikely(ret)) + return ret; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } #endif -- cgit v1.2.3 From ae7795bc6187a15ec51cf258abae656a625f9980 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 25 Sep 2018 11:27:20 +0200 Subject: signal: Distinguish between kernel_siginfo and siginfo Linus recently observed that if we did not worry about the padding member in struct siginfo it is only about 48 bytes, and 48 bytes is much nicer than 128 bytes for allocating on the stack and copying around in the kernel. 
The obvious thing of only adding the padding when userspace is including siginfo.h won't work as there are sigframe definitions in the kernel that embed struct siginfo. So split siginfo in two; kernel_siginfo and siginfo. Keeping the traditional name for the userspace definition. While the version that is used internally to the kernel and ultimately will not be padded to 128 bytes is called kernel_siginfo. The definition of struct kernel_siginfo I have put in include/signal_types.h A set of buildtime checks has been added to verify the two structures have the same field offsets. To make it easy to verify the change kernel_siginfo retains the same size as siginfo. The reduction in size comes in a following change. Signed-off-by: "Eric W. Biederman" --- arch/x86/include/asm/compat.h | 2 +- drivers/usb/core/devio.c | 4 +- fs/binfmt_elf.c | 6 +- fs/coredump.c | 2 +- fs/fcntl.c | 2 +- fs/signalfd.c | 6 +- include/linux/binfmts.h | 2 +- include/linux/compat.h | 4 +- include/linux/coredump.h | 4 +- include/linux/lsm_hooks.h | 4 +- include/linux/posix-timers.h | 2 +- include/linux/ptrace.h | 2 +- include/linux/sched.h | 2 +- include/linux/sched/signal.h | 18 ++--- include/linux/security.h | 6 +- include/linux/signal.h | 15 +++-- include/linux/signal_types.h | 11 ++- include/trace/events/signal.h | 4 +- ipc/mqueue.c | 2 +- kernel/ptrace.c | 10 +-- kernel/seccomp.c | 6 +- kernel/signal.c | 151 +++++++++++++++++++++++++++--------------- kernel/time/posix-timers.c | 2 +- security/apparmor/lsm.c | 2 +- security/security.c | 2 +- security/selinux/hooks.c | 2 +- security/smack/smack_lsm.c | 2 +- 27 files changed, 165 insertions(+), 110 deletions(-) (limited to 'kernel') diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index fb97cf7c4137..a0f46bdd9f24 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -240,6 +240,6 @@ static inline bool in_compat_syscall(void) struct compat_siginfo; int __copy_siginfo_to_user32(struct compat_siginfo __user *to, - const siginfo_t *from, bool x32_ABI); + const kernel_siginfo_t *from, bool x32_ABI); #endif /* _ASM_X86_COMPAT_H */ diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ce77b33da61..c260ea8808b0 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -582,7 +582,7 @@ static void async_completed(struct urb *urb) { struct async *as = urb->context; struct usb_dev_state *ps = as->ps; - struct siginfo sinfo; + struct kernel_siginfo sinfo; struct pid *pid = NULL; const struct cred *cred = NULL; unsigned long flags; @@ -2599,7 +2599,7 @@ const struct file_operations usbdev_file_operations = { static void usbdev_remove(struct usb_device *udev) { struct usb_dev_state *ps; - struct siginfo sinfo; + struct kernel_siginfo sinfo; while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct usb_dev_state, list); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index efae2fb0930a..54207327f98f 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1580,7 +1580,7 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) } static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, - const siginfo_t *siginfo) + const kernel_siginfo_t *siginfo) { mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); @@ -1782,7 +1782,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, static int fill_note_info(struct elfhdr *elf, int phdrs, struct elf_note_info *info, - const siginfo_t *siginfo, struct pt_regs *regs) + const 
kernel_siginfo_t *siginfo, struct pt_regs *regs) { struct task_struct *dump_task = current; const struct user_regset_view *view = task_user_regset_view(dump_task); @@ -2031,7 +2031,7 @@ static int elf_note_info_init(struct elf_note_info *info) static int fill_note_info(struct elfhdr *elf, int phdrs, struct elf_note_info *info, - const siginfo_t *siginfo, struct pt_regs *regs) + const kernel_siginfo_t *siginfo, struct pt_regs *regs) { struct list_head *t; struct core_thread *ct; diff --git a/fs/coredump.c b/fs/coredump.c index 1e2c87acac9b..e42e17e55bfd 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -536,7 +536,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) return err; } -void do_coredump(const siginfo_t *siginfo) +void do_coredump(const kernel_siginfo_t *siginfo) { struct core_state core_state; struct core_name cn; diff --git a/fs/fcntl.c b/fs/fcntl.c index 4137d96534a6..083185174c6d 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -735,7 +735,7 @@ static void send_sigio_to_task(struct task_struct *p, return; switch (signum) { - siginfo_t si; + kernel_siginfo_t si; default: /* Queue a rt signal with the appropriate fd as its value. We use SI_SIGIO as the source, not diff --git a/fs/signalfd.c b/fs/signalfd.c index 4fcd1498acf5..757afc7c5895 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -79,7 +79,7 @@ static __poll_t signalfd_poll(struct file *file, poll_table *wait) * Copied from copy_siginfo_to_user() in kernel/signal.c */ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, - siginfo_t const *kinfo) + kernel_siginfo_t const *kinfo) { struct signalfd_siginfo new; @@ -163,7 +163,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, return sizeof(*uinfo); } -static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info, +static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info, int nonblock) { ssize_t ret; @@ -215,7 +215,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count, struct signalfd_siginfo __user *siginfo; int nonblock = file->f_flags & O_NONBLOCK; ssize_t ret, total = 0; - siginfo_t info; + kernel_siginfo_t info; count /= sizeof(struct signalfd_siginfo); if (!count) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index c05f24fac4f6..e9f5fe69df31 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -78,7 +78,7 @@ struct linux_binprm { /* Function parameter for binfmt->coredump */ struct coredump_params { - const siginfo_t *siginfo; + const kernel_siginfo_t *siginfo; struct pt_regs *regs; struct file *file; unsigned long limit; diff --git a/include/linux/compat.h b/include/linux/compat.h index 1a3c4f37e908..4565d65b1776 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -452,8 +452,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); -int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); +int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); diff --git a/include/linux/coredump.h 
b/include/linux/coredump.h index 207aed96a5b7..abf4b4e65dbb 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP -extern void do_coredump(const siginfo_t *siginfo); +extern void do_coredump(const kernel_siginfo_t *siginfo); #else -static inline void do_coredump(const siginfo_t *siginfo) {} +static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif #endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 97a020c616ad..bb40f6d34163 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -672,7 +672,7 @@ * Return 0 if permission is granted. * @task_kill: * Check permission before sending signal @sig to @p. @info can be NULL, - * the constant 1, or a pointer to a siginfo structure. If @info is 1 or + * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming * from the kernel and should typically be permitted. * SIGIO signals are handled separately by the send_sigiotask hook in @@ -1606,7 +1606,7 @@ union security_list_options { int (*task_setscheduler)(struct task_struct *p); int (*task_getscheduler)(struct task_struct *p); int (*task_movememory)(struct task_struct *p); - int (*task_kill)(struct task_struct *p, struct siginfo *info, + int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index ee7e987ea1b4..e96581ca7c9d 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -126,5 +126,5 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); -void posixtimer_rearm(struct siginfo *info); +void posixtimer_rearm(struct kernel_siginfo *info); #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1de2235511c8..d19a795100da 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -341,7 +341,7 @@ extern void user_single_step_report(struct pt_regs *regs); #else static inline void user_single_step_report(struct pt_regs *regs) { - siginfo_t info; + kernel_siginfo_t info; clear_siginfo(&info); info.si_signo = SIGTRAP; info.si_errno = 0; diff --git a/include/linux/sched.h b/include/linux/sched.h index 977cb57d7bc9..2ba88082e1ef 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -960,7 +960,7 @@ struct task_struct { /* Ptrace state: */ unsigned long ptrace_message; - siginfo_t *last_siginfo; + kernel_siginfo_t *last_siginfo; struct task_io_accounting ioac; #ifdef CONFIG_TASK_XACCT diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 9e07f3521549..13789d10a50e 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -270,12 +270,12 @@ static inline int signal_group_exit(const struct signal_struct *sig) extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t 
*mask, siginfo_t *info); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info); static inline int kernel_dequeue_signal(void) { struct task_struct *tsk = current; - siginfo_t __info; + kernel_siginfo_t __info; int ret; spin_lock_irq(&tsk->sighand->siglock); @@ -322,12 +322,12 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_ptrace_errno_trap(int errno, void __user *addr); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig, struct task_struct *p); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, +extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); @@ -475,8 +475,8 @@ static inline int kill_cad_pid(int sig, int priv) } /* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) +#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) +#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) /* * True if we are on the alternate signal stack. 
diff --git a/include/linux/security.h b/include/linux/security.h index 75f4156c84d7..d170a5b031f3 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -35,7 +35,7 @@ struct linux_binprm; struct cred; struct rlimit; -struct siginfo; +struct kernel_siginfo; struct sembuf; struct kern_ipc_perm; struct audit_context; @@ -361,7 +361,7 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource, int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); -int security_task_kill(struct task_struct *p, struct siginfo *info, +int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -1020,7 +1020,7 @@ static inline int security_task_movememory(struct task_struct *p) } static inline int security_task_kill(struct task_struct *p, - struct siginfo *info, int sig, + struct kernel_siginfo *info, int sig, const struct cred *cred) { return 0; diff --git a/include/linux/signal.h b/include/linux/signal.h index de94c159bfb0..70031b10b918 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -11,18 +11,19 @@ struct task_struct; /* for sysctl */ extern int print_fatal_signals; -static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) +static inline void copy_siginfo(kernel_siginfo_t *to, + const kernel_siginfo_t *from) { memcpy(to, from, sizeof(*to)); } -static inline void clear_siginfo(struct siginfo *info) +static inline void clear_siginfo(kernel_siginfo_t *info) { memset(info, 0, sizeof(*info)); } -int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); -int copy_siginfo_from_user(struct siginfo *to, const struct siginfo __user *from); +int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); +int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); enum siginfo_layout { SIL_KILL, @@ -258,11 +259,11 @@ struct pt_regs; enum pid_type; extern int next_signal(struct sigpending *pending, sigset_t *mask); -extern int do_send_sig_info(int sig, struct siginfo *info, +extern int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); -extern int group_send_sig_info(int sig, struct siginfo *info, +extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); -extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 222ae696000b..2a40a9c5e4ad 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -9,6 +9,13 @@ #include #include +typedef struct kernel_siginfo { + union { + __SIGINFO; + int _si_pad[SI_MAX_SIZE/sizeof(int)]; + }; +} kernel_siginfo_t; + /* * Real Time signals may be queued. 
*/ @@ -16,7 +23,7 @@ struct sigqueue { struct list_head list; int flags; - siginfo_t info; + kernel_siginfo_t info; struct user_struct *user; }; @@ -60,7 +67,7 @@ struct old_sigaction { struct ksignal { struct k_sigaction ka; - siginfo_t info; + kernel_siginfo_t info; int sig; }; diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 3deeed50ffd0..1db7e4b07c01 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -49,7 +49,7 @@ enum { */ TRACE_EVENT(signal_generate, - TP_PROTO(int sig, struct siginfo *info, struct task_struct *task, + TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task, int group, int result), TP_ARGS(sig, info, task, group, result), @@ -95,7 +95,7 @@ TRACE_EVENT(signal_generate, */ TRACE_EVENT(signal_deliver, - TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka), + TP_PROTO(int sig, struct kernel_siginfo *info, struct k_sigaction *ka), TP_ARGS(sig, info, ka), diff --git a/ipc/mqueue.c b/ipc/mqueue.c index c0d58f390c3b..cc41de3b8deb 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -655,7 +655,7 @@ static void __do_notify(struct mqueue_inode_info *info) * synchronously. */ if (info->notify_owner && info->attr.mq_curmsgs == 1) { - struct siginfo sig_i; + struct kernel_siginfo sig_i; switch (info->notify.sigev_notify) { case SIGEV_NONE: break; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index a807ff5cc1a9..c2cee9db5204 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -651,7 +651,7 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data) return 0; } -static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) +static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info) { unsigned long flags; int error = -ESRCH; @@ -667,7 +667,7 @@ static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) return error; } -static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) +static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info) { unsigned long flags; int error = -ESRCH; @@ -709,7 +709,7 @@ static int ptrace_peek_siginfo(struct task_struct *child, pending = &child->pending; for (i = 0; i < arg.nr; ) { - siginfo_t info; + kernel_siginfo_t info; s32 off = arg.off + i; spin_lock_irq(&child->sighand->siglock); @@ -885,7 +885,7 @@ int ptrace_request(struct task_struct *child, long request, { bool seized = child->ptrace & PT_SEIZED; int ret = -EIO; - siginfo_t siginfo, *si; + kernel_siginfo_t siginfo, *si; void __user *datavp = (void __user *) data; unsigned long __user *datalp = datavp; unsigned long flags; @@ -1180,7 +1180,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request, { compat_ulong_t __user *datap = compat_ptr(data); compat_ulong_t word; - siginfo_t siginfo; + kernel_siginfo_t siginfo; int ret; switch (request) { diff --git a/kernel/seccomp.c b/kernel/seccomp.c index fd023ac24e10..4d7809cdd27d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -522,7 +522,7 @@ void put_seccomp_filter(struct task_struct *tsk) __put_seccomp_filter(tsk->seccomp.filter); } -static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason) +static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason) { clear_siginfo(info); info->si_signo = SIGSYS; @@ -542,7 +542,7 @@ static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason) */ static void seccomp_send_sigsys(int syscall, int reason) { - struct siginfo info; + struct 
kernel_siginfo info; seccomp_init_siginfo(&info, syscall, reason); force_sig_info(SIGSYS, &info, current); } @@ -747,7 +747,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, /* Dump core only if this is the last remaining thread. */ if (action == SECCOMP_RET_KILL_PROCESS || get_nr_threads(current) == 1) { - siginfo_t info; + kernel_siginfo_t info; /* Show the original registers in the dump. */ syscall_rollback(current, task_pt_regs(current)); diff --git a/kernel/signal.c b/kernel/signal.c index c0e289e62d77..161cad4e448c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -549,7 +549,7 @@ bool unhandled_signal(struct task_struct *tsk, int sig) return !tsk->ptrace; } -static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, +static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info, bool *resched_timer) { struct sigqueue *q, *first = NULL; @@ -595,7 +595,7 @@ still_pending: } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - siginfo_t *info, bool *resched_timer) + kernel_siginfo_t *info, bool *resched_timer) { int sig = next_signal(pending, mask); @@ -610,7 +610,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, * * All callers have to hold the siglock. */ -int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) { bool resched_timer = false; int signr; @@ -737,12 +737,12 @@ static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) } } -static inline int is_si_special(const struct siginfo *info) +static inline int is_si_special(const struct kernel_siginfo *info) { return info <= SEND_SIG_PRIV; } -static inline bool si_fromuser(const struct siginfo *info) +static inline bool si_fromuser(const struct kernel_siginfo *info) { return info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)); @@ -767,7 +767,7 @@ static bool kill_ok_by_cred(struct task_struct *t) * Bad permissions for sending the signal * - the caller must hold the RCU read lock */ -static int check_kill_permission(int sig, struct siginfo *info, +static int check_kill_permission(int sig, struct kernel_siginfo *info, struct task_struct *t) { struct pid *sid; @@ -1010,7 +1010,7 @@ static inline bool legacy_queue(struct sigpending *signals, int sig) } #ifdef CONFIG_USER_NS -static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) +static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t) { if (current_user_ns() == task_cred_xxx(t, user_ns)) return; @@ -1024,13 +1024,13 @@ static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_str rcu_read_unlock(); } #else -static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) +static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t) { return; } #endif -static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, +static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, enum pid_type type, int from_ancestor_ns) { struct sigpending *pending; @@ -1150,7 +1150,7 @@ ret: return ret; } -static int send_signal(int sig, struct siginfo *info, struct task_struct *t, +static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, enum pid_type type) { int from_ancestor_ns = 0; @@ -1197,12 +1197,12 @@ static int __init 
setup_print_fatal_signals(char *str) __setup("print-fatal-signals=", setup_print_fatal_signals); int -__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) { return send_signal(sig, info, p, PIDTYPE_TGID); } -int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, +int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type) { unsigned long flags; @@ -1228,7 +1228,7 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * that is why we also clear SIGNAL_UNKILLABLE. */ int -force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; @@ -1316,8 +1316,8 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, /* * send signal info to all the members of a group */ -int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, - enum pid_type type) +int group_send_sig_info(int sig, struct kernel_siginfo *info, + struct task_struct *p, enum pid_type type) { int ret; @@ -1336,7 +1336,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * control characters do (^C, ^Z etc) * - the caller must hold at least a readlock on tasklist_lock */ -int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) +int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; int retval, success; @@ -1351,7 +1351,7 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) return success ? 0 : retval; } -int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) +int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) { int error = -ESRCH; struct task_struct *p; @@ -1373,7 +1373,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) } } -static int kill_proc_info(int sig, struct siginfo *info, pid_t pid) +static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) { int error; rcu_read_lock(); @@ -1394,7 +1394,7 @@ static inline bool kill_as_cred_perm(const struct cred *cred, } /* like kill_pid_info(), but doesn't use uid/euid of "current" */ -int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, +int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid, const struct cred *cred) { int ret = -EINVAL; @@ -1438,7 +1438,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); * is probably wrong. Should make it like BSD or SYSV. */ -static int kill_something_info(int sig, struct siginfo *info, pid_t pid) +static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid) { int ret; @@ -1482,7 +1482,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid) * These are for backward compatibility with the rest of the kernel source. 
*/ -int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) { /* * Make sure legacy kernel users don't send in bad values @@ -1533,7 +1533,7 @@ int force_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = sig; @@ -1556,7 +1556,7 @@ int send_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = sig; @@ -1576,7 +1576,7 @@ int send_sig_fault(int sig, int code, void __user *addr int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) { - struct siginfo info; + struct kernel_siginfo info; WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); clear_siginfo(&info); @@ -1590,7 +1590,7 @@ int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) { - struct siginfo info; + struct kernel_siginfo info; WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); clear_siginfo(&info); @@ -1605,7 +1605,7 @@ EXPORT_SYMBOL(send_sig_mceerr); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = SIGSEGV; @@ -1620,7 +1620,7 @@ int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) #ifdef SEGV_PKUERR int force_sig_pkuerr(void __user *addr, u32 pkey) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = SIGSEGV; @@ -1637,7 +1637,7 @@ int force_sig_pkuerr(void __user *addr, u32 pkey) */ int force_sig_ptrace_errno_trap(int errno, void __user *addr) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = SIGTRAP; @@ -1766,7 +1766,7 @@ ret: */ bool do_notify_parent(struct task_struct *tsk, int sig) { - struct siginfo info; + struct kernel_siginfo info; unsigned long flags; struct sighand_struct *psig; bool autoreap = false; @@ -1871,7 +1871,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) static void do_notify_parent_cldstop(struct task_struct *tsk, bool for_ptracer, int why) { - struct siginfo info; + struct kernel_siginfo info; unsigned long flags; struct task_struct *parent; struct sighand_struct *sighand; @@ -1971,7 +1971,7 @@ static bool sigkill_pending(struct task_struct *tsk) * If we actually decide not to stop at all because the tracer * is gone, we keep current->exit_code unless clear_code. 
*/ -static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) +static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) __releases(¤t->sighand->siglock) __acquires(¤t->sighand->siglock) { @@ -2108,7 +2108,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) static void ptrace_do_notify(int signr, int exit_code, int why) { - siginfo_t info; + kernel_siginfo_t info; clear_siginfo(&info); info.si_signo = signr; @@ -2289,7 +2289,7 @@ static void do_jobctl_trap(void) } } -static int ptrace_signal(int signr, siginfo_t *info) +static int ptrace_signal(int signr, kernel_siginfo_t *info) { /* * We do not check sig_kernel_stop(signr) but set this marker @@ -2889,14 +2889,14 @@ enum siginfo_layout siginfo_layout(int sig, int si_code) return layout; } -int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) +int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) { - if (copy_to_user(to, from , sizeof(struct siginfo))) + if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) return -EFAULT; return 0; } -int copy_siginfo_from_user(siginfo_t *to, const siginfo_t __user *from) +int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) { if (copy_from_user(to, from, sizeof(struct siginfo))) return -EFAULT; @@ -2905,13 +2905,13 @@ int copy_siginfo_from_user(siginfo_t *to, const siginfo_t __user *from) #ifdef CONFIG_COMPAT int copy_siginfo_to_user32(struct compat_siginfo __user *to, - const struct siginfo *from) + const struct kernel_siginfo *from) #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) { return __copy_siginfo_to_user32(to, from, in_x32_syscall()); } int __copy_siginfo_to_user32(struct compat_siginfo __user *to, - const struct siginfo *from, bool x32_ABI) + const struct kernel_siginfo *from, bool x32_ABI) #endif { struct compat_siginfo new; @@ -2995,7 +2995,7 @@ int __copy_siginfo_to_user32(struct compat_siginfo __user *to, return 0; } -int copy_siginfo_from_user32(struct siginfo *to, +int copy_siginfo_from_user32(struct kernel_siginfo *to, const struct compat_siginfo __user *ufrom) { struct compat_siginfo from; @@ -3085,7 +3085,7 @@ int copy_siginfo_from_user32(struct siginfo *to, * @info: if non-null, the signal's siginfo is returned here * @ts: upper bound on process time suspension */ -static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, +static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, const struct timespec *ts) { ktime_t *to = NULL, timeout = KTIME_MAX; @@ -3149,7 +3149,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, { sigset_t these; struct timespec ts; - siginfo_t info; + kernel_siginfo_t info; int ret; /* XXX: Don't preclude handling different sized sigset_t's. 
*/ @@ -3181,7 +3181,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, { sigset_t s; struct timespec t; - siginfo_t info; + kernel_siginfo_t info; long ret; if (sigsetsize != sizeof(sigset_t)) @@ -3213,7 +3213,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, */ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = sig; @@ -3226,7 +3226,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) } static int -do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) +do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) { struct task_struct *p; int error = -ESRCH; @@ -3257,7 +3257,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info; + struct kernel_siginfo info; clear_siginfo(&info); info.si_signo = sig; @@ -3304,7 +3304,7 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) return do_tkill(0, pid, sig); } -static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) +static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) { /* Not even root can pretend to send signals from the kernel. * Nor can they impersonate a kill()/tgkill(), which adds source info. @@ -3329,7 +3329,7 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { - siginfo_t info; + kernel_siginfo_t info; int ret = copy_siginfo_from_user(&info, uinfo); if (unlikely(ret)) return ret; @@ -3342,7 +3342,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + kernel_siginfo_t info; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; @@ -3350,7 +3350,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, } #endif -static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) +static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) @@ -3372,7 +3372,7 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { - siginfo_t info; + kernel_siginfo_t info; int ret = copy_siginfo_from_user(&info, uinfo); if (unlikely(ret)) return ret; @@ -3386,7 +3386,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + kernel_siginfo_t info; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; @@ -3968,10 +3968,57 @@ __weak const char *arch_vma_name(struct vm_area_struct *vma) return NULL; } -void __init signals_init(void) +static inline void siginfo_buildtime_checks(void) { BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); + /* Verify the offsets in the two siginfos match */ +#define CHECK_OFFSET(field) \ + BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) + + /* kill */ + CHECK_OFFSET(si_pid); + CHECK_OFFSET(si_uid); + + /* timer */ + CHECK_OFFSET(si_tid); + CHECK_OFFSET(si_overrun); + CHECK_OFFSET(si_value); + + /* rt */ + CHECK_OFFSET(si_pid); + CHECK_OFFSET(si_uid); + CHECK_OFFSET(si_value); + + /* sigchld */ + CHECK_OFFSET(si_pid); + CHECK_OFFSET(si_uid); + CHECK_OFFSET(si_status); + CHECK_OFFSET(si_utime); + CHECK_OFFSET(si_stime); 
+ + /* sigfault */ + CHECK_OFFSET(si_addr); + CHECK_OFFSET(si_addr_lsb); + CHECK_OFFSET(si_lower); + CHECK_OFFSET(si_upper); + CHECK_OFFSET(si_pkey); + + /* sigpoll */ + CHECK_OFFSET(si_band); + CHECK_OFFSET(si_fd); + + /* sigsys */ + CHECK_OFFSET(si_call_addr); + CHECK_OFFSET(si_syscall); + CHECK_OFFSET(si_arch); +#undef CHECK_OFFSET +} + +void __init signals_init(void) +{ + siginfo_buildtime_checks(); + sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 4b9127e95430..eabb4c22728d 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -308,7 +308,7 @@ static void common_hrtimer_rearm(struct k_itimer *timr) * To protect against the timer going away while the interrupt is queued, * we require that the it_requeue_pending flag be set. */ -void posixtimer_rearm(struct siginfo *info) +void posixtimer_rearm(struct kernel_siginfo *info) { struct k_itimer *timr; unsigned long flags; diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 8b8b70620bbe..cbcb8ba51142 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -732,7 +732,7 @@ static int apparmor_task_setrlimit(struct task_struct *task, return error; } -static int apparmor_task_kill(struct task_struct *target, struct siginfo *info, +static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info, int sig, const struct cred *cred) { struct aa_label *cl, *tl; diff --git a/security/security.c b/security/security.c index 736e78da1ab9..0d504fceda8b 100644 --- a/security/security.c +++ b/security/security.c @@ -1147,7 +1147,7 @@ int security_task_movememory(struct task_struct *p) return call_int_hook(task_movememory, 0, p); } -int security_task_kill(struct task_struct *p, struct siginfo *info, +int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { return call_int_hook(task_kill, 0, p, info, sig, cred); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ad9a9b8e9979..1b500b4c78a7 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4186,7 +4186,7 @@ static int selinux_task_movememory(struct task_struct *p) PROCESS__SETSCHED, NULL); } -static int selinux_task_kill(struct task_struct *p, struct siginfo *info, +static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { u32 secid; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 340fc30ad85d..025de76af1db 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2251,7 +2251,7 @@ static int smack_task_movememory(struct task_struct *p) * Return 0 if write access is permitted * */ -static int smack_task_kill(struct task_struct *p, struct siginfo *info, +static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { struct smk_audit_info ad; -- cgit v1.2.3 From 4ce5f9c9e7546915c559ffae594e6d73f918db00 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 25 Sep 2018 12:59:31 +0200 Subject: signal: Use a smaller struct siginfo in the kernel We reserve 128 bytes for struct siginfo but only use about 48 bytes on 64bit and 32 bytes on 32bit. Someday we might use more but it is unlikely to be anytime soon. Userspace seems content with just enough bytes of siginfo to implement sigqueue. Or in the case of checkpoint/restart reinjecting signals the kernel has sent. 
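As a rough illustration of the invariant this relies on (not taken from the patch itself, and assuming the SI_EXPANSION_SIZE definition introduced below is in scope): struct siginfo keeps its full SI_MAX_SIZE userspace ABI size, while the new struct kernel_siginfo carries only the fields the kernel actually fills in, and the leftover bytes become an "expansion" area handled explicitly on the copy-in/copy-out paths. Expressed as compile-time checks, with an invented wrapper name:

    static inline void demo_siginfo_abi_checks(void)
    {
            /* the userspace-visible siginfo keeps its historical 128-byte size */
            BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
            /* the kernel-internal copy is allowed to be (much) smaller; the
             * remainder is the SI_EXPANSION_SIZE area zero-filled on copy-out */
            BUILD_BUG_ON(sizeof(struct kernel_siginfo) > sizeof(struct siginfo));
    }
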
Reducing the stack footprint and the work to copy siginfo around from 2 cachelines to 1 cachelines seems worth doing even if I don't have benchmarks to show a performance difference. Suggested-by: Linus Torvalds Signed-off-by: "Eric W. Biederman" --- include/linux/signal.h | 2 ++ include/linux/signal_types.h | 5 +-- kernel/signal.c | 82 ++++++++++++++++++++++++++++++++++---------- 3 files changed, 67 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/include/linux/signal.h b/include/linux/signal.h index 70031b10b918..706a499d1eb1 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -22,6 +22,8 @@ static inline void clear_siginfo(kernel_siginfo_t *info) memset(info, 0, sizeof(*info)); } +#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) + int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 2a40a9c5e4ad..f8a90ae9c6ec 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -10,10 +10,7 @@ #include typedef struct kernel_siginfo { - union { - __SIGINFO; - int _si_pad[SI_MAX_SIZE/sizeof(int)]; - }; + __SIGINFO; } kernel_siginfo_t; /* diff --git a/kernel/signal.c b/kernel/signal.c index 161cad4e448c..1c2dd117fee0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2844,27 +2844,48 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, } #endif +static const struct { + unsigned char limit, layout; +} sig_sicodes[] = { + [SIGILL] = { NSIGILL, SIL_FAULT }, + [SIGFPE] = { NSIGFPE, SIL_FAULT }, + [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, + [SIGBUS] = { NSIGBUS, SIL_FAULT }, + [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, +#if defined(SIGEMT) + [SIGEMT] = { NSIGEMT, SIL_FAULT }, +#endif + [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, + [SIGPOLL] = { NSIGPOLL, SIL_POLL }, + [SIGSYS] = { NSIGSYS, SIL_SYS }, +}; + +static bool known_siginfo_layout(int sig, int si_code) +{ + if (si_code == SI_KERNEL) + return true; + else if ((si_code > SI_USER)) { + if (sig_specific_sicodes(sig)) { + if (si_code <= sig_sicodes[sig].limit) + return true; + } + else if (si_code <= NSIGPOLL) + return true; + } + else if (si_code >= SI_DETHREAD) + return true; + else if (si_code == SI_ASYNCNL) + return true; + return false; +} + enum siginfo_layout siginfo_layout(int sig, int si_code) { enum siginfo_layout layout = SIL_KILL; if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { - static const struct { - unsigned char limit, layout; - } filter[] = { - [SIGILL] = { NSIGILL, SIL_FAULT }, - [SIGFPE] = { NSIGFPE, SIL_FAULT }, - [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, - [SIGBUS] = { NSIGBUS, SIL_FAULT }, - [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, -#if defined(SIGEMT) - [SIGEMT] = { NSIGEMT, SIL_FAULT }, -#endif - [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, - [SIGPOLL] = { NSIGPOLL, SIL_POLL }, - [SIGSYS] = { NSIGSYS, SIL_SYS }, - }; - if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) { - layout = filter[sig].layout; + if ((sig < ARRAY_SIZE(sig_sicodes)) && + (si_code <= sig_sicodes[sig].limit)) { + layout = sig_sicodes[sig].layout; /* Handle the exceptions */ if ((sig == SIGBUS) && (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) @@ -2889,17 +2910,42 @@ enum siginfo_layout siginfo_layout(int sig, int si_code) return layout; } +static inline char __user *si_expansion(const siginfo_t __user *info) +{ + return ((char __user *)info) + sizeof(struct 
kernel_siginfo); +} + int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) { + char __user *expansion = si_expansion(to); if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) return -EFAULT; + if (clear_user(expansion, SI_EXPANSION_SIZE)) + return -EFAULT; return 0; } int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) { - if (copy_from_user(to, from, sizeof(struct siginfo))) + if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) return -EFAULT; + if (unlikely(!known_siginfo_layout(to->si_signo, to->si_code))) { + char __user *expansion = si_expansion(from); + char buf[SI_EXPANSION_SIZE]; + int i; + /* + * An unknown si_code might need more than + * sizeof(struct kernel_siginfo) bytes. Verify all of the + * extra bytes are 0. This guarantees copy_siginfo_to_user + * will return this data to userspace exactly. + */ + if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) + return -EFAULT; + for (i = 0; i < SI_EXPANSION_SIZE; i++) { + if (buf[i] != 0) + return -E2BIG; + } + } return 0; } -- cgit v1.2.3 From 9c2298aad355d8c1957df3015448fef333526934 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 4 Oct 2018 11:05:14 +0200 Subject: sched/core: Fix comment regarding nr_iowait_cpu() and get_iowait_load() The comment related to nr_iowait_cpu() and get_iowait_load() confuses cpufreq with cpuidle and is not very useful for this reason, so fix it. Signed-off-by: Rafael J. Wysocki Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Linux PM Cc: Tejun Heo Cc: Thomas Gleixner Fixes: e33a9bba85a8 "sched/core: move IO scheduling accounting from io_schedule_timeout() into scheduler" Link: http://lkml.kernel.org/r/3803514.xkx7zY50tF@aspire.rjw.lan Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 56b3c1781276..fe0223121883 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2916,10 +2916,10 @@ unsigned long nr_iowait(void) } /* - * Consumers of these two interfaces, like for example the cpufreq menu - * governor are using nonsensical data. Boosting frequency for a CPU that has - * IO-wait which might not even end up running the task when it does become - * runnable. + * Consumers of these two interfaces, like for example the cpuidle menu + * governor, are using nonsensical data. Preferring shallow idle state selection + * for a CPU that has IO-wait which might not even end up running the task when + * it does become runnable. */ unsigned long nr_iowait_cpu(int cpu) -- cgit v1.2.3 From 479adb89a97b0a33e5a9d702119872cc82ca21aa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 4 Oct 2018 13:28:08 -0700 Subject: cgroup: Fix dom_cgrp propagation when enabling threaded mode A cgroup which is already a threaded domain may be converted into a threaded cgroup if the prerequisite conditions are met. When this happens, all threaded descendant should also have their ->dom_cgrp updated to the new threaded domain cgroup. Unfortunately, this propagation was missing leading to the following failure. 
# cd /sys/fs/cgroup/unified # cat cgroup.subtree_control # show that no controllers are enabled # mkdir -p mycgrp/a/b/c # echo threaded > mycgrp/a/b/cgroup.type At this point, the hierarchy looks as follows: mycgrp [d] a [dt] b [t] c [inv] Now let's make node "a" threaded (and thus "mycgrp" s made "domain threaded"): # echo threaded > mycgrp/a/cgroup.type By this point, we now have a hierarchy that looks as follows: mycgrp [dt] a [t] b [t] c [inv] But, when we try to convert the node "c" from "domain invalid" to "threaded", we get ENOTSUP on the write(): # echo threaded > mycgrp/a/b/c/cgroup.type sh: echo: write error: Operation not supported This patch fixes the problem by * Moving the opencoded ->dom_cgrp save and restoration in cgroup_enable_threaded() into cgroup_{save|restore}_control() so that mulitple cgroups can be handled. * Updating all threaded descendants' ->dom_cgrp to point to the new dom_cgrp when enabling threaded mode. Signed-off-by: Tejun Heo Reported-and-tested-by: "Michael Kerrisk (man-pages)" Reported-by: Amin Jamali Reported-by: Joao De Almeida Pereira Link: https://lore.kernel.org/r/CAKgNAkhHYCMn74TCNiMJ=ccLd7DcmXSbvw3CbZ1YREeG7iJM5g@mail.gmail.com Fixes: 454000adaa2a ("cgroup: introduce cgroup->dom_cgrp and threaded css_set handling") Cc: stable@vger.kernel.org # v4.14+ --- include/linux/cgroup-defs.h | 1 + kernel/cgroup/cgroup.c | 25 ++++++++++++++++--------- 2 files changed, 17 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ff20b677fb9f..22254c1fe1c5 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -412,6 +412,7 @@ struct cgroup { * specific task are charged to the dom_cgrp. */ struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; /* used while enabling threaded */ /* per-cpu recursive resource statistics */ struct cgroup_rstat_cpu __percpu *rstat_cpu; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index aae10baf1902..4a3dae2a8283 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2836,11 +2836,12 @@ restart: } /** - * cgroup_save_control - save control masks of a subtree + * cgroup_save_control - save control masks and dom_cgrp of a subtree * @cgrp: root of the target subtree * - * Save ->subtree_control and ->subtree_ss_mask to the respective old_ - * prefixed fields for @cgrp's subtree including @cgrp itself. + * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the + * respective old_ prefixed fields for @cgrp's subtree including @cgrp + * itself. */ static void cgroup_save_control(struct cgroup *cgrp) { @@ -2850,6 +2851,7 @@ static void cgroup_save_control(struct cgroup *cgrp) cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { dsct->old_subtree_control = dsct->subtree_control; dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; + dsct->old_dom_cgrp = dsct->dom_cgrp; } } @@ -2875,11 +2877,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp) } /** - * cgroup_restore_control - restore control masks of a subtree + * cgroup_restore_control - restore control masks and dom_cgrp of a subtree * @cgrp: root of the target subtree * - * Restore ->subtree_control and ->subtree_ss_mask from the respective old_ - * prefixed fields for @cgrp's subtree including @cgrp itself. + * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the + * respective old_ prefixed fields for @cgrp's subtree including @cgrp + * itself. 
*/ static void cgroup_restore_control(struct cgroup *cgrp) { @@ -2889,6 +2892,7 @@ static void cgroup_restore_control(struct cgroup *cgrp) cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { dsct->subtree_control = dsct->old_subtree_control; dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; + dsct->dom_cgrp = dsct->old_dom_cgrp; } } @@ -3196,6 +3200,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp) { struct cgroup *parent = cgroup_parent(cgrp); struct cgroup *dom_cgrp = parent->dom_cgrp; + struct cgroup *dsct; + struct cgroup_subsys_state *d_css; int ret; lockdep_assert_held(&cgroup_mutex); @@ -3225,12 +3231,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp) */ cgroup_save_control(cgrp); - cgrp->dom_cgrp = dom_cgrp; + cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) + if (dsct == cgrp || cgroup_is_threaded(dsct)) + dsct->dom_cgrp = dom_cgrp; + ret = cgroup_apply_control(cgrp); if (!ret) parent->nr_threaded_children++; - else - cgrp->dom_cgrp = cgrp; cgroup_finalize_control(cgrp, ret); return ret; -- cgit v1.2.3 From d67f34c19a679436dd2963b588015e119279e7a8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 17 Sep 2018 14:45:34 +0200 Subject: clocksource: Provide clocksource_arch_init() Architectures have extra archdata in the clocksource, e.g. for VDSO support. There are no sanity checks or general initializations for this available. Add support for that. Signed-off-by: Thomas Gleixner Acked-by: Andy Lutomirski Acked-by: John Stultz Cc: Peter Zijlstra Cc: Matt Rickard Cc: Stephen Boyd Cc: Florian Weimer Cc: "K. Y. Srinivasan" Cc: Vitaly Kuznetsov Cc: devel@linuxdriverproject.org Cc: virtualization@lists.linux-foundation.org Cc: Paolo Bonzini Cc: Arnd Bergmann Cc: Juergen Gross Link: https://lkml.kernel.org/r/20180917130706.973042587@linutronix.de --- include/linux/clocksource.h | 5 +++++ kernel/time/Kconfig | 4 ++++ kernel/time/clocksource.c | 2 ++ 3 files changed, 11 insertions(+) (limited to 'kernel') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 308918928767..6e6b86f9046d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz __clocksource_update_freq_scale(cs, 1000, khz); } +#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT +extern void clocksource_arch_init(struct clocksource *cs); +#else +static inline void clocksource_arch_init(struct clocksource *cs) { } +#endif extern int timekeeping_notify(struct clocksource *clock); diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index 78eabc41eaa6..58b981f4bb5d 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -12,6 +12,10 @@ config CLOCKSOURCE_WATCHDOG config ARCH_CLOCKSOURCE_DATA bool +# Architecture has extra clocksource init called from registration +config ARCH_CLOCKSOURCE_INIT + bool + # Clocksources require validation of the clocksource against the last # cycle update - x86/TSC misfeature config CLOCKSOURCE_VALIDATE_LAST_CYCLE diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 0e6e97a01942..ffe081623aec 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -937,6 +937,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) { unsigned long flags; + clocksource_arch_init(cs); + /* Initialize mult/shift and max_idle_ns */ __clocksource_update_freq_scale(cs, scale, freq); -- cgit v1.2.3 From 1fc8e6423edb4bba365b0780c2fcddfb921b24b2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck 
Date: Wed, 3 Oct 2018 16:48:07 -0700 Subject: dma-direct: fix return value of dma_direct_supported It appears that in commit 9d7a224b463e ("dma-direct: always allow dma mask <= physiscal memory size") the logic of the test was changed from a "<" to a ">=" however I don't see any reason for that change. I am assuming that there was some additional change planned, specifically I suspect the logic was intended to be reversed and possibly used for a return. Since that is the case I have gone ahead and done that. This addresses issues I had on my system that prevented me from booting with the above mentioned commit applied on an x86_64 system w/ Intel IOMMU. Fixes: 9d7a224b463e ("dma-direct: always allow dma mask <= physiscal memory size") Signed-off-by: Alexander Duyck Acked-by: Robin Murphy Signed-off-by: Christoph Hellwig --- kernel/dma/direct.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 170bd322a94a..674a8da22844 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -301,9 +301,7 @@ int dma_direct_supported(struct device *dev, u64 mask) min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT); - if (mask >= phys_to_dma(dev, min_mask)) - return 0; - return 1; + return mask >= phys_to_dma(dev, min_mask); } int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) -- cgit v1.2.3 From d0e7d14455d41163126afecd0fcce935463cc512 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 4 Oct 2018 19:22:27 +0200 Subject: cpu/SMT: State SMT is disabled even with nosmt and without "=force" When booting with "nosmt=force" a message is issued into dmesg to confirm that SMT has been force-disabled but such a message is not issued when only "nosmt" is on the kernel command line. Fix that. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181004172227.10094-1-bp@alien8.de Signed-off-by: Ingo Molnar --- kernel/cpu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 0097acec1c71..f1338452d998 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -362,6 +362,7 @@ void __init cpu_smt_disable(bool force) pr_info("SMT: Force disabled\n"); cpu_smt_control = CPU_SMT_FORCE_DISABLED; } else { + pr_info("SMT: disabled\n"); cpu_smt_control = CPU_SMT_DISABLED; } } -- cgit v1.2.3 From 277fcdb2cfee38ccdbe07e705dbd4896ba0c9930 Mon Sep 17 00:00:00 2001 From: He Zhe Date: Sun, 30 Sep 2018 00:45:50 +0800 Subject: printk: Fix panic caused by passing log_buf_len to command line log_buf_len_setup does not check input argument before passing it to simple_strtoull. The argument would be a NULL pointer if "log_buf_len", without its value, is set in command line and thus causes the following panic. PANIC: early exception 0xe3 IP 10:ffffffffaaeacd0d error 0 cr2 0x0 [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.19.0-rc4-yocto-standard+ #1 [ 0.000000] RIP: 0010:_parse_integer_fixup_radix+0xd/0x70 ... [ 0.000000] Call Trace: [ 0.000000] simple_strtoull+0x29/0x70 [ 0.000000] memparse+0x26/0x90 [ 0.000000] log_buf_len_setup+0x17/0x22 [ 0.000000] do_early_param+0x57/0x8e [ 0.000000] parse_args+0x208/0x320 [ 0.000000] ? rdinit_setup+0x30/0x30 [ 0.000000] parse_early_options+0x29/0x2d [ 0.000000] ? 
rdinit_setup+0x30/0x30 [ 0.000000] parse_early_param+0x36/0x4d [ 0.000000] setup_arch+0x336/0x99e [ 0.000000] start_kernel+0x6f/0x4ee [ 0.000000] x86_64_start_reservations+0x24/0x26 [ 0.000000] x86_64_start_kernel+0x6f/0x72 [ 0.000000] secondary_startup_64+0xa4/0xb0 This patch adds a check to prevent the panic. Link: http://lkml.kernel.org/r/1538239553-81805-1-git-send-email-zhe.he@windriver.com Cc: stable@vger.kernel.org Cc: rostedt@goodmis.org Cc: linux-kernel@vger.kernel.org Signed-off-by: He Zhe Reviewed-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index c5b568c2d167..a14f15ad0f35 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1049,7 +1049,12 @@ static void __init log_buf_len_update(unsigned size) /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { - unsigned size = memparse(str, &str); + unsigned int size; + + if (!str) + return -EINVAL; + + size = memparse(str, &str); log_buf_len_update(size); -- cgit v1.2.3 From 51a72ab7372d85c96104e58036f1b49ba11e5d2b Mon Sep 17 00:00:00 2001 From: He Zhe Date: Sun, 30 Sep 2018 00:45:51 +0800 Subject: printk: Correct wrong casting log_first_seq and console_seq are 64-bit unsigned integers. Correct a wrong casting that might cut off the output. Link: http://lkml.kernel.org/r/1538239553-81805-2-git-send-email-zhe.he@windriver.com Cc: rostedt@goodmis.org Cc: linux-kernel@vger.kernel.org Signed-off-by: He Zhe [sergey.senozhatsky@gmail.com: More descriptive commit message] Reviewed-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a14f15ad0f35..d4704baf6aaf 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2358,8 +2358,9 @@ again: printk_safe_enter_irqsave(flags); raw_spin_lock(&logbuf_lock); if (console_seq < log_first_seq) { - len = sprintf(text, "** %u printk messages dropped **\n", - (unsigned)(log_first_seq - console_seq)); + len = sprintf(text, + "** %llu printk messages dropped **\n", + log_first_seq - console_seq); /* messages are gone, move to first one */ console_seq = log_first_seq; -- cgit v1.2.3 From dd5adbfbfc50ff998909660c6158c0488f7b6a2b Mon Sep 17 00:00:00 2001 From: He Zhe Date: Sun, 30 Sep 2018 00:45:52 +0800 Subject: printk: Add KBUILD_MODNAME and remove a redundant print prefix Add KBUILD_MODNAME to make prints more clear. 
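For context, the cleanup works because the pr_*() helpers expand their format string through pr_fmt() before printing, so defining pr_fmt() in terms of KBUILD_MODNAME at the top of a file prefixes every message emitted from that file. A minimal, hypothetical module (all names here are invented for the example) showing the same idiom:

    /* must come before the printk headers so pr_info() picks it up */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/init.h>

    static int __init pr_fmt_demo_init(void)
    {
            /* assuming the object is built as pr_fmt_demo.o, this logs
             * "pr_fmt_demo: hello" without spelling the prefix out here */
            pr_info("hello\n");
            return 0;
    }
    module_init(pr_fmt_demo_init);

    static void __exit pr_fmt_demo_exit(void)
    {
            pr_info("goodbye\n");
    }
    module_exit(pr_fmt_demo_exit);

    MODULE_LICENSE("GPL");
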
Link: http://lkml.kernel.org/r/1538239553-81805-3-git-send-email-zhe.he@windriver.com Cc: rostedt@goodmis.org Cc: linux-kernel@vger.kernel.org Signed-off-by: He Zhe Reviewed-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index d4704baf6aaf..15f3e70be448 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -16,6 +16,8 @@ * 01Mar01 Andrew Morton */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -2699,7 +2701,7 @@ void register_console(struct console *newcon) if (newcon->flags & CON_EXTENDED) if (!nr_ext_console_drivers++) - pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n"); + pr_info("continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n"); if (newcon->flags & CON_PRINTBUFFER) { /* -- cgit v1.2.3 From b799207e1e1816b09e7a5920fbb2d5fcf6edd681 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 5 Oct 2018 18:17:59 +0200 Subject: bpf: 32-bit RSH verification must truncate input before the ALU op When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it is sufficient to just truncate the output to 32 bits; and so I just moved the register size coercion that used to be at the start of the function to the end of the function. That assumption is true for almost every op, but not for 32-bit right shifts, because those can propagate information towards the least significant bit. Fix it by always truncating inputs for 32-bit ops to 32 bits. Also get rid of the coerce_reg_to_size() after the ALU op, since that has no effect. Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification") Acked-by: Daniel Borkmann Signed-off-by: Jann Horn Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bb07e74b34a2..465952a8e465 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; + if (insn_bitness == 32) { + /* Relevant for 32-bit RSH: Information can propagate towards + * LSB, so it isn't sufficient to only truncate the output to + * 32 bits. + */ + coerce_reg_to_size(dst_reg, 4); + coerce_reg_to_size(&src_reg, 4); + } + smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; @@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); - coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); -- cgit v1.2.3 From 9cf38d5559e813cccdba8b44c82cc46ba48d0896 Mon Sep 17 00:00:00 2001 From: Lianbo Jiang Date: Sun, 30 Sep 2018 11:10:31 +0800 Subject: kexec: Allocate decrypted control pages for kdump if SME is enabled When SME is enabled in the first kernel, it needs to allocate decrypted pages for kdump because when the kdump kernel boots, these pages need to be accessed decrypted in the initial boot stage, before SME is enabled. [ bp: clean up text. 
] Signed-off-by: Lianbo Jiang Signed-off-by: Borislav Petkov Reviewed-by: Tom Lendacky Cc: kexec@lists.infradead.org Cc: tglx@linutronix.de Cc: mingo@redhat.com Cc: hpa@zytor.com Cc: akpm@linux-foundation.org Cc: dan.j.williams@intel.com Cc: bhelgaas@google.com Cc: baiyaowei@cmss.chinamobile.com Cc: tiwai@suse.de Cc: brijesh.singh@amd.com Cc: dyoung@redhat.com Cc: bhe@redhat.com Cc: jroedel@suse.de Link: https://lkml.kernel.org/r/20180930031033.22110-3-lijiang@redhat.com --- kernel/kexec_core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 23a83a4da38a..86ef06d3dbe3 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -471,6 +471,10 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image, } } + /* Ensure that these pages are decrypted if SME is enabled. */ + if (pages) + arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0); + return pages; } @@ -867,6 +871,7 @@ static int kimage_load_crash_segment(struct kimage *image, result = -ENOMEM; goto out; } + arch_kexec_post_alloc_pages(page_address(page), 1, 0); ptr = kmap(page); ptr += maddr & ~PAGE_MASK; mchunk = min_t(size_t, mbytes, @@ -884,6 +889,7 @@ static int kimage_load_crash_segment(struct kimage *image, result = copy_from_user(ptr, buf, uchunk); kexec_flush_icache_page(page); kunmap(page); + arch_kexec_pre_free_pages(page_address(page), 1); if (result) { result = -EFAULT; goto out; -- cgit v1.2.3 From 601d5abfeaf244b86bb68c1e05c6e0d57be2f6b0 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 5 Oct 2018 09:02:48 +0200 Subject: signal: In sigqueueinfo prefer sig not si_signo Andrei Vagin reported: > Accoding to the man page, the user should not set si_signo, it has to be set > by kernel. > > $ man 2 rt_sigqueueinfo > > The uinfo argument specifies the data to accompany the signal. This > argument is a pointer to a structure of type siginfo_t, described in > sigaction(2) (and defined by including ). The caller > should set the following fields in this structure: > > si_code > This must be one of the SI_* codes in the Linux kernel source > file include/asm-generic/siginfo.h, with the restriction that > the code must be negative (i.e., cannot be SI_USER, which is > used by the kernel to indicate a signal sent by kill(2)) and > cannot (since Linux 2.6.39) be SI_TKILL (which is used by the > kernel to indicate a signal sent using tgkill(2)). > > si_pid This should be set to a process ID, typically the process ID of > the sender. > > si_uid This should be set to a user ID, typically the real user ID of > the sender. > > si_value > This field contains the user data to accompany the signal. For > more information, see the description of the last (union sigval) > argument of sigqueue(3). > > Internally, the kernel sets the si_signo field to the value specified > in sig, so that the receiver of the signal can also obtain the signal > number via that field. > > On Tue, Sep 25, 2018 at 07:19:02PM +0200, Eric W. Biederman wrote: >> >> If there is some application that calls sigqueueinfo directly that has >> a problem with this added sanity check we can revisit this when we see >> what kind of crazy that application is doing. > > > I already know two "applications" ;) > > https://github.com/torvalds/linux/blob/master/tools/testing/selftests/ptrace/peeksiginfo.c > https://github.com/checkpoint-restore/criu/blob/master/test/zdtm/static/sigpending.c > > Disclaimer: I'm the author of both of them. 
Looking at the kernel code the historical behavior has alwasy been to prefer the signal number passed in by the kernel. So sigh. Implmenet __copy_siginfo_from_user and __copy_siginfo_from_user32 to take that signal number and prefer it. The user of ptrace will still use copy_siginfo_from_user and copy_siginfo_from_user32 as they do not and never have had a signal number there. Luckily this change has never made it farther than linux-next. Fixes: e75dc036c445 ("signal: Fail sigqueueinfo if si_signo != sig") Reported-by: Andrei Vagin Tested-by: Andrei Vagin Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 141 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 84 insertions(+), 57 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 1c2dd117fee0..2bffc5a50183 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2925,11 +2925,10 @@ int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) return 0; } -int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) +static int post_copy_siginfo_from_user(kernel_siginfo_t *info, + const siginfo_t __user *from) { - if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) - return -EFAULT; - if (unlikely(!known_siginfo_layout(to->si_signo, to->si_code))) { + if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { char __user *expansion = si_expansion(from); char buf[SI_EXPANSION_SIZE]; int i; @@ -2949,6 +2948,22 @@ int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) return 0; } +static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, + const siginfo_t __user *from) +{ + if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) + return -EFAULT; + to->si_signo = signo; + return post_copy_siginfo_from_user(to, from); +} + +int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) +{ + if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) + return -EFAULT; + return post_copy_siginfo_from_user(to, from); +} + #ifdef CONFIG_COMPAT int copy_siginfo_to_user32(struct compat_siginfo __user *to, const struct kernel_siginfo *from) @@ -3041,88 +3056,106 @@ int __copy_siginfo_to_user32(struct compat_siginfo __user *to, return 0; } -int copy_siginfo_from_user32(struct kernel_siginfo *to, - const struct compat_siginfo __user *ufrom) +static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, + const struct compat_siginfo *from) { - struct compat_siginfo from; - - if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) - return -EFAULT; - clear_siginfo(to); - to->si_signo = from.si_signo; - to->si_errno = from.si_errno; - to->si_code = from.si_code; - switch(siginfo_layout(from.si_signo, from.si_code)) { + to->si_signo = from->si_signo; + to->si_errno = from->si_errno; + to->si_code = from->si_code; + switch(siginfo_layout(from->si_signo, from->si_code)) { case SIL_KILL: - to->si_pid = from.si_pid; - to->si_uid = from.si_uid; + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; break; case SIL_TIMER: - to->si_tid = from.si_tid; - to->si_overrun = from.si_overrun; - to->si_int = from.si_int; + to->si_tid = from->si_tid; + to->si_overrun = from->si_overrun; + to->si_int = from->si_int; break; case SIL_POLL: - to->si_band = from.si_band; - to->si_fd = from.si_fd; + to->si_band = from->si_band; + to->si_fd = from->si_fd; break; case SIL_FAULT: - to->si_addr = compat_ptr(from.si_addr); + to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO - 
to->si_trapno = from.si_trapno; + to->si_trapno = from->si_trapno; #endif break; case SIL_FAULT_MCEERR: - to->si_addr = compat_ptr(from.si_addr); + to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO - to->si_trapno = from.si_trapno; + to->si_trapno = from->si_trapno; #endif - to->si_addr_lsb = from.si_addr_lsb; + to->si_addr_lsb = from->si_addr_lsb; break; case SIL_FAULT_BNDERR: - to->si_addr = compat_ptr(from.si_addr); + to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO - to->si_trapno = from.si_trapno; + to->si_trapno = from->si_trapno; #endif - to->si_lower = compat_ptr(from.si_lower); - to->si_upper = compat_ptr(from.si_upper); + to->si_lower = compat_ptr(from->si_lower); + to->si_upper = compat_ptr(from->si_upper); break; case SIL_FAULT_PKUERR: - to->si_addr = compat_ptr(from.si_addr); + to->si_addr = compat_ptr(from->si_addr); #ifdef __ARCH_SI_TRAPNO - to->si_trapno = from.si_trapno; + to->si_trapno = from->si_trapno; #endif - to->si_pkey = from.si_pkey; + to->si_pkey = from->si_pkey; break; case SIL_CHLD: - to->si_pid = from.si_pid; - to->si_uid = from.si_uid; - to->si_status = from.si_status; + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; + to->si_status = from->si_status; #ifdef CONFIG_X86_X32_ABI if (in_x32_syscall()) { - to->si_utime = from._sifields._sigchld_x32._utime; - to->si_stime = from._sifields._sigchld_x32._stime; + to->si_utime = from->_sifields._sigchld_x32._utime; + to->si_stime = from->_sifields._sigchld_x32._stime; } else #endif { - to->si_utime = from.si_utime; - to->si_stime = from.si_stime; + to->si_utime = from->si_utime; + to->si_stime = from->si_stime; } break; case SIL_RT: - to->si_pid = from.si_pid; - to->si_uid = from.si_uid; - to->si_int = from.si_int; + to->si_pid = from->si_pid; + to->si_uid = from->si_uid; + to->si_int = from->si_int; break; case SIL_SYS: - to->si_call_addr = compat_ptr(from.si_call_addr); - to->si_syscall = from.si_syscall; - to->si_arch = from.si_arch; + to->si_call_addr = compat_ptr(from->si_call_addr); + to->si_syscall = from->si_syscall; + to->si_arch = from->si_arch; break; } return 0; } + +static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, + const struct compat_siginfo __user *ufrom) +{ + struct compat_siginfo from; + + if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) + return -EFAULT; + + from.si_signo = signo; + return post_copy_siginfo_from_user32(to, &from); +} + +int copy_siginfo_from_user32(struct kernel_siginfo *to, + const struct compat_siginfo __user *ufrom) +{ + struct compat_siginfo from; + + if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) + return -EFAULT; + + return post_copy_siginfo_from_user32(to, &from); +} #endif /* CONFIG_COMPAT */ /** @@ -3359,9 +3392,6 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) (task_pid_vnr(current) != pid)) return -EPERM; - if (info->si_signo != sig) - return -EINVAL; - /* POSIX.1b doesn't mention process groups. 
*/ return kill_proc_info(sig, info, pid); } @@ -3376,7 +3406,7 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { kernel_siginfo_t info; - int ret = copy_siginfo_from_user(&info, uinfo); + int ret = __copy_siginfo_from_user(sig, &info, uinfo); if (unlikely(ret)) return ret; return do_rt_sigqueueinfo(pid, sig, &info); @@ -3389,7 +3419,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, struct compat_siginfo __user *, uinfo) { kernel_siginfo_t info; - int ret = copy_siginfo_from_user32(&info, uinfo); + int ret = __copy_siginfo_from_user32(sig, &info, uinfo); if (unlikely(ret)) return ret; return do_rt_sigqueueinfo(pid, sig, &info); @@ -3409,9 +3439,6 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t (task_pid_vnr(current) != pid)) return -EPERM; - if (info->si_signo != sig) - return -EINVAL; - return do_send_specific(tgid, pid, sig, info); } @@ -3419,7 +3446,7 @@ SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { kernel_siginfo_t info; - int ret = copy_siginfo_from_user(&info, uinfo); + int ret = __copy_siginfo_from_user(sig, &info, uinfo); if (unlikely(ret)) return ret; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); @@ -3433,7 +3460,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, struct compat_siginfo __user *, uinfo) { kernel_siginfo_t info; - int ret = copy_siginfo_from_user32(&info, uinfo); + int ret = __copy_siginfo_from_user32(sig, &info, uinfo); if (unlikely(ret)) return ret; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); -- cgit v1.2.3 From 99c65fa7c59ff558e70db8aa61bbdece5d3a9588 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 8 Oct 2018 00:20:07 -0700 Subject: dma-debug: Check for drivers mapping invalid addresses in dma_map_single() I recently debugged a DMA mapping oops where a driver was trying to map a buffer returned from request_firmware() with dma_map_single(). Memory returned from request_firmware() is mapped into the vmalloc region and this isn't a valid region to map with dma_map_single() per the DMA documentation's "What memory is DMA'able?" section. Unfortunately, we don't really check that in the DMA debugging code, so enabling DMA debugging doesn't help catch this problem. Let's add a new DMA debug function to check for a vmalloc address or an invalid virtual address and print a warning if this happens. This makes it a little easier to debug these sorts of problems, instead of seeing odd behavior or crashes when drivers attempt to map the vmalloc space for DMA. 
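As a purely illustrative sketch (the device, firmware name and function are all made up), this is the kind of driver mistake the new check is meant to catch: memory handed back by request_firmware() lives in the vmalloc area and must not be passed to dma_map_single() directly.

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/dma-mapping.h>

    static int demo_dma_fw_bug(struct device *dev)
    {
            const struct firmware *fw;
            dma_addr_t dma;
            int ret;

            ret = request_firmware(&fw, "demo-fw.bin", dev);
            if (ret)
                    return ret;

            /*
             * BAD: fw->data is vmalloc()ed, so this mapping is invalid.  With
             * CONFIG_DMA_API_DEBUG the new debug_dma_map_single() hook warns
             * here instead of the DMA silently misbehaving.  The fix is to
             * copy the data into a kmalloc()ed (linear-mapped) bounce buffer.
             */
            dma = dma_map_single(dev, (void *)fw->data, fw->size, DMA_TO_DEVICE);
            if (!dma_mapping_error(dev, dma))
                    dma_unmap_single(dev, dma, fw->size, DMA_TO_DEVICE);

            release_firmware(fw);
            return 0;
    }
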
Cc: Marek Szyprowski Reviewed-by: Robin Murphy Signed-off-by: Stephen Boyd Signed-off-by: Christoph Hellwig --- include/linux/dma-debug.h | 8 ++++++++ include/linux/dma-mapping.h | 1 + kernel/dma/debug.c | 16 ++++++++++++++++ 3 files changed, 25 insertions(+) (limited to 'kernel') diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index a785f2507159..30213adbb6b9 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus); extern int dma_debug_resize_entries(u32 num_entries); +extern void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len); + extern void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, @@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries) return 0; } +static inline void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len) +{ +} + static inline void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 562af6b45f23..547a48bcfa3d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -229,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); + debug_dma_map_single(dev, ptr, size); addr = ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index c007d25bee09..231ca4628062 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -1312,6 +1312,22 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg) #endif } +void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len) +{ + if (unlikely(dma_debug_disabled())) + return; + + if (!virt_addr_valid(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from invalid area [addr=%p] [len=%lu]\n", + addr, len); + + if (is_vmalloc_addr(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n", + addr, len); +} +EXPORT_SYMBOL(debug_dma_map_single); + void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, bool map_single) -- cgit v1.2.3 From c941ce9c282cc606e6517356fcc186a9da2b4ab9 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Sun, 7 Oct 2018 12:56:47 +0100 Subject: bpf: add verifier callback to get stack usage info for offloaded progs In preparation for BPF-to-BPF calls in offloaded programs, add a new function attribute to the struct bpf_prog_offload_ops so that drivers supporting eBPF offload can hook at the end of program verification, and potentially extract information collected by the verifier. Implement a minimal callback (returning 0) in the drivers providing the structs, namely netdevsim and nfp. This will be useful in the nfp driver, in later commits, to extract the number of subprograms as well as the stack depth for those subprograms. 
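A hypothetical driver-side sketch of what the new hook enables (not taken from any in-tree driver; the demo_* names are invented and the subprog_cnt/subprog_info fields are assumed from include/linux/bpf_verifier.h of this series): once verification has finished, ->finalize can copy per-subprogram details such as stack depth out of the verifier state for later use by the offload/JIT path.

    #include <linux/bpf.h>
    #include <linux/bpf_verifier.h>

    struct demo_offload_prog {
            unsigned int subprog_cnt;
            u16 stack_depth[BPF_MAX_SUBPROGS];
    };

    static int demo_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                                int prev_insn_idx)
    {
            /* the driver's existing per-instruction verification hook */
            return 0;
    }

    static int demo_bpf_finalize(struct bpf_verifier_env *env)
    {
            struct demo_offload_prog *prog = env->prog->aux->offload->dev_priv;
            unsigned int i;

            prog->subprog_cnt = env->subprog_cnt;
            for (i = 0; i < env->subprog_cnt; i++)
                    prog->stack_depth[i] = env->subprog_info[i].stack_depth;

            return 0;
    }

    static const struct bpf_prog_offload_ops demo_bpf_offload_ops = {
            .insn_hook = demo_verify_insn,
            .finalize  = demo_bpf_finalize,
    };
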
Signed-off-by: Quentin Monnet Reviewed-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 8 +++++++- drivers/net/netdevsim/bpf.c | 8 +++++++- include/linux/bpf.h | 1 + include/linux/bpf_verifier.h | 1 + kernel/bpf/offload.c | 18 ++++++++++++++++++ kernel/bpf/verifier.c | 3 +++ 6 files changed, 37 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a6e9248669e1..e470489021e3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -640,6 +640,12 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return 0; } +static int nfp_bpf_finalize(struct bpf_verifier_env *env) +{ + return 0; +} + const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { - .insn_hook = nfp_verify_insn, + .insn_hook = nfp_verify_insn, + .finalize = nfp_bpf_finalize, }; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 81444208b216..cb3518474f0e 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -86,8 +86,14 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) return 0; } +static int nsim_bpf_finalize(struct bpf_verifier_env *env) +{ + return 0; +} + static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { - .insn_hook = nsim_bpf_verify_insn, + .insn_hook = nsim_bpf_verify_insn, + .finalize = nsim_bpf_finalize, }; static bool nsim_xdp_offload_active(struct netdevsim *ns) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 027697b6a22f..9b558713447f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -263,6 +263,7 @@ struct bpf_verifier_ops { struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); + int (*finalize)(struct bpf_verifier_env *env); }; struct bpf_prog_offload { diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7b6fd2ab3263..9e8056ec20fa 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -245,5 +245,6 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); +int bpf_prog_offload_finalize(struct bpf_verifier_env *env); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 177a52436394..8e93c47f0779 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -172,6 +172,24 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, return ret; } +int bpf_prog_offload_finalize(struct bpf_verifier_env *env) +{ + struct bpf_prog_offload *offload; + int ret = -ENODEV; + + down_read(&bpf_devs_lock); + offload = env->prog->aux->offload; + if (offload) { + if (offload->dev_ops->finalize) + ret = offload->dev_ops->finalize(env); + else + ret = 0; + } + up_read(&bpf_devs_lock); + + return ret; +} + static void __bpf_prog_offload_destroy(struct bpf_prog *prog) { struct bpf_prog_offload *offload = prog->aux->offload; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 73c81bef6ae8..a0454cb299ba 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6309,6 +6309,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) env->cur_state = NULL; } + 
if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) + ret = bpf_prog_offload_finalize(env); + skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); -- cgit v1.2.3 From e4052d06a5195b29271a7af262711d69f9ecfd04 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Sun, 7 Oct 2018 12:56:58 +0100 Subject: bpf: allow offload of programs with BPF-to-BPF function calls Now that there is at least one driver supporting BPF-to-BPF function calls, lift the restriction, in the verifier, on hardware offload of eBPF programs containing such calls. But prevent jit_subprogs(), still in the verifier, from being run for offloaded programs. Signed-off-by: Quentin Monnet Reviewed-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a0454cb299ba..73cc136915fe 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1009,10 +1009,6 @@ static int check_subprogs(struct bpf_verifier_env *env) verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } - if (bpf_prog_is_dev_bound(env->prog->aux)) { - verbose(env, "function calls in offloaded programs are not supported yet\n"); - return -EINVAL; - } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; @@ -5968,10 +5964,10 @@ static int fixup_call_args(struct bpf_verifier_env *env) struct bpf_insn *insn = prog->insnsi; int i, depth; #endif - int err; + int err = 0; - err = 0; - if (env->prog->jit_requested) { + if (env->prog->jit_requested && + !bpf_prog_is_dev_bound(env->prog->aux)) { err = jit_subprogs(env); if (err == 0) return 0; -- cgit v1.2.3 From 79ac32a427f5d1211fa417021fd04c36f63ab917 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 1 Oct 2018 07:40:53 -0700 Subject: dma-direct: document the zone selection logic What we are doing here isn't quite obvious, so add a comment explaining it. Signed-off-by: Christoph Hellwig --- kernel/dma/direct.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 674a8da22844..a5a943836c8e 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -84,7 +84,14 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, else *phys_mask = dma_to_phys(dev, dma_mask); - /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ + /* + * Optimistically try the zone that the physical address mask falls + * into first. If that returns memory that isn't actually addressable + * we will fallback to the next lower zone and try again. + * + * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding + * zones. + */ if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) return GFP_DMA; if (*phys_mask <= DMA_BIT_MASK(32)) -- cgit v1.2.3 From b8d62f33b7b225935649ab165d901fe8dd7f95e5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 8 Oct 2018 13:17:26 +0200 Subject: genirq: Fix grammar s/an /a / Fix a grammar mistake in . [ mingo: While at it also fix another similar error in another comment as well. 
] Signed-off-by: Geert Uytterhoeven Cc: Jiri Kosina Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181008111726.26286-1-geert%2Brenesas@glider.be Signed-off-by: Ingo Molnar --- include/linux/interrupt.h | 2 +- kernel/irq/irqdomain.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index eeceac3376fc..1d6711c28271 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -45,7 +45,7 @@ * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is - * registered first in an shared interrupt is considered for + * registered first in a shared interrupt is considered for * performance reasons) * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 3b30a4aeb0db..3366d11c3e02 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -867,7 +867,7 @@ void irq_dispose_mapping(unsigned int virq) EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** - * irq_find_mapping() - Find a linux irq from an hw irq number. + * irq_find_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space */ -- cgit v1.2.3 From 8ca2b56cd7da98fc8f8d787bb706b9d6c8674a3b Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Wed, 3 Oct 2018 13:07:18 -0400 Subject: locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y A sizable portion of the CPU cycles spent on the __lock_acquire() is used up by the atomic increment of the class->ops stat counter. By taking it out from the lock_class structure and changing it to a per-cpu per-lock-class counter, we can reduce the amount of cacheline contention on the class structure when multiple CPUs are trying to acquire locks of the same class simultaneously. To limit the increase in memory consumption because of the percpu nature of that counter, it is now put back under the CONFIG_DEBUG_LOCKDEP config option. So the memory consumption increase will only occur if CONFIG_DEBUG_LOCKDEP is defined. The lock_class structure, however, is reduced in size by 16 bytes on 64-bit archs after ops removal and a minor restructuring of the fields. This patch also fixes a bug in the increment code as the counter is of the 'unsigned long' type, but atomic_inc() was used to increment it. 
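The underlying pattern, shown below as a stand-alone sketch with invented names, is the usual per-CPU statistics counter: the hot path increments a CPU-local variable (no shared cacheline, no atomic operation), and the infrequent reader sums over all possible CPUs and accepts a slightly stale total.

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU(unsigned long, demo_op_count);

    static inline void demo_op_inc(void)
    {
            this_cpu_inc(demo_op_count);            /* cheap, contention-free */
    }

    static unsigned long demo_op_read(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(demo_op_count, cpu);

            return sum;                             /* approximate, fine for stats */
    }
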
Signed-off-by: Waiman Long Acked-by: Peter Zijlstra Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/d66681f3-8781-9793-1dcf-2436a284550b@redhat.com Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 7 +------ kernel/locking/lockdep.c | 11 ++++++++--- kernel/locking/lockdep_internals.h | 27 +++++++++++++++++++++++++++ kernel/locking/lockdep_proc.c | 2 +- 4 files changed, 37 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b0d0b51c4d85..1fd82ff99c65 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -99,13 +99,8 @@ struct lock_class { */ unsigned int version; - /* - * Statistics counter: - */ - unsigned long ops; - - const char *name; int name_version; + const char *name; #ifdef CONFIG_LOCK_STAT unsigned long contention_point[LOCKSTAT_POINTS]; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 511d30f88bce..a0f83058d6aa 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -139,7 +139,7 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; * get freed - this significantly simplifies the debugging code. */ unsigned long nr_lock_classes; -static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; +struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; static inline struct lock_class *hlock_class(struct held_lock *hlock) { @@ -436,6 +436,7 @@ unsigned int max_lockdep_depth; * Various lockdep statistics: */ DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); +DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops); #endif /* @@ -1392,7 +1393,9 @@ static void print_lock_class_header(struct lock_class *class, int depth) printk("%*s->", depth, ""); print_lock_name(class); - printk(KERN_CONT " ops: %lu", class->ops); +#ifdef CONFIG_DEBUG_LOCKDEP + printk(KERN_CONT " ops: %lu", debug_class_ops_read(class)); +#endif printk(KERN_CONT " {\n"); for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { @@ -3227,7 +3230,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, if (!class) return 0; } - atomic_inc((atomic_t *)&class->ops); + + debug_class_ops_inc(class); + if (very_verbose(class)) { printk("\nacquire class [%px] %s", class->key, class->name); if (class->name_version > 1) diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index d459d624ba2a..88c847a41c8a 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -152,9 +152,15 @@ struct lockdep_stats { int nr_find_usage_forwards_recursions; int nr_find_usage_backwards_checks; int nr_find_usage_backwards_recursions; + + /* + * Per lock class locking operation stat counts + */ + unsigned long lock_class_ops[MAX_LOCKDEP_KEYS]; }; DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); +extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; #define __debug_atomic_inc(ptr) \ this_cpu_inc(lockdep_stats.ptr); @@ -179,9 +185,30 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); } \ __total; \ }) + +static inline void debug_class_ops_inc(struct lock_class *class) +{ + int idx; + + idx = class - lock_classes; + __debug_atomic_inc(lock_class_ops[idx]); +} + +static inline unsigned long debug_class_ops_read(struct lock_class *class) +{ + int idx, cpu; + unsigned long ops = 0; + + idx = class - lock_classes; + for_each_possible_cpu(cpu) + ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu); + return ops; +} + #else # define __debug_atomic_inc(ptr) do { } while (0) # 
define debug_atomic_inc(ptr) do { } while (0) # define debug_atomic_dec(ptr) do { } while (0) # define debug_atomic_read(ptr) 0 +# define debug_class_ops_inc(ptr) do { } while (0) #endif diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 3dd980dfba2d..3d31f9b0059e 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -68,7 +68,7 @@ static int l_show(struct seq_file *m, void *v) seq_printf(m, "%p", class->key); #ifdef CONFIG_DEBUG_LOCKDEP - seq_printf(m, " OPS:%8ld", class->ops); + seq_printf(m, " OPS:%8ld", debug_class_ops_read(class)); #endif #ifdef CONFIG_PROVE_LOCKING seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class)); -- cgit v1.2.3 From 4de1a293a08bdf8ec1530e02163930ac86f80ea2 Mon Sep 17 00:00:00 2001 From: Lance Roy Date: Tue, 2 Oct 2018 22:38:57 -0700 Subject: futex: Replace spin_is_locked() with lockdep lockdep_assert_held() is better suited for checking locking requirements, since it won't get confused when the lock is held by some other task. This is also a step towards possibly removing spin_is_locked(). Signed-off-by: Lance Roy Signed-off-by: Thomas Gleixner Cc: "Paul E. McKenney" Cc: Peter Zijlstra Cc: Darren Hart Link: https://lkml.kernel.org/r/20181003053902.6910-12-ldr709@gmail.com --- kernel/futex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 11fc3bb456d6..3e2de8fc1891 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1365,9 +1365,9 @@ static void __unqueue_futex(struct futex_q *q) { struct futex_hash_bucket *hb; - if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) - || WARN_ON(plist_node_empty(&q->list))) + if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) return; + lockdep_assert_held(q->lock_ptr); hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); plist_del(&q->list, &hb->chain); -- cgit v1.2.3 From e6fe3e5b7d16e8f146a4ae7fe481bc6e97acde1e Mon Sep 17 00:00:00 2001 From: He Zhe Date: Sun, 30 Sep 2018 00:45:53 +0800 Subject: printk: Give error on attempt to set log buffer length to over 2G The current printk() is ready to handle log buffer size up to 2G. Give an explicit error for users who want to use larger log buffer. Also fix printk formatting to show the 2G as a positive number. Link: http://lkml.kernel.org/r/20181008135916.gg4kkmoki5bgtco5@pathway.suse.cz Cc: rostedt@goodmis.org Cc: linux-kernel@vger.kernel.org Suggested-by: Sergey Senozhatsky Signed-off-by: He Zhe Reviewed-by: Sergey Senozhatsky [pmladek: Fixed to the really safe limit 2GB.] 
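As a usage note (assuming the documented log_buf_len=n[KMG] boot parameter syntax), an oversized request such as

	log_buf_len=3G

is now clamped to the 2G maximum, with an explicit "log_buf over 2G is not supported." message, before the usual round-up to a power of two.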
Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 15f3e70be448..fce696d80e09 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -440,6 +440,7 @@ static u32 clear_idx; /* record buffer */ #define LOG_ALIGN __alignof__(struct printk_log) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) +#define LOG_BUF_LEN_MAX (u32)(1 << 31) static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; @@ -1040,18 +1041,23 @@ void log_buf_vmcoreinfo_setup(void) static unsigned long __initdata new_log_buf_len; /* we practice scaling the ring buffer by powers of 2 */ -static void __init log_buf_len_update(unsigned size) +static void __init log_buf_len_update(u64 size) { + if (size > (u64)LOG_BUF_LEN_MAX) { + size = (u64)LOG_BUF_LEN_MAX; + pr_err("log_buf over 2G is not supported.\n"); + } + if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) - new_log_buf_len = size; + new_log_buf_len = (unsigned long)size; } /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { - unsigned int size; + u64 size; if (!str) return -EINVAL; @@ -1121,7 +1127,7 @@ void __init setup_log_buf(int early) } if (unlikely(!new_log_buf)) { - pr_err("log_buf_len: %ld bytes not available\n", + pr_err("log_buf_len: %lu bytes not available\n", new_log_buf_len); return; } @@ -1134,8 +1140,8 @@ void __init setup_log_buf(int early) memcpy(log_buf, __log_buf, __LOG_BUF_LEN); logbuf_unlock_irqrestore(flags); - pr_info("log_buf_len: %d bytes\n", log_buf_len); - pr_info("early log buf free: %d(%d%%)\n", + pr_info("log_buf_len: %u bytes\n", log_buf_len); + pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); } -- cgit v1.2.3 From b9fd04262a8abc366f40a9e97598e94591352c26 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Sep 2018 13:10:34 +0200 Subject: dma-direct: respect DMA_ATTR_NO_WARN Respect the DMA_ATTR_NO_WARN flags for allocations in dma-direct. Signed-off-by: Christoph Hellwig Acked-by: Robin Murphy --- kernel/dma/direct.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index a5a943836c8e..87a6bc2a96c0 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -114,6 +114,9 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, u64 phys_mask; void *ret; + if (attrs & DMA_ATTR_NO_WARN) + gfp |= __GFP_NOWARN; + /* we always manually zero the memory once we are done: */ gfp &= ~__GFP_ZERO; gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, -- cgit v1.2.3 From c3f7f2c7eba1a53d2e5ffbc2dcc9a20c5f094890 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Tue, 25 Sep 2018 23:58:40 -0400 Subject: smp: use __cpumask_set_cpu in on_each_cpu_cond The code in on_each_cpu_cond sets CPUs in a locally allocated bitmask, which should never be used by other CPUs simultaneously. There is no need to use locked memory accesses to set the bits in this bitmap. Switch to __cpumask_set_cpu. 
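The distinction being relied on is roughly the following (conceptual sketch with illustrative names, not the literal cpumask.h definitions):

	/* the regular helper is an atomic read-modify-write... */
	static inline void sketch_cpumask_set_cpu(unsigned int cpu,
						  struct cpumask *dstp)
	{
		set_bit(cpu, cpumask_bits(dstp));	/* atomic RMW */
	}

	/* ...the __ variant is a plain, non-atomic bit set */
	static inline void sketch___cpumask_set_cpu(unsigned int cpu,
						    struct cpumask *dstp)
	{
		__set_bit(cpu, cpumask_bits(dstp));	/* plain load/store */
	}

Since the cpumask here is freshly allocated and never visible to other CPUs, the atomic form is pure overhead.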
Cc: npiggin@gmail.com Cc: mingo@kernel.org Cc: will.deacon@arm.com Cc: songliubraving@fb.com Cc: kernel-team@fb.com Cc: hpa@zytor.com Suggested-by: Peter Zijlstra Signed-off-by: Rik van Riel Reviewed-by: Andy Lutomirski Signed-off-by: Peter Zijlstra (Intel) Link: http://lkml.kernel.org/r/20180926035844.1420-4-riel@surriel.com --- kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/smp.c b/kernel/smp.c index d86eec5f51c1..a7d4f9f50a49 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -682,7 +682,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), preempt_disable(); for_each_online_cpu(cpu) if (cond_func(cpu, info)) - cpumask_set_cpu(cpu, cpus); + __cpumask_set_cpu(cpu, cpus); on_each_cpu_mask(cpus, func, info, wait); preempt_enable(); free_cpumask_var(cpus); -- cgit v1.2.3 From 7d49b28a80b830c3ca876d33bedc58d62a78e16f Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Tue, 25 Sep 2018 23:58:41 -0400 Subject: smp,cpumask: introduce on_each_cpu_cond_mask Introduce a variant of on_each_cpu_cond that iterates only over the CPUs in a cpumask, in order to avoid making callbacks for every single CPU in the system when we only need to test a subset. Cc: npiggin@gmail.com Cc: mingo@kernel.org Cc: will.deacon@arm.com Cc: songliubraving@fb.com Cc: kernel-team@fb.com Cc: hpa@zytor.com Cc: luto@kernel.org Signed-off-by: Rik van Riel Signed-off-by: Peter Zijlstra (Intel) Link: http://lkml.kernel.org/r/20180926035844.1420-5-riel@surriel.com --- include/linux/smp.h | 4 ++++ kernel/smp.c | 17 +++++++++++++---- kernel/up.c | 14 +++++++++++--- 3 files changed, 28 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/include/linux/smp.h b/include/linux/smp.h index 9fb239e12b82..a56f08ff3097 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); +void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags, const struct cpumask *mask); + int smp_call_function_single_async(int cpu, call_single_data_t *csd); #ifdef CONFIG_SMP diff --git a/kernel/smp.c b/kernel/smp.c index a7d4f9f50a49..163c451af42e 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -669,9 +669,9 @@ EXPORT_SYMBOL(on_each_cpu_mask); * You must not call this function with disabled interrupts or * from a hardware interrupt handler or from a bottom half handler. */ -void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), +void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, - gfp_t gfp_flags) + gfp_t gfp_flags, const struct cpumask *mask) { cpumask_var_t cpus; int cpu, ret; @@ -680,7 +680,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) { preempt_disable(); - for_each_online_cpu(cpu) + for_each_cpu(cpu, mask) if (cond_func(cpu, info)) __cpumask_set_cpu(cpu, cpus); on_each_cpu_mask(cpus, func, info, wait); @@ -692,7 +692,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), * just have to IPI them one by one. 
*/ preempt_disable(); - for_each_online_cpu(cpu) + for_each_cpu(cpu, mask) if (cond_func(cpu, info)) { ret = smp_call_function_single(cpu, func, info, wait); @@ -701,6 +701,15 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), preempt_enable(); } } +EXPORT_SYMBOL(on_each_cpu_cond_mask); + +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags) +{ + on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, + cpu_online_mask); +} EXPORT_SYMBOL(on_each_cpu_cond); static void do_nothing(void *unused) diff --git a/kernel/up.c b/kernel/up.c index 42c46bf3e0a5..ff536f9cc8a2 100644 --- a/kernel/up.c +++ b/kernel/up.c @@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask); * Preemption is disabled here to make sure the cond_func is called under the * same condtions in UP and SMP. */ -void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), - smp_call_func_t func, void *info, bool wait, - gfp_t gfp_flags) +void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags, const struct cpumask *mask) { unsigned long flags; @@ -82,6 +82,14 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), } preempt_enable(); } +EXPORT_SYMBOL(on_each_cpu_cond_mask); + +void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags) +{ + on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL); +} EXPORT_SYMBOL(on_each_cpu_cond); int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) -- cgit v1.2.3 From a98959fdbda1849a01b2150bb635ed559ec06700 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 27 Sep 2018 09:22:02 -0500 Subject: resource: Include resource end in walk_*() interfaces find_next_iomem_res() finds an iomem resource that covers part of a range described by "start, end". All callers expect that range to be inclusive, i.e., both start and end are included, but find_next_iomem_res() doesn't handle the end address correctly. If it finds an iomem resource that contains exactly the end address, it skips it, e.g., if "start, end" is [0x0-0x10000] and there happens to be an iomem resource [mem 0x10000-0x10000] (the single byte at 0x10000), we skip it: find_next_iomem_res(...) { start = 0x0; end = 0x10000; for (p = next_resource(...)) { # p->start = 0x10000; # p->end = 0x10000; # we *should* return this resource, but this condition is false: if ((p->end >= start) && (p->start < end)) break; Adjust find_next_iomem_res() so it allows a resource that includes the single byte at the end of the range. This is a corner case that we probably don't see in practice. Fixes: 58c1b5b07907 ("[PATCH] memory hotadd fixes: find_next_system_ram catch range fix") Signed-off-by: Bjorn Helgaas Signed-off-by: Borislav Petkov CC: Andrew Morton CC: Brijesh Singh CC: Dan Williams CC: H. 
Peter Anvin CC: Lianbo Jiang CC: Takashi Iwai CC: Thomas Gleixner CC: Tom Lendacky CC: Vivek Goyal CC: Yaowei Bai CC: bhe@redhat.com CC: dan.j.williams@intel.com CC: dyoung@redhat.com CC: kexec@lists.infradead.org CC: mingo@redhat.com CC: x86-ml Link: http://lkml.kernel.org/r/153805812254.1157.16736368485811773752.stgit@bhelgaas-glaptop.roam.corp.google.com --- kernel/resource.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 30e1bc68503b..155ec873ea4d 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -319,7 +319,7 @@ int release_resource(struct resource *old) EXPORT_SYMBOL(release_resource); /* - * Finds the lowest iomem resource existing within [res->start.res->end). + * Finds the lowest iomem resource existing within [res->start..res->end]. * The caller must specify res->start, res->end, res->flags, and optionally * desc. If found, returns 0, res is overwritten, if not found, returns -1. * This function walks the whole tree and not just first level children until @@ -352,7 +352,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc, p = NULL; break; } - if ((p->end >= start) && (p->start < end)) + if ((p->end >= start) && (p->start <= end)) break; } -- cgit v1.2.3 From 010a93bf97c72f43aac664d0a685942f83d1a103 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 27 Sep 2018 09:22:09 -0500 Subject: resource: Fix find_next_iomem_res() iteration issue Previously find_next_iomem_res() used "*res" as both an input parameter for the range to search and the type of resource to search for, and an output parameter for the resource we found, which makes the interface confusing. The current callers use find_next_iomem_res() incorrectly because they allocate a single struct resource and use it for repeated calls to find_next_iomem_res(). When find_next_iomem_res() returns a resource, it overwrites the start, end, flags, and desc members of the struct. If we call find_next_iomem_res() again, we must update or restore these fields. The previous code restored res.start and res.end, but not res.flags or res.desc. Since the callers did not restore res.flags, if they searched for flags IORESOURCE_MEM | IORESOURCE_BUSY and found a resource with flags IORESOURCE_MEM | IORESOURCE_BUSY | IORESOURCE_SYSRAM, the next search would incorrectly skip resources unless they were also marked as IORESOURCE_SYSRAM. Fix this by restructuring the interface so it takes explicit "start, end, flags" parameters and uses "*res" only as an output parameter. Based on a patch by Lianbo Jiang . [ bp: While at it: - make comments kernel-doc style. - Originally-by: http://lore.kernel.org/lkml/20180921073211.20097-2-lijiang@redhat.com Signed-off-by: Bjorn Helgaas Signed-off-by: Borislav Petkov CC: Andrew Morton CC: Brijesh Singh CC: Dan Williams CC: H. 
Peter Anvin CC: Lianbo Jiang CC: Takashi Iwai CC: Thomas Gleixner CC: Tom Lendacky CC: Vivek Goyal CC: Yaowei Bai CC: bhe@redhat.com CC: dan.j.williams@intel.com CC: dyoung@redhat.com CC: kexec@lists.infradead.org CC: mingo@redhat.com CC: x86-ml Link: http://lkml.kernel.org/r/153805812916.1157.177580438135143788.stgit@bhelgaas-glaptop.roam.corp.google.com --- kernel/resource.c | 96 ++++++++++++++++++++++++------------------------------- 1 file changed, 42 insertions(+), 54 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 155ec873ea4d..38b8d11c9eaf 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -318,24 +318,27 @@ int release_resource(struct resource *old) EXPORT_SYMBOL(release_resource); -/* - * Finds the lowest iomem resource existing within [res->start..res->end]. - * The caller must specify res->start, res->end, res->flags, and optionally - * desc. If found, returns 0, res is overwritten, if not found, returns -1. - * This function walks the whole tree and not just first level children until - * and unless first_level_children_only is true. +/** + * Finds the lowest iomem resource that covers part of [start..end]. The + * caller must specify start, end, flags, and desc (which may be + * IORES_DESC_NONE). + * + * If a resource is found, returns 0 and *res is overwritten with the part + * of the resource that's within [start..end]; if none is found, returns + * -1. + * + * This function walks the whole tree and not just first level children + * unless @first_level_children_only is true. */ -static int find_next_iomem_res(struct resource *res, unsigned long desc, - bool first_level_children_only) +static int find_next_iomem_res(resource_size_t start, resource_size_t end, + unsigned long flags, unsigned long desc, + bool first_level_children_only, + struct resource *res) { - resource_size_t start, end; struct resource *p; bool sibling_only = false; BUG_ON(!res); - - start = res->start; - end = res->end; BUG_ON(start >= end); if (first_level_children_only) @@ -344,7 +347,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc, read_lock(&resource_lock); for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { - if ((p->flags & res->flags) != res->flags) + if ((p->flags & flags) != flags) continue; if ((desc != IORES_DESC_NONE) && (desc != p->desc)) continue; @@ -359,32 +362,31 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc, read_unlock(&resource_lock); if (!p) return -1; + /* copy data */ - if (res->start < p->start) - res->start = p->start; - if (res->end > p->end) - res->end = p->end; + res->start = max(start, p->start); + res->end = min(end, p->end); res->flags = p->flags; res->desc = p->desc; return 0; } -static int __walk_iomem_res_desc(struct resource *res, unsigned long desc, - bool first_level_children_only, - void *arg, +static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, + unsigned long flags, unsigned long desc, + bool first_level_children_only, void *arg, int (*func)(struct resource *, void *)) { - u64 orig_end = res->end; + struct resource res; int ret = -1; - while ((res->start < res->end) && - !find_next_iomem_res(res, desc, first_level_children_only)) { - ret = (*func)(res, arg); + while (start < end && + !find_next_iomem_res(start, end, flags, desc, + first_level_children_only, &res)) { + ret = (*func)(&res, arg); if (ret) break; - res->start = res->end + 1; - res->end = orig_end; + start = res.end + 1; } return ret; @@ -407,13 +409,7 
@@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc, int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)) { - struct resource res; - - res.start = start; - res.end = end; - res.flags = flags; - - return __walk_iomem_res_desc(&res, desc, false, arg, func); + return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func); } EXPORT_SYMBOL_GPL(walk_iomem_res_desc); @@ -427,13 +423,9 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc); int walk_system_ram_res(u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)) { - struct resource res; + unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - res.start = start; - res.end = end; - res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - - return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true, + return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true, arg, func); } @@ -444,13 +436,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg, int walk_mem_res(u64 start, u64 end, void *arg, int (*func)(struct resource *, void *)) { - struct resource res; + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; - res.start = start; - res.end = end; - res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; - - return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true, + return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true, arg, func); } @@ -464,25 +452,25 @@ int walk_mem_res(u64 start, u64 end, void *arg, int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { + resource_size_t start, end; + unsigned long flags; struct resource res; unsigned long pfn, end_pfn; - u64 orig_end; int ret = -1; - res.start = (u64) start_pfn << PAGE_SHIFT; - res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; - res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - orig_end = res.end; - while ((res.start < res.end) && - (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) { + start = (u64) start_pfn << PAGE_SHIFT; + end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; + flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + while (start < end && + !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, + true, &res)) { pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; end_pfn = (res.end + 1) >> PAGE_SHIFT; if (end_pfn > pfn) ret = (*func)(pfn, end_pfn - pfn, arg); if (ret) break; - res.start = res.end + 1; - res.end = orig_end; + start = res.end + 1; } return ret; } -- cgit v1.2.3 From b69c2e20f6e4046da84ce5b33ba1ef89cb087b40 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 9 Oct 2018 16:11:21 +0200 Subject: resource: Clean it up a bit - Drop BUG_ON()s and do normal error handling instead, in find_next_iomem_res(). - Align function arguments on opening braces. - Get rid of local var sibling_only in find_next_iomem_res(). - Shorten unnecessarily long first_level_children_only arg name. Signed-off-by: Borislav Petkov CC: Andrew Morton CC: Bjorn Helgaas CC: Brijesh Singh CC: Dan Williams CC: H. 
Peter Anvin CC: Lianbo Jiang CC: Takashi Iwai CC: Thomas Gleixner CC: Tom Lendacky CC: Vivek Goyal CC: Yaowei Bai CC: bhe@redhat.com CC: dan.j.williams@intel.com CC: dyoung@redhat.com CC: kexec@lists.infradead.org CC: mingo@redhat.com Link: --- kernel/resource.c | 55 ++++++++++++++++++++++++++----------------------------- 1 file changed, 26 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 38b8d11c9eaf..b3a3a1fc499e 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -328,25 +328,23 @@ EXPORT_SYMBOL(release_resource); * -1. * * This function walks the whole tree and not just first level children - * unless @first_level_children_only is true. + * unless @first_lvl is true. */ static int find_next_iomem_res(resource_size_t start, resource_size_t end, unsigned long flags, unsigned long desc, - bool first_level_children_only, - struct resource *res) + bool first_lvl, struct resource *res) { struct resource *p; - bool sibling_only = false; - BUG_ON(!res); - BUG_ON(start >= end); + if (!res) + return -EINVAL; - if (first_level_children_only) - sibling_only = true; + if (start >= end) + return -EINVAL; read_lock(&resource_lock); - for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) { + for (p = iomem_resource.child; p; p = next_resource(p, first_lvl)) { if ((p->flags & flags) != flags) continue; if ((desc != IORES_DESC_NONE) && (desc != p->desc)) @@ -373,15 +371,14 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end, static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, unsigned long flags, unsigned long desc, - bool first_level_children_only, void *arg, + bool first_lvl, void *arg, int (*func)(struct resource *, void *)) { struct resource res; int ret = -1; while (start < end && - !find_next_iomem_res(start, end, flags, desc, - first_level_children_only, &res)) { + !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) { ret = (*func)(&res, arg); if (ret) break; @@ -392,7 +389,7 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, return ret; } -/* +/** * Walks through iomem resources and calls func() with matching resource * ranges. This walks through whole tree and not just first level children. * All the memory ranges which overlap start,end and also match flags and @@ -421,7 +418,7 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc); * ranges. */ int walk_system_ram_res(u64 start, u64 end, void *arg, - int (*func)(struct resource *, void *)) + int (*func)(struct resource *, void *)) { unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; @@ -450,7 +447,7 @@ int walk_mem_res(u64 start, u64 end, void *arg, * It is to be used only for System RAM. */ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, - void *arg, int (*func)(unsigned long, unsigned long, void *)) + void *arg, int (*func)(unsigned long, unsigned long, void *)) { resource_size_t start, end; unsigned long flags; @@ -646,8 +643,8 @@ static int find_resource(struct resource *root, struct resource *new, * @constraint: the size and alignment constraints to be met. */ static int reallocate_resource(struct resource *root, struct resource *old, - resource_size_t newsize, - struct resource_constraint *constraint) + resource_size_t newsize, + struct resource_constraint *constraint) { int err=0; struct resource new = *old; @@ -960,7 +957,7 @@ skip: * Existing children of the resource are assumed to be immutable. 
*/ int adjust_resource(struct resource *res, resource_size_t start, - resource_size_t size) + resource_size_t size) { int result; @@ -971,9 +968,9 @@ int adjust_resource(struct resource *res, resource_size_t start, } EXPORT_SYMBOL(adjust_resource); -static void __init __reserve_region_with_split(struct resource *root, - resource_size_t start, resource_size_t end, - const char *name) +static void __init +__reserve_region_with_split(struct resource *root, resource_size_t start, + resource_size_t end, const char *name) { struct resource *parent = root; struct resource *conflict; @@ -1032,9 +1029,9 @@ static void __init __reserve_region_with_split(struct resource *root, } -void __init reserve_region_with_split(struct resource *root, - resource_size_t start, resource_size_t end, - const char *name) +void __init +reserve_region_with_split(struct resource *root, resource_size_t start, + resource_size_t end, const char *name) { int abort = 0; @@ -1160,7 +1157,7 @@ EXPORT_SYMBOL(__request_region); * The described resource region must match a currently busy region. */ void __release_region(struct resource *parent, resource_size_t start, - resource_size_t n) + resource_size_t n) { struct resource **p; resource_size_t end; @@ -1222,7 +1219,7 @@ EXPORT_SYMBOL(__release_region); * simplicity. Enhance this logic when necessary. */ int release_mem_region_adjustable(struct resource *parent, - resource_size_t start, resource_size_t size) + resource_size_t start, resource_size_t size) { struct resource **p; struct resource *res; @@ -1398,9 +1395,9 @@ static int devm_region_match(struct device *dev, void *res, void *match_data) this->start == match->start && this->n == match->n; } -struct resource * __devm_request_region(struct device *dev, - struct resource *parent, resource_size_t start, - resource_size_t n, const char *name) +struct resource * +__devm_request_region(struct device *dev, struct resource *parent, + resource_size_t start, resource_size_t n, const char *name) { struct region_devres *dr = NULL; struct resource *res; -- cgit v1.2.3 From 8af03d1ae2e154a8be3631e8694b87007e1bdbc2 Mon Sep 17 00:00:00 2001 From: Wenwen Wang Date: Sun, 7 Oct 2018 15:23:15 -0500 Subject: bpf: btf: Fix a missing check bug In btf_parse_hdr(), the length of the btf data header is firstly copied from the user space to 'hdr_len' and checked to see whether it is larger than 'btf_data_size'. If yes, an error code EINVAL is returned. Otherwise, the whole header is copied again from the user space to 'btf->hdr'. However, after the second copy, there is no check between 'btf->hdr->hdr_len' and 'hdr_len' to confirm that the two copies get the same value. Given that the btf data is in the user space, a malicious user can race to change the data between the two copies. By doing so, the user can provide malicious data to the kernel and cause undefined behavior. This patch adds a necessary check after the second copy, to make sure 'btf->hdr->hdr_len' has the same value as 'hdr_len'. Otherwise, an error code EINVAL will be returned. 
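Schematically, this is the classic double-fetch pattern (simplified sketch, not the literal btf.c code; uhdr, hdr_len, hdr_copy_len and btf_data_size stand in for the real variables):

	/* 1st fetch: read and validate the user-supplied header length */
	if (copy_from_user(&hdr_len, &uhdr->hdr_len, sizeof(hdr_len)))
		return -EFAULT;
	if (hdr_len > btf_data_size)
		return -EINVAL;

	/* 2nd fetch: copy the whole header; user space may have changed
	 * uhdr->hdr_len in the meantime
	 */
	if (copy_from_user(&btf->hdr, uhdr, hdr_copy_len))
		return -EFAULT;

	/* added check: the value used from here on must be the value
	 * that was validated above
	 */
	if (btf->hdr.hdr_len != hdr_len)
		return -EINVAL;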
Signed-off-by: Wenwen Wang Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/btf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 138f0302692e..378cef70341c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2114,6 +2114,9 @@ static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data, hdr = &btf->hdr; + if (hdr->hdr_len != hdr_len) + return -EINVAL; + btf_verifier_log_hdr(env, btf_data_size); if (hdr->magic != BTF_MAGIC) { -- cgit v1.2.3 From 509db2833e0ddac7faf6e7d2dd6e7f85c98fbee0 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Tue, 9 Oct 2018 10:04:49 +0900 Subject: bpf: error handling when map_lookup_elem isn't supported The error value returned by map_lookup_elem doesn't differentiate whether lookup was failed because of invalid key or lookup is not supported. Lets add handling for -EOPNOTSUPP return value of map_lookup_elem() method of map, with expectation from map's implementation that it should return -EOPNOTSUPP if lookup is not supported. The errno for bpf syscall for BPF_MAP_LOOKUP_ELEM command will be set to EOPNOTSUPP if map lookup is not supported. Signed-off-by: Prashant Bhole Acked-by: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/syscall.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 5742df21598c..4f416234251f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -719,10 +719,15 @@ static int map_lookup_elem(union bpf_attr *attr) } else { rcu_read_lock(); ptr = map->ops->map_lookup_elem(map, key); - if (ptr) + if (IS_ERR(ptr)) { + err = PTR_ERR(ptr); + } else if (!ptr) { + err = -ENOENT; + } else { + err = 0; memcpy(value, ptr, value_size); + } rcu_read_unlock(); - err = ptr ? 
0 : -ENOENT; } if (err) -- cgit v1.2.3 From 3b4a63f674e94795c9b76a41b36364b8aec232e1 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Tue, 9 Oct 2018 10:04:50 +0900 Subject: bpf: return EOPNOTSUPP when map lookup isn't supported Return ERR_PTR(-EOPNOTSUPP) from map_lookup_elem() methods of below map types: - BPF_MAP_TYPE_PROG_ARRAY - BPF_MAP_TYPE_STACK_TRACE - BPF_MAP_TYPE_XSKMAP - BPF_MAP_TYPE_SOCKMAP/BPF_MAP_TYPE_SOCKHASH Signed-off-by: Prashant Bhole Acked-by: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/arraymap.c | 2 +- kernel/bpf/sockmap.c | 2 +- kernel/bpf/stackmap.c | 2 +- kernel/bpf/xskmap.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index dded84cbe814..24583da9ffd1 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -449,7 +449,7 @@ static void fd_array_map_free(struct bpf_map *map) static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) { - return NULL; + return ERR_PTR(-EOPNOTSUPP); } /* only called from syscall */ diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index d37a1a0a6e1e..5d0677d808ae 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -2096,7 +2096,7 @@ int sockmap_get_from_fd(const union bpf_attr *attr, int type, static void *sock_map_lookup(struct bpf_map *map, void *key) { - return NULL; + return ERR_PTR(-EOPNOTSUPP); } static int sock_map_update_elem(struct bpf_map *map, diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 8061a439ef18..b2ade10f7ec3 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -505,7 +505,7 @@ const struct bpf_func_proto bpf_get_stack_proto = { /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { - return NULL; + return ERR_PTR(-EOPNOTSUPP); } /* Called from syscall */ diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 9f8463afda9c..ef0b7b6ef8a5 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -154,7 +154,7 @@ void __xsk_map_flush(struct bpf_map *map) static void *xsk_map_lookup_elem(struct bpf_map *map, void *key) { - return NULL; + return ERR_PTR(-EOPNOTSUPP); } static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value, -- cgit v1.2.3 From d59e0ba19481c0046d2ea2bd0e5344eeaf45aace Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Tue, 9 Oct 2018 11:43:35 -0400 Subject: tick/sched : Remove redundant cpu_online() check can_stop_idle_tick() checks cpu_online() twice. The first check leaves the function when the CPU is not online, so the second check it redundant. Remove it. Signed-off-by: Peng Hao Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Link: https://lkml.kernel.org/r/1539099815-2943-1-git-send-email-penghao122@sina.com.cn --- kernel/time/tick-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 5b33e2f5c0ed..69e673b88474 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -885,7 +885,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (need_resched()) return false; - if (unlikely(local_softirq_pending() && cpu_online(cpu))) { + if (unlikely(local_softirq_pending())) { static int ratelimit; if (ratelimit < 10 && -- cgit v1.2.3 From b2a2ab527d6de02fbf2331bae4a299d58ab52266 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Wed, 10 Oct 2018 20:11:25 -0500 Subject: signal: Guard against negative signal numbers in copy_siginfo_from_user The bounds checks in known_siginfo_layout only guards against positive numbers that are too large, large negative can slip through and can cause out of bounds accesses. Ordinarily this is not a concern because early in signal processing the signal number is filtered with valid_signal which ensures it is a small positive signal number, but copy_siginfo_from_user is called before this check is performed. [ 73.031126] BUG: unable to handle kernel paging request at ffffffff6281bcb6 [ 73.032038] PGD 3014067 P4D 3014067 PUD 0 [ 73.032565] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC PTI [ 73.033287] CPU: 0 PID: 732 Comm: trinity-c3 Tainted: G W T 4.19.0-rc1-00077-g4ce5f9c #1 [ 73.034423] RIP: 0010:copy_siginfo_from_user+0x4d/0xd0 [ 73.034908] Code: 00 8b 53 08 81 fa 80 00 00 00 0f 84 90 00 00 00 85 d2 7e 2d 48 63 0b 83 f9 1f 7f 1c 8d 71 ff bf d8 04 01 50 48 0f a3 f7 73 0e <0f> b6 8c 09 20 bb 81 82 39 ca 7f 15 eb 68 31 c0 83 fa 06 7f 0c eb [ 73.036665] RSP: 0018:ffff88001b8f7e20 EFLAGS: 00010297 [ 73.037160] RAX: 0000000000000000 RBX: ffff88001b8f7e90 RCX: fffffffff00000cb [ 73.037865] RDX: 0000000000000001 RSI: 00000000f00000ca RDI: 00000000500104d8 [ 73.038546] RBP: ffff88001b8f7e80 R08: 0000000000000000 R09: 0000000000000000 [ 73.039201] R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000008 [ 73.039874] R13: 00000000000002dc R14: 0000000000000000 R15: 0000000000000000 [ 73.040613] FS: 000000000104a880(0000) GS:ffff88001f000000(0000) knlGS:0000000000000000 [ 73.041649] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 73.042405] CR2: ffffffff6281bcb6 CR3: 000000001cb52003 CR4: 00000000001606b0 [ 73.043351] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 73.044286] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600 [ 73.045221] Call Trace: [ 73.045556] __x64_sys_rt_tgsigqueueinfo+0x34/0xa0 [ 73.046199] do_syscall_64+0x1a4/0x390 [ 73.046708] ? vtime_user_enter+0x61/0x80 [ 73.047242] ? __context_tracking_enter+0x4e/0x60 [ 73.047714] ? __context_tracking_enter+0x4e/0x60 [ 73.048278] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Therefore fix known_siginfo_layout to take an unsigned signal number instead of a signed signal number. All valid signal numbers are small positive numbers so they will not be affected, but invalid negative signal numbers will now become large positive signal numbers and will not be used as indices into the sig_sicodes array. Making the signal number unsigned makes it difficult for similar mistakes to happen in the future. Fixes: 4ce5f9c9e754 ("signal: Use a smaller struct siginfo in the kernel") Inspired-by: Sean Christopherson Reported-by: kernel test robot Signed-off-by: "Eric W. Biederman" --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 2bffc5a50183..5f5bf374512b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2860,7 +2860,7 @@ static const struct { [SIGSYS] = { NSIGSYS, SIL_SYS }, }; -static bool known_siginfo_layout(int sig, int si_code) +static bool known_siginfo_layout(unsigned sig, int si_code) { if (si_code == SI_KERNEL) return true; -- cgit v1.2.3 From a36700589b85443e28170be59fa11c8a104130a5 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Wed, 10 Oct 2018 20:29:44 -0500 Subject: signal: Guard against negative signal numbers in copy_siginfo_from_user32 While fixing an out of bounds array access in known_siginfo_layout reported by the kernel test robot it became apparent that the same bug exists in siginfo_layout and affects copy_siginfo_from_user32. The straight forward fix that makes guards against making this mistake in the future and should keep the code size small is to just take an unsigned signal number instead of a signed signal number, as I did to fix known_siginfo_layout. Cc: stable@vger.kernel.org Fixes: cc731525f26a ("signal: Remove kernel interal si_code magic") Signed-off-by: "Eric W. Biederman" --- include/linux/signal.h | 2 +- kernel/signal.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/signal.h b/include/linux/signal.h index 706a499d1eb1..200ed96a05af 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -40,7 +40,7 @@ enum siginfo_layout { SIL_SYS, }; -enum siginfo_layout siginfo_layout(int sig, int si_code); +enum siginfo_layout siginfo_layout(unsigned sig, int si_code); /* * Define some primitives to manipulate sigset_t. diff --git a/kernel/signal.c b/kernel/signal.c index 5f5bf374512b..4fd431ce4f91 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2879,7 +2879,7 @@ static bool known_siginfo_layout(unsigned sig, int si_code) return false; } -enum siginfo_layout siginfo_layout(int sig, int si_code) +enum siginfo_layout siginfo_layout(unsigned sig, int si_code) { enum siginfo_layout layout = SIL_KILL; if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { -- cgit v1.2.3 From a6ca88b241d5e929e6e60b12ad8cd288f0ffa256 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 1 Oct 2018 22:36:36 -0700 Subject: trace_uprobe: support reference counter in fd-based uprobe This patch enables uprobes with reference counter in fd-based uprobe. Highest 32 bits of perf_event_attr.config is used to stored offset of the reference count (semaphore). Format information in /sys/bus/event_source/devices/uprobe/format/ is updated to reflect this new feature. 
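From the tool side this looks roughly as follows (hypothetical userspace snippet; uprobe_pmu_type, path, probe_offset, ref_ctr_offset and target_pid are placeholders the caller must supply, and the 32-bit shift matches the config:32-63 format string exposed below):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct perf_event_attr attr = {};
	int fd;

	attr.size = sizeof(attr);
	attr.type = uprobe_pmu_type;	/* /sys/bus/event_source/devices/uprobe/type */
	attr.uprobe_path = (__u64)(unsigned long)path;	/* probed binary */
	attr.probe_offset = probe_offset;		/* probe location */
	/* bit 0 clear: uprobe, not uretprobe; bits 32-63: semaphore offset */
	attr.config = (__u64)ref_ctr_offset << 32;

	fd = syscall(__NR_perf_event_open, &attr, target_pid, -1, -1, 0);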
Link: http://lkml.kernel.org/r/20181002053636.1896903-1-songliubraving@fb.com Cc: Oleg Nesterov Acked-by: Peter Zijlstra (Intel) Reviewed-and-tested-by: Ravi Bangoria Signed-off-by: Song Liu Signed-off-by: Steven Rostedt (VMware) --- include/linux/trace_events.h | 3 ++- kernel/events/core.c | 49 ++++++++++++++++++++++++++++++++--------- kernel/trace/trace_event_perf.c | 7 +++--- kernel/trace/trace_probe.h | 3 ++- kernel/trace/trace_uprobe.c | 4 +++- 5 files changed, 50 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 78a010e19ed4..4130a5497d40 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -575,7 +575,8 @@ extern int bpf_get_kprobe_info(const struct perf_event *event, bool perf_type_tracepoint); #endif #ifdef CONFIG_UPROBE_EVENTS -extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe); +extern int perf_uprobe_init(struct perf_event *event, + unsigned long ref_ctr_offset, bool is_retprobe); extern void perf_uprobe_destroy(struct perf_event *event); extern int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, const char **filename, diff --git a/kernel/events/core.c b/kernel/events/core.c index c80549bf82c6..65b30773af3e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8368,30 +8368,39 @@ static struct pmu perf_tracepoint = { * * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe * if not set, create kprobe/uprobe + * + * The following values specify a reference counter (or semaphore in the + * terminology of tools like dtrace, systemtap, etc.) Userspace Statically + * Defined Tracepoints (USDT). Currently, we use 40 bit for the offset. + * + * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config as th offset + * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left */ enum perf_probe_config { PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */ + PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, + PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS, }; PMU_FORMAT_ATTR(retprobe, "config:0"); +#endif -static struct attribute *probe_attrs[] = { +#ifdef CONFIG_KPROBE_EVENTS +static struct attribute *kprobe_attrs[] = { &format_attr_retprobe.attr, NULL, }; -static struct attribute_group probe_format_group = { +static struct attribute_group kprobe_format_group = { .name = "format", - .attrs = probe_attrs, + .attrs = kprobe_attrs, }; -static const struct attribute_group *probe_attr_groups[] = { - &probe_format_group, +static const struct attribute_group *kprobe_attr_groups[] = { + &kprobe_format_group, NULL, }; -#endif -#ifdef CONFIG_KPROBE_EVENTS static int perf_kprobe_event_init(struct perf_event *event); static struct pmu perf_kprobe = { .task_ctx_nr = perf_sw_context, @@ -8401,7 +8410,7 @@ static struct pmu perf_kprobe = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, - .attr_groups = probe_attr_groups, + .attr_groups = kprobe_attr_groups, }; static int perf_kprobe_event_init(struct perf_event *event) @@ -8433,6 +8442,24 @@ static int perf_kprobe_event_init(struct perf_event *event) #endif /* CONFIG_KPROBE_EVENTS */ #ifdef CONFIG_UPROBE_EVENTS +PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63"); + +static struct attribute *uprobe_attrs[] = { + &format_attr_retprobe.attr, + &format_attr_ref_ctr_offset.attr, + NULL, +}; + +static struct attribute_group uprobe_format_group = { + .name = "format", + .attrs = uprobe_attrs, +}; + +static const struct attribute_group 
*uprobe_attr_groups[] = { + &uprobe_format_group, + NULL, +}; + static int perf_uprobe_event_init(struct perf_event *event); static struct pmu perf_uprobe = { .task_ctx_nr = perf_sw_context, @@ -8442,12 +8469,13 @@ static struct pmu perf_uprobe = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, - .attr_groups = probe_attr_groups, + .attr_groups = uprobe_attr_groups, }; static int perf_uprobe_event_init(struct perf_event *event) { int err; + unsigned long ref_ctr_offset; bool is_retprobe; if (event->attr.type != perf_uprobe.type) @@ -8463,7 +8491,8 @@ static int perf_uprobe_event_init(struct perf_event *event) return -EOPNOTSUPP; is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; - err = perf_uprobe_init(event, is_retprobe); + ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; + err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); if (err) return err; diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 69a3fe926e8c..76217bbef815 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -290,7 +290,8 @@ void perf_kprobe_destroy(struct perf_event *p_event) #endif /* CONFIG_KPROBE_EVENTS */ #ifdef CONFIG_UPROBE_EVENTS -int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe) +int perf_uprobe_init(struct perf_event *p_event, + unsigned long ref_ctr_offset, bool is_retprobe) { int ret; char *path = NULL; @@ -312,8 +313,8 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe) goto out; } - tp_event = create_local_trace_uprobe( - path, p_event->attr.probe_offset, is_retprobe); + tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset, + ref_ctr_offset, is_retprobe); if (IS_ERR(tp_event)) { ret = PTR_ERR(tp_event); goto out; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5f52668e165d..03b10f3201a5 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -412,6 +412,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, extern void destroy_local_trace_kprobe(struct trace_event_call *event_call); extern struct trace_event_call * -create_local_trace_uprobe(char *name, unsigned long offs, bool is_return); +create_local_trace_uprobe(char *name, unsigned long offs, + unsigned long ref_ctr_offset, bool is_return); extern void destroy_local_trace_uprobe(struct trace_event_call *event_call); #endif diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 3a7c73c40007..d09638706fe0 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1405,7 +1405,8 @@ static int unregister_uprobe_event(struct trace_uprobe *tu) #ifdef CONFIG_PERF_EVENTS struct trace_event_call * -create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) +create_local_trace_uprobe(char *name, unsigned long offs, + unsigned long ref_ctr_offset, bool is_return) { struct trace_uprobe *tu; struct path path; @@ -1437,6 +1438,7 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) tu->offset = offs; tu->path = path; + tu->ref_ctr_offset = ref_ctr_offset; tu->filename = kstrdup(name, GFP_KERNEL); init_trace_event_call(tu, &tu->tp.call); -- cgit v1.2.3 From 56de763052792669d61d79a087611da9a7f04d4e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:16:36 +0900 Subject: tracing: probeevent: Cleanup print argument functions Cleanup the print-argument function to decouple it into print-name and print-value, 
so that it can support more flexible expression, like array type. Link: http://lkml.kernel.org/r/152465859635.26224.13452846788717102315.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 20 ++++++-------------- kernel/trace/trace_probe.c | 12 +++++------- kernel/trace/trace_probe.h | 19 ++++++++++++++++--- kernel/trace/trace_uprobe.c | 9 ++------- 4 files changed, 29 insertions(+), 31 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 508396edc56a..6326c71181aa 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1136,8 +1136,6 @@ print_kprobe_event(struct trace_iterator *iter, int flags, struct kprobe_trace_entry_head *field; struct trace_seq *s = &iter->seq; struct trace_probe *tp; - u8 *data; - int i; field = (struct kprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); @@ -1149,11 +1147,9 @@ print_kprobe_event(struct trace_iterator *iter, int flags, trace_seq_putc(s, ')'); - data = (u8 *)&field[1]; - for (i = 0; i < tp->nr_args; i++) - if (!tp->args[i].type->print(s, tp->args[i].name, - data + tp->args[i].offset, field)) - goto out; + if (print_probe_args(s, tp->args, tp->nr_args, + (u8 *)&field[1], field) < 0) + goto out; trace_seq_putc(s, '\n'); out: @@ -1167,8 +1163,6 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, struct kretprobe_trace_entry_head *field; struct trace_seq *s = &iter->seq; struct trace_probe *tp; - u8 *data; - int i; field = (struct kretprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); @@ -1185,11 +1179,9 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, trace_seq_putc(s, ')'); - data = (u8 *)&field[1]; - for (i = 0; i < tp->nr_args; i++) - if (!tp->args[i].type->print(s, tp->args[i].name, - data + tp->args[i].offset, field)) - goto out; + if (print_probe_args(s, tp->args, tp->nr_args, + (u8 *)&field[1], field) < 0) + goto out; trace_seq_putc(s, '\n'); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index e99c3ce7aa65..e2c184eaa7db 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -26,10 +26,9 @@ const char *reserved_field_names[] = { /* Printing in basic type function template */ #define DEFINE_BASIC_PRINT_TYPE_FUNC(tname, type, fmt) \ -int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, const char *name, \ - void *data, void *ent) \ +int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, void *data, void *ent)\ { \ - trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ + trace_seq_printf(s, fmt, *(type *)data); \ return !trace_seq_has_overflowed(s); \ } \ const char PRINT_TYPE_FMT_NAME(tname)[] = fmt; \ @@ -49,15 +48,14 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(x32, u32, "0x%x") DEFINE_BASIC_PRINT_TYPE_FUNC(x64, u64, "0x%Lx") /* Print type function for string type */ -int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, - void *data, void *ent) +int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) { int len = *(u32 *)data >> 16; if (!len) - trace_seq_printf(s, " %s=(fault)", name); + trace_seq_puts(s, "(fault)"); else - trace_seq_printf(s, " %s=\"%s\"", name, + trace_seq_printf(s, "\"%s\"", (const char *)get_loc_data(data, ent)); return !trace_seq_has_overflowed(s); } diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 03b10f3201a5..8254a061ac35 100644 --- a/kernel/trace/trace_probe.h +++ 
b/kernel/trace/trace_probe.h @@ -82,7 +82,7 @@ static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) /* Data fetch function type */ typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); /* Printing function type */ -typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *); +typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); /* Fetch types */ enum { @@ -124,8 +124,7 @@ typedef u32 string_size; /* Printing in basic type function template */ #define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \ -int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ - void *data, void *ent); \ +int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, void *data, void *ent);\ extern const char PRINT_TYPE_FMT_NAME(type)[] DECLARE_BASIC_PRINT_TYPE_FUNC(u8); @@ -403,6 +402,20 @@ store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, } } +static inline int +print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args, + u8 *data, void *field) +{ + int i; + + for (i = 0; i < nr_args; i++) { + trace_seq_printf(s, " %s=", args[i].name); + if (!args[i].type->print(s, data + args[i].offset, field)) + return -ENOMEM; + } + return 0; +} + extern int set_print_fmt(struct trace_probe *tp, bool is_return); #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d09638706fe0..c55753e1079e 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -892,7 +892,6 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e struct trace_seq *s = &iter->seq; struct trace_uprobe *tu; u8 *data; - int i; entry = (struct uprobe_trace_entry_head *)iter->ent; tu = container_of(event, struct trace_uprobe, tp.call.event); @@ -909,12 +908,8 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e data = DATAOF_TRACE_ENTRY(entry, false); } - for (i = 0; i < tu->tp.nr_args; i++) { - struct probe_arg *parg = &tu->tp.args[i]; - - if (!parg->type->print(s, parg->name, data + parg->offset, entry)) - goto out; - } + if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0) + goto out; trace_seq_putc(s, '\n'); -- cgit v1.2.3 From eeb07b0615004bce145015b704de85fd3ac6cce0 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:17:05 +0900 Subject: tracing: probeevent: Cleanup argument field definition Cleanup event argument definition code in one place for maintenancability. 
Link: http://lkml.kernel.org/r/152465862529.26224.9068605421476018902.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 32 ++++---------------------------- kernel/trace/trace_probe.c | 21 +++++++++++++++++++++ kernel/trace/trace_probe.h | 2 ++ kernel/trace/trace_uprobe.c | 15 ++------------- 4 files changed, 29 insertions(+), 41 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6326c71181aa..1356927e32d0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1192,49 +1192,25 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, static int kprobe_event_define_fields(struct trace_event_call *event_call) { - int ret, i; + int ret; struct kprobe_trace_entry_head field; struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); - /* Set argument names as fields */ - for (i = 0; i < tk->tp.nr_args; i++) { - struct probe_arg *parg = &tk->tp.args[i]; - ret = trace_define_field(event_call, parg->type->fmttype, - parg->name, - sizeof(field) + parg->offset, - parg->type->size, - parg->type->is_signed, - FILTER_OTHER); - if (ret) - return ret; - } - return 0; + return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp); } static int kretprobe_event_define_fields(struct trace_event_call *event_call) { - int ret, i; + int ret; struct kretprobe_trace_entry_head field; struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); - /* Set argument names as fields */ - for (i = 0; i < tk->tp.nr_args; i++) { - struct probe_arg *parg = &tk->tp.args[i]; - ret = trace_define_field(event_call, parg->type->fmttype, - parg->name, - sizeof(field) + parg->offset, - parg->type->size, - parg->type->is_signed, - FILTER_OTHER); - if (ret) - return ret; - } - return 0; + return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp); } #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index e2c184eaa7db..21af28ffba3a 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -668,3 +668,24 @@ int set_print_fmt(struct trace_probe *tp, bool is_return) return 0; } + +int traceprobe_define_arg_fields(struct trace_event_call *event_call, + size_t offset, struct trace_probe *tp) +{ + int ret, i; + + /* Set argument names as fields */ + for (i = 0; i < tp->nr_args; i++) { + struct probe_arg *parg = &tp->args[i]; + + ret = trace_define_field(event_call, parg->type->fmttype, + parg->name, + offset + parg->offset, + parg->type->size, + parg->type->is_signed, + FILTER_OTHER); + if (ret) + return ret; + } + return 0; +} diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 8254a061ac35..a1df7763b797 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -429,3 +429,5 @@ create_local_trace_uprobe(char *name, unsigned long offs, unsigned long ref_ctr_offset, bool is_return); extern void destroy_local_trace_uprobe(struct trace_event_call *event_call); #endif +extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, + size_t offset, struct trace_probe *tp); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c55753e1079e..28a8f69cec89 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1018,7 +1018,7 
@@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) static int uprobe_event_define_fields(struct trace_event_call *event_call) { - int ret, i, size; + int ret, size; struct uprobe_trace_entry_head field; struct trace_uprobe *tu = event_call->data; @@ -1030,19 +1030,8 @@ static int uprobe_event_define_fields(struct trace_event_call *event_call) DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0); size = SIZEOF_TRACE_ENTRY(false); } - /* Set argument names as fields */ - for (i = 0; i < tu->tp.nr_args; i++) { - struct probe_arg *parg = &tu->tp.args[i]; - - ret = trace_define_field(event_call, parg->type->fmttype, - parg->name, size + parg->offset, - parg->type->size, parg->type->is_signed, - FILTER_OTHER); - if (ret) - return ret; - } - return 0; + return traceprobe_define_arg_fields(event_call, size, &tu->tp); } #ifdef CONFIG_PERF_EVENTS -- cgit v1.2.3 From 7bfbc63eda08b8158c040d6882c807f62b0750bb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:17:34 +0900 Subject: tracing: probeevent: Remove NOKPROBE_SYMBOL from print functions Remove unneeded NOKPROBE_SYMBOL from print functions since the print functions are only used when printing out the trace data, and not from kprobe handler. Link: http://lkml.kernel.org/r/152465865422.26224.10111548170594014954.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_probe.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 21af28ffba3a..5f3b5b3fd2cd 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -31,8 +31,7 @@ int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, void *data, void *ent)\ trace_seq_printf(s, fmt, *(type *)data); \ return !trace_seq_has_overflowed(s); \ } \ -const char PRINT_TYPE_FMT_NAME(tname)[] = fmt; \ -NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(tname)); +const char PRINT_TYPE_FMT_NAME(tname)[] = fmt; DEFINE_BASIC_PRINT_TYPE_FUNC(u8, u8, "%u") DEFINE_BASIC_PRINT_TYPE_FUNC(u16, u16, "%u") @@ -59,7 +58,6 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) (const char *)get_loc_data(data, ent)); return !trace_seq_has_overflowed(s); } -NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; -- cgit v1.2.3 From 533059281ee594f9fbb9e58042aaec77083ef251 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:18:03 +0900 Subject: tracing: probeevent: Introduce new argument fetching code Replace {k,u}probe event argument fetching framework with switch-case based. Currently that is implemented with structures, macros and chain of function-pointers, which is more complicated than necessary and may get a performance penalty by retpoline. This simplify that with an array of "fetch_insn" (opcode and oprands), and make process_fetch_insn() just interprets it. No function pointers are used. 
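As a rough, self-contained illustration of the scheme described above (not part of the patch), the sketch below shows the same idea in userspace form: an array of (opcode, operand) entries walked by one switch/case interpreter, with no function pointers. The opcode names, structure layout, and the load/dereference/store split used here are simplified stand-ins invented for the example; the real fetch_insn layout and the per-callsite process_fetch_insn() implementations are the ones added in the diff below.

/*
 * Simplified sketch of the fetch_insn interpreter idea.
 * All demo_* names are illustrative only; assumes a pointer
 * fits in a long, as on Linux.
 */
#include <stdio.h>

enum demo_op { OP_IMM, OP_DEREF, OP_ST_RAW, OP_END };

struct demo_insn {
	enum demo_op op;
	long immediate;		/* OP_IMM: value (e.g. an address) to load */
	long offset;		/* OP_DEREF: offset added before dereferencing */
};

/* Walk the array: one load op, any number of derefs, one store op, end. */
static int demo_process_insn(const struct demo_insn *code, long *dest)
{
	long val;

	/* 1st stage: get value from "context" (here: an immediate) */
	if (code->op != OP_IMM)
		return -1;
	val = code->immediate;
	code++;

	/* 2nd stage: dereference memory as many times as requested */
	while (code->op == OP_DEREF) {
		val = *(long *)(val + code->offset);
		code++;
	}

	/* 3rd stage: store the value */
	if (code->op != OP_ST_RAW)
		return -1;
	*dest = val;
	code++;

	return code->op == OP_END ? 0 : -1;
}

int main(void)
{
	long data[2] = { 0, 42 };
	/* "take the address of data, add 8, dereference, store the result" */
	struct demo_insn prog[] = {
		{ .op = OP_IMM,   .immediate = (long)data },
		{ .op = OP_DEREF, .offset = sizeof(long) },
		{ .op = OP_ST_RAW },
		{ .op = OP_END },
	};
	long out = 0;

	if (demo_process_insn(prog, &out) == 0)
		printf("fetched %ld\n", out);	/* prints "fetched 42" */
	return 0;
}

Compared with the chain of function pointers it replaces, the interpreter is ordinary direct control flow, which is why the commit message expects it to avoid the retpoline penalty of indirect calls.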
Link: http://lkml.kernel.org/r/152465868340.26224.2551120475197839464.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 291 +++++++++++++---------------- kernel/trace/trace_probe.c | 401 +++++++++++----------------------------- kernel/trace/trace_probe.h | 230 +++++------------------ kernel/trace/trace_probe_tmpl.h | 120 ++++++++++++ kernel/trace/trace_uprobe.c | 127 ++++++++----- 5 files changed, 491 insertions(+), 678 deletions(-) create mode 100644 kernel/trace/trace_probe_tmpl.h (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1356927e32d0..c024cc40d509 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -14,6 +14,7 @@ #include "trace_kprobe_selftest.h" #include "trace_probe.h" +#include "trace_probe_tmpl.h" #define KPROBE_EVENT_SYSTEM "kprobes" #define KRETPROBE_MAXACTIVE_MAX 4096 @@ -120,160 +121,6 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs); -/* Memory fetching by symbol */ -struct symbol_cache { - char *symbol; - long offset; - unsigned long addr; -}; - -unsigned long update_symbol_cache(struct symbol_cache *sc) -{ - sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); - - if (sc->addr) - sc->addr += sc->offset; - - return sc->addr; -} - -void free_symbol_cache(struct symbol_cache *sc) -{ - kfree(sc->symbol); - kfree(sc); -} - -struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) -{ - struct symbol_cache *sc; - - if (!sym || strlen(sym) == 0) - return NULL; - - sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); - if (!sc) - return NULL; - - sc->symbol = kstrdup(sym, GFP_KERNEL); - if (!sc->symbol) { - kfree(sc); - return NULL; - } - sc->offset = offset; - update_symbol_cache(sc); - - return sc; -} - -/* - * Kprobes-specific fetch functions - */ -#define DEFINE_FETCH_stack(type) \ -static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \ - void *offset, void *dest) \ -{ \ - *(type *)dest = (type)regs_get_kernel_stack_nth(regs, \ - (unsigned int)((unsigned long)offset)); \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type)); - -DEFINE_BASIC_FETCH_FUNCS(stack) -/* No string on the stack entry */ -#define fetch_stack_string NULL -#define fetch_stack_string_size NULL - -#define DEFINE_FETCH_memory(type) \ -static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \ - void *addr, void *dest) \ -{ \ - type retval; \ - if (probe_kernel_address(addr, retval)) \ - *(type *)dest = 0; \ - else \ - *(type *)dest = retval; \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type)); - -DEFINE_BASIC_FETCH_FUNCS(memory) -/* - * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max - * length and relative data location. - */ -static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - void *addr, void *dest) -{ - int maxlen = get_rloc_len(*(u32 *)dest); - u8 *dst = get_rloc_data(dest); - long ret; - - if (!maxlen) - return; - - /* - * Try to get string again, since the string can be changed while - * probing. 
- */ - ret = strncpy_from_unsafe(dst, addr, maxlen); - - if (ret < 0) { /* Failed to fetch string */ - dst[0] = '\0'; - *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); - } else { - *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest)); - } -} -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string)); - -/* Return the length of string -- including null terminal byte */ -static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, - void *addr, void *dest) -{ - mm_segment_t old_fs; - int ret, len = 0; - u8 c; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - pagefault_disable(); - - do { - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); - len++; - } while (c && ret == 0 && len < MAX_STRING_SIZE); - - pagefault_enable(); - set_fs(old_fs); - - if (ret < 0) /* Failed to check the length */ - *(u32 *)dest = 0; - else - *(u32 *)dest = len; -} -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size)); - -#define DEFINE_FETCH_symbol(type) \ -void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\ -{ \ - struct symbol_cache *sc = data; \ - if (sc->addr) \ - fetch_memory_##type(regs, (void *)sc->addr, dest); \ - else \ - *(type *)dest = 0; \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type)); - -DEFINE_BASIC_FETCH_FUNCS(symbol) -DEFINE_FETCH_symbol(string) -DEFINE_FETCH_symbol(string_size) - -/* kprobes don't support file_offset fetch methods */ -#define fetch_file_offset_u8 NULL -#define fetch_file_offset_u16 NULL -#define fetch_file_offset_u32 NULL -#define fetch_file_offset_u64 NULL -#define fetch_file_offset_string NULL -#define fetch_file_offset_string_size NULL - /* Fetch type information table */ static const struct fetch_type kprobes_fetch_type_table[] = { /* Special types */ @@ -529,7 +376,7 @@ static bool within_notrace_func(struct trace_kprobe *tk) /* Internal register function - just handle k*probes and flags */ static int __register_trace_kprobe(struct trace_kprobe *tk) { - int i, ret; + int ret; if (trace_probe_is_registered(&tk->tp)) return -EINVAL; @@ -540,9 +387,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) return -EINVAL; } - for (i = 0; i < tk->tp.nr_args; i++) - traceprobe_update_arg(&tk->tp.args[i]); - /* Set/clear disabled flag according to tp->flag */ if (trace_probe_is_enabled(&tk->tp)) tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; @@ -876,8 +720,8 @@ static int create_trace_kprobe(int argc, char **argv) /* Parse fetch argument */ ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, - is_return, true, - kprobes_fetch_type_table); + is_return, true, + kprobes_fetch_type_table); if (ret) { pr_info("Parse error at argument[%d]. (%d)\n", i, ret); goto error; @@ -1031,6 +875,133 @@ static const struct file_operations kprobe_profile_ops = { .release = seq_release, }; +/* Kprobe specific fetch functions */ + +/* Return the length of string -- including null terminal byte */ +static nokprobe_inline void +fetch_store_strlen(unsigned long addr, void *dest) +{ + mm_segment_t old_fs; + int ret, len = 0; + u8 c; + + old_fs = get_fs(); + set_fs(KERNEL_DS); + pagefault_disable(); + + do { + ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); + len++; + } while (c && ret == 0 && len < MAX_STRING_SIZE); + + pagefault_enable(); + set_fs(old_fs); + + if (ret < 0) /* Failed to check the length */ + *(u32 *)dest = 0; + else + *(u32 *)dest = len; +} + +/* + * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max + * length and relative data location. 
+ */ +static nokprobe_inline void +fetch_store_string(unsigned long addr, void *dest) +{ + int maxlen = get_rloc_len(*(u32 *)dest); + u8 *dst = get_rloc_data(dest); + long ret; + + if (!maxlen) + return; + + /* + * Try to get string again, since the string can be changed while + * probing. + */ + ret = strncpy_from_unsafe(dst, (void *)addr, maxlen); + + if (ret < 0) { /* Failed to fetch string */ + dst[0] = '\0'; + *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); + } else { + *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest)); + } +} + +/* Note that we don't verify it, since the code does not come from user space */ +static int +process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, + bool pre) +{ + unsigned long val; + int ret; + + /* 1st stage: get value from context */ + switch (code->op) { + case FETCH_OP_REG: + val = regs_get_register(regs, code->param); + break; + case FETCH_OP_STACK: + val = regs_get_kernel_stack_nth(regs, code->param); + break; + case FETCH_OP_STACKP: + val = kernel_stack_pointer(regs); + break; + case FETCH_OP_RETVAL: + val = regs_return_value(regs); + break; + case FETCH_OP_IMM: + val = code->immediate; + break; + case FETCH_OP_COMM: + val = (unsigned long)current->comm; + break; + default: + return -EILSEQ; + } + code++; + + /* 2nd stage: dereference memory if needed */ + while (code->op == FETCH_OP_DEREF) { + ret = probe_kernel_read(&val, (void *)val + code->offset, + sizeof(val)); + if (ret) + return ret; + code++; + } + + /* 3rd stage: store value to buffer */ + switch (code->op) { + case FETCH_OP_ST_RAW: + fetch_store_raw(val, code, dest); + break; + case FETCH_OP_ST_MEM: + probe_kernel_read(dest, (void *)val + code->offset, code->size); + break; + case FETCH_OP_ST_STRING: + if (pre) + fetch_store_strlen(val + code->offset, dest); + else + fetch_store_string(val + code->offset, dest); + break; + default: + return -EILSEQ; + } + code++; + + /* 4th stage: modify stored value if needed */ + if (code->op == FETCH_OP_MOD_BF) { + fetch_apply_bitfield(code, dest); + code++; + } + + return code->op == FETCH_OP_END ? 
0 : -EILSEQ; +} +NOKPROBE_SYMBOL(process_fetch_insn) + /* Kprobe handler */ static nokprobe_inline void __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 5f3b5b3fd2cd..c59c69cb2f2e 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -61,174 +61,6 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; -#define CHECK_FETCH_FUNCS(method, fn) \ - (((FETCH_FUNC_NAME(method, u8) == fn) || \ - (FETCH_FUNC_NAME(method, u16) == fn) || \ - (FETCH_FUNC_NAME(method, u32) == fn) || \ - (FETCH_FUNC_NAME(method, u64) == fn) || \ - (FETCH_FUNC_NAME(method, string) == fn) || \ - (FETCH_FUNC_NAME(method, string_size) == fn)) \ - && (fn != NULL)) - -/* Data fetch function templates */ -#define DEFINE_FETCH_reg(type) \ -void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, void *offset, void *dest) \ -{ \ - *(type *)dest = (type)regs_get_register(regs, \ - (unsigned int)((unsigned long)offset)); \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(reg, type)); -DEFINE_BASIC_FETCH_FUNCS(reg) -/* No string on the register */ -#define fetch_reg_string NULL -#define fetch_reg_string_size NULL - -#define DEFINE_FETCH_retval(type) \ -void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs, \ - void *dummy, void *dest) \ -{ \ - *(type *)dest = (type)regs_return_value(regs); \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(retval, type)); -DEFINE_BASIC_FETCH_FUNCS(retval) -/* No string on the retval */ -#define fetch_retval_string NULL -#define fetch_retval_string_size NULL - -/* Dereference memory access function */ -struct deref_fetch_param { - struct fetch_param orig; - long offset; - fetch_func_t fetch; - fetch_func_t fetch_size; -}; - -#define DEFINE_FETCH_deref(type) \ -void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs, \ - void *data, void *dest) \ -{ \ - struct deref_fetch_param *dprm = data; \ - unsigned long addr; \ - call_fetch(&dprm->orig, regs, &addr); \ - if (addr) { \ - addr += dprm->offset; \ - dprm->fetch(regs, (void *)addr, dest); \ - } else \ - *(type *)dest = 0; \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, type)); -DEFINE_BASIC_FETCH_FUNCS(deref) -DEFINE_FETCH_deref(string) - -void FETCH_FUNC_NAME(deref, string_size)(struct pt_regs *regs, - void *data, void *dest) -{ - struct deref_fetch_param *dprm = data; - unsigned long addr; - - call_fetch(&dprm->orig, regs, &addr); - if (addr && dprm->fetch_size) { - addr += dprm->offset; - dprm->fetch_size(regs, (void *)addr, dest); - } else - *(string_size *)dest = 0; -} -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(deref, string_size)); - -static void update_deref_fetch_param(struct deref_fetch_param *data) -{ - if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) - update_deref_fetch_param(data->orig.data); - else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) - update_symbol_cache(data->orig.data); -} -NOKPROBE_SYMBOL(update_deref_fetch_param); - -static void free_deref_fetch_param(struct deref_fetch_param *data) -{ - if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) - free_deref_fetch_param(data->orig.data); - else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) - free_symbol_cache(data->orig.data); - kfree(data); -} -NOKPROBE_SYMBOL(free_deref_fetch_param); - -/* Bitfield fetch function */ -struct bitfield_fetch_param { - struct fetch_param orig; - unsigned char hi_shift; - unsigned char low_shift; -}; - -#define DEFINE_FETCH_bitfield(type) \ -void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs, 
\ - void *data, void *dest) \ -{ \ - struct bitfield_fetch_param *bprm = data; \ - type buf = 0; \ - call_fetch(&bprm->orig, regs, &buf); \ - if (buf) { \ - buf <<= bprm->hi_shift; \ - buf >>= bprm->low_shift; \ - } \ - *(type *)dest = buf; \ -} \ -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(bitfield, type)); -DEFINE_BASIC_FETCH_FUNCS(bitfield) -#define fetch_bitfield_string NULL -#define fetch_bitfield_string_size NULL - -static void -update_bitfield_fetch_param(struct bitfield_fetch_param *data) -{ - /* - * Don't check the bitfield itself, because this must be the - * last fetch function. - */ - if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) - update_deref_fetch_param(data->orig.data); - else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) - update_symbol_cache(data->orig.data); -} - -static void -free_bitfield_fetch_param(struct bitfield_fetch_param *data) -{ - /* - * Don't check the bitfield itself, because this must be the - * last fetch function. - */ - if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) - free_deref_fetch_param(data->orig.data); - else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) - free_symbol_cache(data->orig.data); - - kfree(data); -} - -void FETCH_FUNC_NAME(comm, string)(struct pt_regs *regs, - void *data, void *dest) -{ - int maxlen = get_rloc_len(*(u32 *)dest); - u8 *dst = get_rloc_data(dest); - long ret; - - if (!maxlen) - return; - - ret = strlcpy(dst, current->comm, maxlen); - *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest)); -} -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string)); - -void FETCH_FUNC_NAME(comm, string_size)(struct pt_regs *regs, - void *data, void *dest) -{ - *(u32 *)dest = strlen(current->comm) + 1; -} -NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string_size)); - static const struct fetch_type *find_fetch_type(const char *type, const struct fetch_type *ftbl) { @@ -272,37 +104,6 @@ fail: return NULL; } -/* Special function : only accept unsigned long */ -static void fetch_kernel_stack_address(struct pt_regs *regs, void *dummy, void *dest) -{ - *(unsigned long *)dest = kernel_stack_pointer(regs); -} -NOKPROBE_SYMBOL(fetch_kernel_stack_address); - -static void fetch_user_stack_address(struct pt_regs *regs, void *dummy, void *dest) -{ - *(unsigned long *)dest = user_stack_pointer(regs); -} -NOKPROBE_SYMBOL(fetch_user_stack_address); - -static fetch_func_t get_fetch_size_function(const struct fetch_type *type, - fetch_func_t orig_fn, - const struct fetch_type *ftbl) -{ - int i; - - if (type != &ftbl[FETCH_TYPE_STRING]) - return NULL; /* Only string type needs size function */ - - for (i = 0; i < FETCH_MTD_END; i++) - if (type->fetch[i] == orig_fn) - return ftbl[FETCH_TYPE_STRSIZE].fetch[i]; - - WARN_ON(1); /* This should not happen */ - - return NULL; -} - /* Split symbol and offset. 
*/ int traceprobe_split_symbol_offset(char *symbol, long *offset) { @@ -327,7 +128,7 @@ int traceprobe_split_symbol_offset(char *symbol, long *offset) #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) static int parse_probe_vars(char *arg, const struct fetch_type *t, - struct fetch_param *f, bool is_return, + struct fetch_insn *code, bool is_return, bool is_kprobe) { int ret = 0; @@ -335,33 +136,24 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, if (strcmp(arg, "retval") == 0) { if (is_return) - f->fn = t->fetch[FETCH_MTD_retval]; + code->op = FETCH_OP_RETVAL; else ret = -EINVAL; } else if (strncmp(arg, "stack", 5) == 0) { if (arg[5] == '\0') { - if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR)) - return -EINVAL; - - if (is_kprobe) - f->fn = fetch_kernel_stack_address; - else - f->fn = fetch_user_stack_address; + code->op = FETCH_OP_STACKP; } else if (isdigit(arg[5])) { ret = kstrtoul(arg + 5, 10, ¶m); if (ret || (is_kprobe && param > PARAM_MAX_STACK)) ret = -EINVAL; else { - f->fn = t->fetch[FETCH_MTD_stack]; - f->data = (void *)param; + code->op = FETCH_OP_STACK; + code->param = (unsigned int)param; } } else ret = -EINVAL; } else if (strcmp(arg, "comm") == 0) { - if (strcmp(t->name, "string") != 0 && - strcmp(t->name, "string_size") != 0) - return -EINVAL; - f->fn = t->fetch[FETCH_MTD_comm]; + code->op = FETCH_OP_COMM; } else ret = -EINVAL; @@ -369,10 +161,13 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, } /* Recursive argument parser */ -static int parse_probe_arg(char *arg, const struct fetch_type *t, - struct fetch_param *f, bool is_return, bool is_kprobe, - const struct fetch_type *ftbl) +static int +parse_probe_arg(char *arg, const struct fetch_type *type, + struct fetch_insn **pcode, struct fetch_insn *end, + bool is_return, bool is_kprobe, + const struct fetch_type *ftbl) { + struct fetch_insn *code = *pcode; unsigned long param; long offset; char *tmp; @@ -380,14 +175,15 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, switch (arg[0]) { case '$': - ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe); + ret = parse_probe_vars(arg + 1, type, code, + is_return, is_kprobe); break; case '%': /* named register */ ret = regs_query_register_offset(arg + 1); if (ret >= 0) { - f->fn = t->fetch[FETCH_MTD_reg]; - f->data = (void *)(unsigned long)ret; + code->op = FETCH_OP_REG; + code->param = (unsigned int)ret; ret = 0; } break; @@ -397,9 +193,9 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, ret = kstrtoul(arg + 1, 0, ¶m); if (ret) break; - - f->fn = t->fetch[FETCH_MTD_memory]; - f->data = (void *)param; + /* load address */ + code->op = FETCH_OP_IMM; + code->immediate = param; } else if (arg[1] == '+') { /* kprobes don't support file offsets */ if (is_kprobe) @@ -409,8 +205,8 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, if (ret) break; - f->fn = t->fetch[FETCH_MTD_file_offset]; - f->data = (void *)offset; + code->op = FETCH_OP_FOFFS; + code->immediate = (unsigned long)offset; // imm64? 
} else { /* uprobes don't support symbols */ if (!is_kprobe) @@ -420,10 +216,19 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, if (ret) break; - f->data = alloc_symbol_cache(arg + 1, offset); - if (f->data) - f->fn = t->fetch[FETCH_MTD_symbol]; + code->op = FETCH_OP_IMM; + code->immediate = + (unsigned long)kallsyms_lookup_name(arg + 1); + if (!code->immediate) + return -ENOENT; + code->immediate += offset; } + /* These are fetching from memory */ + if (++code == end) + return -E2BIG; + *pcode = code; + code->op = FETCH_OP_DEREF; + code->offset = offset; break; case '+': /* deref memory */ @@ -431,11 +236,10 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, case '-': tmp = strchr(arg, '('); if (!tmp) - break; + return -EINVAL; *tmp = '\0'; ret = kstrtol(arg, 0, &offset); - if (ret) break; @@ -443,36 +247,29 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, tmp = strrchr(arg, ')'); if (tmp) { - struct deref_fetch_param *dprm; - const struct fetch_type *t2; + const struct fetch_type *t2; t2 = find_fetch_type(NULL, ftbl); *tmp = '\0'; - dprm = kzalloc(sizeof(struct deref_fetch_param), GFP_KERNEL); - - if (!dprm) - return -ENOMEM; - - dprm->offset = offset; - dprm->fetch = t->fetch[FETCH_MTD_memory]; - dprm->fetch_size = get_fetch_size_function(t, - dprm->fetch, ftbl); - ret = parse_probe_arg(arg, t2, &dprm->orig, is_return, - is_kprobe, ftbl); + ret = parse_probe_arg(arg, t2, &code, end, is_return, + is_kprobe, ftbl); if (ret) - kfree(dprm); - else { - f->fn = t->fetch[FETCH_MTD_deref]; - f->data = (void *)dprm; - } + break; + if (code->op == FETCH_OP_COMM) + return -EINVAL; + if (++code == end) + return -E2BIG; + *pcode = code; + + code->op = FETCH_OP_DEREF; + code->offset = offset; } break; } - if (!ret && !f->fn) { /* Parsed, but do not find fetch method */ - pr_info("%s type has no corresponding fetch method.\n", t->name); + if (!ret && code->op == FETCH_OP_NOP) { + /* Parsed, but do not find fetch method */ ret = -EINVAL; } - return ret; } @@ -481,22 +278,15 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t, /* Bitfield type needs to be parsed into a fetch function */ static int __parse_bitfield_probe_arg(const char *bf, const struct fetch_type *t, - struct fetch_param *f) + struct fetch_insn **pcode) { - struct bitfield_fetch_param *bprm; + struct fetch_insn *code = *pcode; unsigned long bw, bo; char *tail; if (*bf != 'b') return 0; - bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); - if (!bprm) - return -ENOMEM; - - bprm->orig = *f; - f->fn = t->fetch[FETCH_MTD_bitfield]; - f->data = (void *)bprm; bw = simple_strtoul(bf + 1, &tail, 0); /* Use simple one */ if (bw == 0 || *tail != '@') @@ -507,9 +297,15 @@ static int __parse_bitfield_probe_arg(const char *bf, if (tail == bf || *tail != '/') return -EINVAL; + code++; + if (code->op != FETCH_OP_NOP) + return -E2BIG; + *pcode = code; - bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo); - bprm->low_shift = bprm->hi_shift + bo; + code->op = FETCH_OP_MOD_BF; + code->lshift = BYTES_TO_BITS(t->size) - (bw + bo); + code->rshift = BYTES_TO_BITS(t->size) - bw; + code->basesize = t->size; return (BYTES_TO_BITS(t->size) < (bw + bo)) ? 
-EINVAL : 0; } @@ -519,6 +315,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, struct probe_arg *parg, bool is_return, bool is_kprobe, const struct fetch_type *ftbl) { + struct fetch_insn *code, *tmp = NULL; const char *t; int ret; @@ -549,18 +346,60 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, } parg->offset = *size; *size += parg->type->size; - ret = parse_probe_arg(arg, parg->type, &parg->fetch, is_return, - is_kprobe, ftbl); - - if (ret >= 0 && t != NULL) - ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch); - if (ret >= 0) { - parg->fetch_size.fn = get_fetch_size_function(parg->type, - parg->fetch.fn, - ftbl); - parg->fetch_size.data = parg->fetch.data; + code = tmp = kzalloc(sizeof(*code) * FETCH_INSN_MAX, GFP_KERNEL); + if (!code) + return -ENOMEM; + code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; + + ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], + is_return, is_kprobe, ftbl); + if (ret) + goto fail; + + /* Store operation */ + if (!strcmp(parg->type->name, "string")) { + if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_IMM && + code->op != FETCH_OP_COMM) { + pr_info("string only accepts memory or address.\n"); + ret = -EINVAL; + goto fail; + } + /* Since IMM or COMM must be the 1st insn, this is safe */ + if (code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) + code++; + code->op = FETCH_OP_ST_STRING; /* In DEREF case, replace it */ + parg->dynamic = true; + } else if (code->op == FETCH_OP_DEREF) { + code->op = FETCH_OP_ST_MEM; + code->size = parg->type->size; + } else { + code++; + if (code->op != FETCH_OP_NOP) { + ret = -E2BIG; + goto fail; + } + code->op = FETCH_OP_ST_RAW; + code->size = parg->type->size; + } + /* Modify operation */ + if (t != NULL) { + ret = __parse_bitfield_probe_arg(t, parg->type, &code); + if (ret) + goto fail; } + code++; + code->op = FETCH_OP_END; + + /* Shrink down the code buffer */ + parg->code = kzalloc(sizeof(*code) * (code - tmp + 1), GFP_KERNEL); + if (!parg->code) + ret = -ENOMEM; + else + memcpy(parg->code, tmp, sizeof(*code) * (code - tmp + 1)); + +fail: + kfree(tmp); return ret; } @@ -582,25 +421,9 @@ int traceprobe_conflict_field_name(const char *name, return 0; } -void traceprobe_update_arg(struct probe_arg *arg) -{ - if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) - update_bitfield_fetch_param(arg->fetch.data); - else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) - update_deref_fetch_param(arg->fetch.data); - else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) - update_symbol_cache(arg->fetch.data); -} - void traceprobe_free_probe_arg(struct probe_arg *arg) { - if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) - free_bitfield_fetch_param(arg->fetch.data); - else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) - free_deref_fetch_param(arg->fetch.data); - else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) - free_symbol_cache(arg->fetch.data); - + kfree(arg->code); kfree(arg->name); kfree(arg->comm); } diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index a1df7763b797..42c724a7ad11 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -79,25 +79,50 @@ static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) return (u8 *)ent + get_rloc_offs(*dl); } -/* Data fetch function type */ -typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); /* Printing function type */ typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); -/* Fetch types */ -enum { - FETCH_MTD_reg = 0, - FETCH_MTD_stack, - FETCH_MTD_retval, - 
FETCH_MTD_comm, - FETCH_MTD_memory, - FETCH_MTD_symbol, - FETCH_MTD_deref, - FETCH_MTD_bitfield, - FETCH_MTD_file_offset, - FETCH_MTD_END, +enum fetch_op { + FETCH_OP_NOP = 0, + // Stage 1 (load) ops + FETCH_OP_REG, /* Register : .param = offset */ + FETCH_OP_STACK, /* Stack : .param = index */ + FETCH_OP_STACKP, /* Stack pointer */ + FETCH_OP_RETVAL, /* Return value */ + FETCH_OP_IMM, /* Immediate : .immediate */ + FETCH_OP_COMM, /* Current comm */ + FETCH_OP_FOFFS, /* File offset: .immediate */ + // Stage 2 (dereference) op + FETCH_OP_DEREF, /* Dereference: .offset */ + // Stage 3 (store) ops + FETCH_OP_ST_RAW, /* Raw: .size */ + FETCH_OP_ST_MEM, /* Mem: .offset, .size */ + FETCH_OP_ST_STRING, /* String: .offset, .size */ + // Stage 4 (modify) op + FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */ + FETCH_OP_END, }; +struct fetch_insn { + enum fetch_op op; + union { + unsigned int param; + struct { + unsigned int size; + int offset; + }; + struct { + unsigned char basesize; + unsigned char lshift; + unsigned char rshift; + }; + unsigned long immediate; + }; +}; + +/* fetch + deref*N + store + mod + end <= 16, this allows N=12, enough */ +#define FETCH_INSN_MAX 16 + /* Fetch type information table */ struct fetch_type { const char *name; /* Name of type */ @@ -106,13 +131,6 @@ struct fetch_type { print_type_func_t print; /* Print functions */ const char *fmt; /* Fromat string */ const char *fmttype; /* Name in format file */ - /* Fetch functions */ - fetch_func_t fetch[FETCH_MTD_END]; -}; - -struct fetch_param { - fetch_func_t fn; - void *data; }; /* For defining macros, define string/string_size types */ @@ -142,66 +160,12 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(x64); DECLARE_BASIC_PRINT_TYPE_FUNC(string); -#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type - -/* Declare macro for basic types */ -#define DECLARE_FETCH_FUNC(method, type) \ -extern void FETCH_FUNC_NAME(method, type)(struct pt_regs *regs, \ - void *data, void *dest) - -#define DECLARE_BASIC_FETCH_FUNCS(method) \ -DECLARE_FETCH_FUNC(method, u8); \ -DECLARE_FETCH_FUNC(method, u16); \ -DECLARE_FETCH_FUNC(method, u32); \ -DECLARE_FETCH_FUNC(method, u64) - -DECLARE_BASIC_FETCH_FUNCS(reg); -#define fetch_reg_string NULL -#define fetch_reg_string_size NULL - -DECLARE_BASIC_FETCH_FUNCS(retval); -#define fetch_retval_string NULL -#define fetch_retval_string_size NULL - -DECLARE_BASIC_FETCH_FUNCS(symbol); -DECLARE_FETCH_FUNC(symbol, string); -DECLARE_FETCH_FUNC(symbol, string_size); - -DECLARE_BASIC_FETCH_FUNCS(deref); -DECLARE_FETCH_FUNC(deref, string); -DECLARE_FETCH_FUNC(deref, string_size); - -DECLARE_BASIC_FETCH_FUNCS(bitfield); -#define fetch_bitfield_string NULL -#define fetch_bitfield_string_size NULL - -/* comm only makes sense as a string */ -#define fetch_comm_u8 NULL -#define fetch_comm_u16 NULL -#define fetch_comm_u32 NULL -#define fetch_comm_u64 NULL -DECLARE_FETCH_FUNC(comm, string); -DECLARE_FETCH_FUNC(comm, string_size); - -/* - * Define macro for basic types - we don't need to define s* types, because - * we have to care only about bitwidth at recording time. 
- */ -#define DEFINE_BASIC_FETCH_FUNCS(method) \ -DEFINE_FETCH_##method(u8) \ -DEFINE_FETCH_##method(u16) \ -DEFINE_FETCH_##method(u32) \ -DEFINE_FETCH_##method(u64) - /* Default (unsigned long) fetch type */ #define __DEFAULT_FETCH_TYPE(t) x##t #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) -#define ASSIGN_FETCH_FUNC(method, type) \ - [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) - #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ {.name = _name, \ .size = _size, \ @@ -209,17 +173,6 @@ DEFINE_FETCH_##method(u64) .print = PRINT_TYPE_FUNC_NAME(ptype), \ .fmt = PRINT_TYPE_FMT_NAME(ptype), \ .fmttype = _fmttype, \ - .fetch = { \ -ASSIGN_FETCH_FUNC(reg, ftype), \ -ASSIGN_FETCH_FUNC(stack, ftype), \ -ASSIGN_FETCH_FUNC(retval, ftype), \ -ASSIGN_FETCH_FUNC(comm, ftype), \ -ASSIGN_FETCH_FUNC(memory, ftype), \ -ASSIGN_FETCH_FUNC(symbol, ftype), \ -ASSIGN_FETCH_FUNC(deref, ftype), \ -ASSIGN_FETCH_FUNC(bitfield, ftype), \ -ASSIGN_FETCH_FUNC(file_offset, ftype), \ - } \ } #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ @@ -231,42 +184,13 @@ ASSIGN_FETCH_FUNC(file_offset, ftype), \ #define ASSIGN_FETCH_TYPE_END {} -#define FETCH_TYPE_STRING 0 -#define FETCH_TYPE_STRSIZE 1 +#define FETCH_TYPE_STRING 0 +#define FETCH_TYPE_STRSIZE 1 #ifdef CONFIG_KPROBE_EVENTS -struct symbol_cache; -unsigned long update_symbol_cache(struct symbol_cache *sc); -void free_symbol_cache(struct symbol_cache *sc); -struct symbol_cache *alloc_symbol_cache(const char *sym, long offset); bool trace_kprobe_on_func_entry(struct trace_event_call *call); bool trace_kprobe_error_injectable(struct trace_event_call *call); #else -/* uprobes do not support symbol fetch methods */ -#define fetch_symbol_u8 NULL -#define fetch_symbol_u16 NULL -#define fetch_symbol_u32 NULL -#define fetch_symbol_u64 NULL -#define fetch_symbol_string NULL -#define fetch_symbol_string_size NULL - -struct symbol_cache { -}; -static inline unsigned long __used update_symbol_cache(struct symbol_cache *sc) -{ - return 0; -} - -static inline void __used free_symbol_cache(struct symbol_cache *sc) -{ -} - -static inline struct symbol_cache * __used -alloc_symbol_cache(const char *sym, long offset) -{ - return NULL; -} - static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call) { return false; @@ -279,8 +203,8 @@ static inline bool trace_kprobe_error_injectable(struct trace_event_call *call) #endif /* CONFIG_KPROBE_EVENTS */ struct probe_arg { - struct fetch_param fetch; - struct fetch_param fetch_size; + struct fetch_insn *code; + bool dynamic;/* Dynamic array (string) is used */ unsigned int offset; /* Offset from argument entry */ const char *name; /* Name of this argument */ const char *comm; /* Command of this argument */ @@ -312,12 +236,6 @@ static inline bool trace_probe_is_registered(struct trace_probe *tp) return !!(tp->flags & TP_FLAG_REGISTERED); } -static nokprobe_inline void call_fetch(struct fetch_param *fprm, - struct pt_regs *regs, void *dest) -{ - return fprm->fn(regs, fprm->data, dest); -} - /* Check the name is good for event/group/fields */ static inline bool is_good_name(const char *name) { @@ -354,68 +272,6 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg); extern int traceprobe_split_symbol_offset(char *symbol, long *offset); -/* Sum up total data length for dynamic arraies (strings) */ -static nokprobe_inline int -__get_data_size(struct trace_probe *tp, 
struct pt_regs *regs) -{ - int i, ret = 0; - u32 len; - - for (i = 0; i < tp->nr_args; i++) - if (unlikely(tp->args[i].fetch_size.fn)) { - call_fetch(&tp->args[i].fetch_size, regs, &len); - ret += len; - } - - return ret; -} - -/* Store the value of each argument */ -static nokprobe_inline void -store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, - u8 *data, int maxlen) -{ - int i; - u32 end = tp->size; - u32 *dl; /* Data (relative) location */ - - for (i = 0; i < tp->nr_args; i++) { - if (unlikely(tp->args[i].fetch_size.fn)) { - /* - * First, we set the relative location and - * maximum data length to *dl - */ - dl = (u32 *)(data + tp->args[i].offset); - *dl = make_data_rloc(maxlen, end - tp->args[i].offset); - /* Then try to fetch string or dynamic array data */ - call_fetch(&tp->args[i].fetch, regs, dl); - /* Reduce maximum length */ - end += get_rloc_len(*dl); - maxlen -= get_rloc_len(*dl); - /* Trick here, convert data_rloc to data_loc */ - *dl = convert_rloc_to_loc(*dl, - ent_size + tp->args[i].offset); - } else - /* Just fetching data normally */ - call_fetch(&tp->args[i].fetch, regs, - data + tp->args[i].offset); - } -} - -static inline int -print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args, - u8 *data, void *field) -{ - int i; - - for (i = 0; i < nr_args; i++) { - trace_seq_printf(s, " %s=", args[i].name); - if (!args[i].type->print(s, data + args[i].offset, field)) - return -ENOMEM; - } - return 0; -} - extern int set_print_fmt(struct trace_probe *tp, bool is_return); #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h new file mode 100644 index 000000000000..c8a5272abf01 --- /dev/null +++ b/kernel/trace/trace_probe_tmpl.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Traceprobe fetch helper inlines + */ + +static nokprobe_inline void +fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf) +{ + switch (code->size) { + case 1: + *(u8 *)buf = (u8)val; + break; + case 2: + *(u16 *)buf = (u16)val; + break; + case 4: + *(u32 *)buf = (u32)val; + break; + case 8: + //TBD: 32bit signed + *(u64 *)buf = (u64)val; + break; + default: + *(unsigned long *)buf = val; + } +} + +static nokprobe_inline void +fetch_apply_bitfield(struct fetch_insn *code, void *buf) +{ + switch (code->basesize) { + case 1: + *(u8 *)buf <<= code->lshift; + *(u8 *)buf >>= code->rshift; + break; + case 2: + *(u16 *)buf <<= code->lshift; + *(u16 *)buf >>= code->rshift; + break; + case 4: + *(u32 *)buf <<= code->lshift; + *(u32 *)buf >>= code->rshift; + break; + case 8: + *(u64 *)buf <<= code->lshift; + *(u64 *)buf >>= code->rshift; + break; + } +} + +/* Define this for each callsite */ +static int +process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, + void *dest, bool pre); + +/* Sum up total data length for dynamic arraies (strings) */ +static nokprobe_inline int +__get_data_size(struct trace_probe *tp, struct pt_regs *regs) +{ + struct probe_arg *arg; + int i, ret = 0; + u32 len; + + for (i = 0; i < tp->nr_args; i++) { + arg = tp->args + i; + if (unlikely(arg->dynamic)) { + process_fetch_insn(arg->code, regs, &len, true); + ret += len; + } + } + + return ret; +} + +/* Store the value of each argument */ +static nokprobe_inline void +store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, + u8 *data, int maxlen) +{ + struct probe_arg *arg; + u32 end = tp->size; + u32 *dl; /* Data (relative) location */ + int i; + + for (i = 0; i < tp->nr_args; i++) 
{ + arg = tp->args + i; + if (unlikely(arg->dynamic)) { + /* + * First, we set the relative location and + * maximum data length to *dl + */ + dl = (u32 *)(data + arg->offset); + *dl = make_data_rloc(maxlen, end - arg->offset); + /* Then try to fetch string or dynamic array data */ + process_fetch_insn(arg->code, regs, dl, false); + /* Reduce maximum length */ + end += get_rloc_len(*dl); + maxlen -= get_rloc_len(*dl); + /* Trick here, convert data_rloc to data_loc */ + *dl = convert_rloc_to_loc(*dl, ent_size + arg->offset); + } else + /* Just fetching data normally */ + process_fetch_insn(arg->code, regs, data + arg->offset, + false); + } +} + +static inline int +print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args, + u8 *data, void *field) +{ + int i; + + for (i = 0; i < nr_args; i++) { + trace_seq_printf(s, " %s=", args[i].name); + if (!args[i].type->print(s, data + args[i].offset, field)) + return -ENOMEM; + } + return 0; +} diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 28a8f69cec89..e076f89ab33a 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -15,6 +15,7 @@ #include #include "trace_probe.h" +#include "trace_probe_tmpl.h" #define UPROBE_EVENT_SYSTEM "uprobes" @@ -99,37 +100,19 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) /* * Uprobes-specific fetch functions */ -#define DEFINE_FETCH_stack(type) \ -static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \ - void *offset, void *dest) \ -{ \ - *(type *)dest = (type)get_user_stack_nth(regs, \ - ((unsigned long)offset)); \ -} -DEFINE_BASIC_FETCH_FUNCS(stack) -/* No string on the stack entry */ -#define fetch_stack_string NULL -#define fetch_stack_string_size NULL - -#define DEFINE_FETCH_memory(type) \ -static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \ - void *addr, void *dest) \ -{ \ - type retval; \ - void __user *vaddr = (void __force __user *) addr; \ - \ - if (copy_from_user(&retval, vaddr, sizeof(type))) \ - *(type *)dest = 0; \ - else \ - *(type *) dest = retval; \ +static nokprobe_inline int +probe_user_read(void *dest, void *src, size_t size) +{ + void __user *vaddr = (void __force __user *)src; + + return copy_from_user(dest, vaddr, size); } -DEFINE_BASIC_FETCH_FUNCS(memory) /* * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max * length and relative data location. 
*/ -static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, - void *addr, void *dest) +static nokprobe_inline void +fetch_store_string(unsigned long addr, void *dest) { long ret; u32 rloc = *(u32 *)dest; @@ -152,8 +135,9 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, } } -static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, - void *addr, void *dest) +/* Return the length of string -- including null terminal byte */ +static nokprobe_inline void +fetch_store_strlen(unsigned long addr, void *dest) { int len; void __user *vaddr = (void __force __user *) addr; @@ -166,7 +150,7 @@ static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, *(u32 *)dest = len; } -static unsigned long translate_user_vaddr(void *file_offset) +static unsigned long translate_user_vaddr(unsigned long file_offset) { unsigned long base_addr; struct uprobe_dispatch_data *udd; @@ -174,21 +158,9 @@ static unsigned long translate_user_vaddr(void *file_offset) udd = (void *) current->utask->vaddr; base_addr = udd->bp_addr - udd->tu->offset; - return base_addr + (unsigned long)file_offset; + return base_addr + file_offset; } -#define DEFINE_FETCH_file_offset(type) \ -static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \ - void *offset, void *dest)\ -{ \ - void *vaddr = (void *)translate_user_vaddr(offset); \ - \ - FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \ -} -DEFINE_BASIC_FETCH_FUNCS(file_offset) -DEFINE_FETCH_file_offset(string) -DEFINE_FETCH_file_offset(string_size) - /* Fetch type information table */ static const struct fetch_type uprobes_fetch_type_table[] = { /* Special types */ @@ -213,6 +185,77 @@ static const struct fetch_type uprobes_fetch_type_table[] = { ASSIGN_FETCH_TYPE_END }; +/* Note that we don't verify it, since the code does not come from user space */ +static int +process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, + bool pre) +{ + unsigned long val; + int ret; + + /* 1st stage: get value from context */ + switch (code->op) { + case FETCH_OP_REG: + val = regs_get_register(regs, code->param); + break; + case FETCH_OP_STACK: + val = get_user_stack_nth(regs, code->param); + break; + case FETCH_OP_STACKP: + val = user_stack_pointer(regs); + break; + case FETCH_OP_RETVAL: + val = regs_return_value(regs); + break; + case FETCH_OP_IMM: + val = code->immediate; + break; + case FETCH_OP_FOFFS: + val = translate_user_vaddr(code->immediate); + break; + default: + return -EILSEQ; + } + code++; + + /* 2nd stage: dereference memory if needed */ + while (code->op == FETCH_OP_DEREF) { + ret = probe_user_read(&val, (void *)val + code->offset, + sizeof(val)); + if (ret) + return ret; + code++; + } + + /* 3rd stage: store value to buffer */ + switch (code->op) { + case FETCH_OP_ST_RAW: + fetch_store_raw(val, code, dest); + break; + case FETCH_OP_ST_MEM: + probe_user_read(dest, (void *)val + code->offset, code->size); + break; + case FETCH_OP_ST_STRING: + if (pre) + fetch_store_strlen(val + code->offset, dest); + else + fetch_store_string(val + code->offset, dest); + break; + default: + return -EILSEQ; + } + code++; + + /* 4th stage: modify stored value if needed */ + if (code->op == FETCH_OP_MOD_BF) { + fetch_apply_bitfield(code, dest); + code++; + } + + return code->op == FETCH_OP_END ? 
0 : -EILSEQ; +} +NOKPROBE_SYMBOL(process_fetch_insn) + static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) { rwlock_init(&filter->rwlock); -- cgit v1.2.3 From f451bc89d8357f010304564728ba7c5d38a1d4d5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:18:32 +0900 Subject: tracing: probeevent: Unify fetch type tables Unify {k,u}probe_fetch_type_table to probe_fetch_type_table because the main difference of those type tables (fetcharg methods) are gone. Now we can consolidate it. Link: http://lkml.kernel.org/r/152465871274.26224.13999436317830479698.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 27 +---------------------- kernel/trace/trace_probe.c | 54 ++++++++++++++++++++++++++++++--------------- kernel/trace/trace_probe.h | 6 +---- kernel/trace/trace_uprobe.c | 27 +---------------------- 4 files changed, 39 insertions(+), 75 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c024cc40d509..dc1c638daf44 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -121,30 +121,6 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs); -/* Fetch type information table */ -static const struct fetch_type kprobes_fetch_type_table[] = { - /* Special types */ - [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, - sizeof(u32), 1, "__data_loc char[]"), - [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, - string_size, sizeof(u32), 0, "u32"), - /* Basic types */ - ASSIGN_FETCH_TYPE(u8, u8, 0), - ASSIGN_FETCH_TYPE(u16, u16, 0), - ASSIGN_FETCH_TYPE(u32, u32, 0), - ASSIGN_FETCH_TYPE(u64, u64, 0), - ASSIGN_FETCH_TYPE(s8, u8, 1), - ASSIGN_FETCH_TYPE(s16, u16, 1), - ASSIGN_FETCH_TYPE(s32, u32, 1), - ASSIGN_FETCH_TYPE(s64, u64, 1), - ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0), - ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0), - ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0), - ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0), - - ASSIGN_FETCH_TYPE_END -}; - /* * Allocate new trace_probe and initialize it (including kprobes). */ @@ -720,8 +696,7 @@ static int create_trace_kprobe(int argc, char **argv) /* Parse fetch argument */ ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, - is_return, true, - kprobes_fetch_type_table); + is_return, true); if (ret) { pr_info("Parse error at argument[%d]. 
(%d)\n", i, ret); goto error; diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index c59c69cb2f2e..d06e67cca3e1 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -61,8 +61,29 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; -static const struct fetch_type *find_fetch_type(const char *type, - const struct fetch_type *ftbl) +/* Fetch type information table */ +static const struct fetch_type probe_fetch_types[] = { + /* Special types */ + __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, + "__data_loc char[]"), + /* Basic types */ + ASSIGN_FETCH_TYPE(u8, u8, 0), + ASSIGN_FETCH_TYPE(u16, u16, 0), + ASSIGN_FETCH_TYPE(u32, u32, 0), + ASSIGN_FETCH_TYPE(u64, u64, 0), + ASSIGN_FETCH_TYPE(s8, u8, 1), + ASSIGN_FETCH_TYPE(s16, u16, 1), + ASSIGN_FETCH_TYPE(s32, u32, 1), + ASSIGN_FETCH_TYPE(s64, u64, 1), + ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0), + ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0), + ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0), + ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0), + + ASSIGN_FETCH_TYPE_END +}; + +static const struct fetch_type *find_fetch_type(const char *type) { int i; @@ -83,21 +104,21 @@ static const struct fetch_type *find_fetch_type(const char *type, switch (bs) { case 8: - return find_fetch_type("u8", ftbl); + return find_fetch_type("u8"); case 16: - return find_fetch_type("u16", ftbl); + return find_fetch_type("u16"); case 32: - return find_fetch_type("u32", ftbl); + return find_fetch_type("u32"); case 64: - return find_fetch_type("u64", ftbl); + return find_fetch_type("u64"); default: goto fail; } } - for (i = 0; ftbl[i].name; i++) { - if (strcmp(type, ftbl[i].name) == 0) - return &ftbl[i]; + for (i = 0; probe_fetch_types[i].name; i++) { + if (strcmp(type, probe_fetch_types[i].name) == 0) + return &probe_fetch_types[i]; } fail: @@ -164,8 +185,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, static int parse_probe_arg(char *arg, const struct fetch_type *type, struct fetch_insn **pcode, struct fetch_insn *end, - bool is_return, bool is_kprobe, - const struct fetch_type *ftbl) + bool is_return, bool is_kprobe) { struct fetch_insn *code = *pcode; unsigned long param; @@ -247,12 +267,11 @@ parse_probe_arg(char *arg, const struct fetch_type *type, tmp = strrchr(arg, ')'); if (tmp) { - const struct fetch_type *t2; + const struct fetch_type *t2 = find_fetch_type(NULL); - t2 = find_fetch_type(NULL, ftbl); *tmp = '\0'; ret = parse_probe_arg(arg, t2, &code, end, is_return, - is_kprobe, ftbl); + is_kprobe); if (ret) break; if (code->op == FETCH_OP_COMM) @@ -312,8 +331,7 @@ static int __parse_bitfield_probe_arg(const char *bf, /* String length checking wrapper */ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, - struct probe_arg *parg, bool is_return, bool is_kprobe, - const struct fetch_type *ftbl) + struct probe_arg *parg, bool is_return, bool is_kprobe) { struct fetch_insn *code, *tmp = NULL; const char *t; @@ -339,7 +357,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, */ if (!t && strcmp(arg, "$comm") == 0) t = "string"; - parg->type = find_fetch_type(t, ftbl); + parg->type = find_fetch_type(t); if (!parg->type) { pr_info("Unsupported type: %s\n", t); return -EINVAL; @@ -353,7 +371,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], - is_return, is_kprobe, ftbl); + is_return, 
is_kprobe); if (ret) goto fail; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 42c724a7ad11..5c262ed6347c 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -184,9 +184,6 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(string); #define ASSIGN_FETCH_TYPE_END {} -#define FETCH_TYPE_STRING 0 -#define FETCH_TYPE_STRSIZE 1 - #ifdef CONFIG_KPROBE_EVENTS bool trace_kprobe_on_func_entry(struct trace_event_call *call); bool trace_kprobe_error_injectable(struct trace_event_call *call); @@ -261,8 +258,7 @@ find_event_file_link(struct trace_probe *tp, struct trace_event_file *file) } extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, - struct probe_arg *parg, bool is_return, bool is_kprobe, - const struct fetch_type *ftbl); + struct probe_arg *parg, bool is_return, bool is_kprobe); extern int traceprobe_conflict_field_name(const char *name, struct probe_arg *args, int narg); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e076f89ab33a..7772fec84c12 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -161,30 +161,6 @@ static unsigned long translate_user_vaddr(unsigned long file_offset) return base_addr + file_offset; } -/* Fetch type information table */ -static const struct fetch_type uprobes_fetch_type_table[] = { - /* Special types */ - [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, - sizeof(u32), 1, "__data_loc char[]"), - [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, - string_size, sizeof(u32), 0, "u32"), - /* Basic types */ - ASSIGN_FETCH_TYPE(u8, u8, 0), - ASSIGN_FETCH_TYPE(u16, u16, 0), - ASSIGN_FETCH_TYPE(u32, u32, 0), - ASSIGN_FETCH_TYPE(u64, u64, 0), - ASSIGN_FETCH_TYPE(s8, u8, 1), - ASSIGN_FETCH_TYPE(s16, u16, 1), - ASSIGN_FETCH_TYPE(s32, u32, 1), - ASSIGN_FETCH_TYPE(s64, u64, 1), - ASSIGN_FETCH_TYPE_ALIAS(x8, u8, u8, 0), - ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0), - ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0), - ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0), - - ASSIGN_FETCH_TYPE_END -}; - /* Note that we don't verify it, since the code does not come from user space */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, @@ -621,8 +597,7 @@ static int create_trace_uprobe(int argc, char **argv) /* Parse fetch argument */ ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, - is_return, false, - uprobes_fetch_type_table); + is_return, false); if (ret) { pr_info("Parse error at argument[%d]. (%d)\n", i, ret); goto error; -- cgit v1.2.3 From 9178412ddf5a98feba0ad3986111c5ad10eb9e59 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:19:01 +0900 Subject: tracing: probeevent: Return consumed bytes of dynamic area Cleanup string fetching routine so that returns the consumed bytes of dynamic area and store the string information as data_loc format instead of data_rloc. This simplifies the fetcharg loop. 
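The data_loc format referred to here matches the __data_loc encoding used elsewhere in the trace events code: a single u32 whose high 16 bits hold the data length and whose low 16 bits hold the offset of the data measured from the start of the event entry (rather than from the u32 itself, as data_rloc did). The following is a small, self-contained sketch of that packing (not part of the patch); the demo_* helpers are illustrative stand-ins for the make_data_loc(), get_loc_len() and get_loc_offs() macros added to trace_probe.h in the diff below.

#include <stdio.h>
#include <stdint.h>

/* Pack a 16-bit length and a 16-bit entry-relative offset into one u32. */
static uint32_t demo_make_data_loc(uint32_t len, uint32_t offs)
{
	return (len << 16) | (offs & 0xffff);
}

static uint32_t demo_loc_len(uint32_t dl)  { return dl >> 16; }
static uint32_t demo_loc_offs(uint32_t dl) { return dl & 0xffff; }

int main(void)
{
	/* e.g. a 6-byte string stored 40 bytes after the event entry */
	uint32_t dl = demo_make_data_loc(6, 40);

	printf("len=%u offs=%u\n", demo_loc_len(dl), demo_loc_offs(dl));
	return 0;
}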
Link: http://lkml.kernel.org/r/152465874163.26224.12125143907501289031.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 57 ++++++++++++++++++-------------------- kernel/trace/trace_probe.h | 26 ++++-------------- kernel/trace/trace_probe_tmpl.h | 54 +++++++++++++++++------------------- kernel/trace/trace_uprobe.c | 61 ++++++++++++++++++++--------------------- 4 files changed, 88 insertions(+), 110 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index dc1c638daf44..7e5064f8ab8f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -853,8 +853,8 @@ static const struct file_operations kprobe_profile_ops = { /* Kprobe specific fetch functions */ /* Return the length of string -- including null terminal byte */ -static nokprobe_inline void -fetch_store_strlen(unsigned long addr, void *dest) +static nokprobe_inline int +fetch_store_strlen(unsigned long addr) { mm_segment_t old_fs; int ret, len = 0; @@ -872,47 +872,40 @@ fetch_store_strlen(unsigned long addr, void *dest) pagefault_enable(); set_fs(old_fs); - if (ret < 0) /* Failed to check the length */ - *(u32 *)dest = 0; - else - *(u32 *)dest = len; + return (ret < 0) ? ret : len; } /* * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max * length and relative data location. */ -static nokprobe_inline void -fetch_store_string(unsigned long addr, void *dest) +static nokprobe_inline int +fetch_store_string(unsigned long addr, void *dest, void *base) { - int maxlen = get_rloc_len(*(u32 *)dest); - u8 *dst = get_rloc_data(dest); + int maxlen = get_loc_len(*(u32 *)dest); + u8 *dst = get_loc_data(dest, base); long ret; - if (!maxlen) - return; - + if (unlikely(!maxlen)) + return -ENOMEM; /* * Try to get string again, since the string can be changed while * probing. */ ret = strncpy_from_unsafe(dst, (void *)addr, maxlen); - if (ret < 0) { /* Failed to fetch string */ - dst[0] = '\0'; - *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); - } else { - *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest)); - } + if (ret >= 0) + *(u32 *)dest = make_data_loc(ret, (void *)dst - base); + return ret; } /* Note that we don't verify it, since the code does not come from user space */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, - bool pre) + void *base) { unsigned long val; - int ret; + int ret = 0; /* 1st stage: get value from context */ switch (code->op) { @@ -949,6 +942,13 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, } /* 3rd stage: store value to buffer */ + if (unlikely(!dest)) { + if (code->op == FETCH_OP_ST_STRING) + return fetch_store_strlen(val + code->offset); + else + return -EILSEQ; + } + switch (code->op) { case FETCH_OP_ST_RAW: fetch_store_raw(val, code, dest); @@ -957,10 +957,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, probe_kernel_read(dest, (void *)val + code->offset, code->size); break; case FETCH_OP_ST_STRING: - if (pre) - fetch_store_strlen(val + code->offset, dest); - else - fetch_store_string(val + code->offset, dest); + ret = fetch_store_string(val + code->offset, dest, base); break; default: return -EILSEQ; @@ -973,7 +970,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, code++; } - return code->op == FETCH_OP_END ? 0 : -EILSEQ; + return code->op == FETCH_OP_END ? 
ret : -EILSEQ; } NOKPROBE_SYMBOL(process_fetch_insn) @@ -1008,7 +1005,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, entry = ring_buffer_event_data(event); entry->ip = (unsigned long)tk->rp.kp.addr; - store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); + store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); event_trigger_unlock_commit_regs(trace_file, buffer, event, entry, irq_flags, pc, regs); @@ -1057,7 +1054,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, entry = ring_buffer_event_data(event); entry->func = (unsigned long)tk->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; - store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); + store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); event_trigger_unlock_commit_regs(trace_file, buffer, event, entry, irq_flags, pc, regs); @@ -1203,7 +1200,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) entry->ip = (unsigned long)tk->rp.kp.addr; memset(&entry[1], 0, dsize); - store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); + store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, head, NULL); return 0; @@ -1239,7 +1236,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, entry->func = (unsigned long)tk->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; - store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); + store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize); perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, head, NULL); } diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5c262ed6347c..b6bdd82fa485 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -54,29 +54,15 @@ #define TP_FLAG_PROFILE 2 #define TP_FLAG_REGISTERED 4 +/* data_loc: data location, compatible with u32 */ +#define make_data_loc(len, offs) \ + (((u32)(len) << 16) | ((u32)(offs) & 0xffff)) +#define get_loc_len(dl) ((u32)(dl) >> 16) +#define get_loc_offs(dl) ((u32)(dl) & 0xffff) -/* data_rloc: data relative location, compatible with u32 */ -#define make_data_rloc(len, roffs) \ - (((u32)(len) << 16) | ((u32)(roffs) & 0xffff)) -#define get_rloc_len(dl) ((u32)(dl) >> 16) -#define get_rloc_offs(dl) ((u32)(dl) & 0xffff) - -/* - * Convert data_rloc to data_loc: - * data_rloc stores the offset from data_rloc itself, but data_loc - * stores the offset from event entry. - */ -#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) - -static nokprobe_inline void *get_rloc_data(u32 *dl) -{ - return (u8 *)dl + get_rloc_offs(*dl); -} - -/* For data_loc conversion */ static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) { - return (u8 *)ent + get_rloc_offs(*dl); + return (u8 *)ent + get_loc_offs(*dl); } /* Printing function type */ diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index c8a5272abf01..3b4aba6f84cc 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -48,24 +48,28 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf) } } -/* Define this for each callsite */ +/* + * This must be defined for each callsite. + * Return consumed dynamic data size (>= 0), or error (< 0). + * If dest is NULL, don't store result and return required dynamic data size. 
+ */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, - void *dest, bool pre); + void *dest, void *base); /* Sum up total data length for dynamic arraies (strings) */ static nokprobe_inline int __get_data_size(struct trace_probe *tp, struct pt_regs *regs) { struct probe_arg *arg; - int i, ret = 0; - u32 len; + int i, len, ret = 0; for (i = 0; i < tp->nr_args; i++) { arg = tp->args + i; if (unlikely(arg->dynamic)) { - process_fetch_insn(arg->code, regs, &len, true); - ret += len; + len = process_fetch_insn(arg->code, regs, NULL, NULL); + if (len > 0) + ret += len; } } @@ -74,34 +78,26 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs) /* Store the value of each argument */ static nokprobe_inline void -store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs, - u8 *data, int maxlen) +store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, + int header_size, int maxlen) { struct probe_arg *arg; - u32 end = tp->size; - u32 *dl; /* Data (relative) location */ - int i; + void *base = data - header_size; + void *dyndata = data + tp->size; + u32 *dl; /* Data location */ + int ret, i; for (i = 0; i < tp->nr_args; i++) { arg = tp->args + i; - if (unlikely(arg->dynamic)) { - /* - * First, we set the relative location and - * maximum data length to *dl - */ - dl = (u32 *)(data + arg->offset); - *dl = make_data_rloc(maxlen, end - arg->offset); - /* Then try to fetch string or dynamic array data */ - process_fetch_insn(arg->code, regs, dl, false); - /* Reduce maximum length */ - end += get_rloc_len(*dl); - maxlen -= get_rloc_len(*dl); - /* Trick here, convert data_rloc to data_loc */ - *dl = convert_rloc_to_loc(*dl, ent_size + arg->offset); - } else - /* Just fetching data normally */ - process_fetch_insn(arg->code, regs, data + arg->offset, - false); + dl = data + arg->offset; + /* Point the dynamic data area if needed */ + if (unlikely(arg->dynamic)) + *dl = make_data_loc(maxlen, dyndata - base); + ret = process_fetch_insn(arg->code, regs, dl, base); + if (unlikely(ret < 0 && arg->dynamic)) + *dl = make_data_loc(0, dyndata - base); + else + dyndata += ret; } } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 7772fec84c12..08ad51c8ebc0 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -111,43 +111,38 @@ probe_user_read(void *dest, void *src, size_t size) * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max * length and relative data location. 
*/ -static nokprobe_inline void -fetch_store_string(unsigned long addr, void *dest) +static nokprobe_inline int +fetch_store_string(unsigned long addr, void *dest, void *base) { long ret; - u32 rloc = *(u32 *)dest; - int maxlen = get_rloc_len(rloc); - u8 *dst = get_rloc_data(dest); + u32 loc = *(u32 *)dest; + int maxlen = get_loc_len(loc); + u8 *dst = get_loc_data(dest, base); void __user *src = (void __force __user *) addr; - if (!maxlen) - return; + if (unlikely(!maxlen)) + return -ENOMEM; ret = strncpy_from_user(dst, src, maxlen); - if (ret == maxlen) - dst[--ret] = '\0'; - - if (ret < 0) { /* Failed to fetch string */ - ((u8 *)get_rloc_data(dest))[0] = '\0'; - *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc)); - } else { - *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc)); + if (ret >= 0) { + if (ret == maxlen) + dst[ret - 1] = '\0'; + *(u32 *)dest = make_data_loc(ret, (void *)dst - base); } + + return ret; } /* Return the length of string -- including null terminal byte */ -static nokprobe_inline void -fetch_store_strlen(unsigned long addr, void *dest) +static nokprobe_inline int +fetch_store_strlen(unsigned long addr) { int len; void __user *vaddr = (void __force __user *) addr; len = strnlen_user(vaddr, MAX_STRING_SIZE); - if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */ - *(u32 *)dest = 0; - else - *(u32 *)dest = len; + return (len > MAX_STRING_SIZE) ? 0 : len; } static unsigned long translate_user_vaddr(unsigned long file_offset) @@ -164,10 +159,10 @@ static unsigned long translate_user_vaddr(unsigned long file_offset) /* Note that we don't verify it, since the code does not come from user space */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, - bool pre) + void *base) { unsigned long val; - int ret; + int ret = 0; /* 1st stage: get value from context */ switch (code->op) { @@ -204,18 +199,22 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, } /* 3rd stage: store value to buffer */ + if (unlikely(!dest)) { + if (code->op == FETCH_OP_ST_STRING) + return fetch_store_strlen(val + code->offset); + else + return -EILSEQ; + } + switch (code->op) { case FETCH_OP_ST_RAW: fetch_store_raw(val, code, dest); break; case FETCH_OP_ST_MEM: - probe_user_read(dest, (void *)val + code->offset, code->size); + probe_kernel_read(dest, (void *)val + code->offset, code->size); break; case FETCH_OP_ST_STRING: - if (pre) - fetch_store_strlen(val + code->offset, dest); - else - fetch_store_string(val + code->offset, dest); + ret = fetch_store_string(val + code->offset, dest, base); break; default: return -EILSEQ; @@ -228,7 +227,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, code++; } - return code->op == FETCH_OP_END ? 0 : -EILSEQ; + return code->op == FETCH_OP_END ? 
ret : -EILSEQ; } NOKPROBE_SYMBOL(process_fetch_insn) @@ -1300,7 +1299,7 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); ucb = uprobe_buffer_get(); - store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); + store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize); if (tu->tp.flags & TP_FLAG_TRACE) ret |= uprobe_trace_func(tu, regs, ucb, dsize); @@ -1335,7 +1334,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); ucb = uprobe_buffer_get(); - store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); + store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize); if (tu->tp.flags & TP_FLAG_TRACE) uretprobe_trace_func(tu, func, regs, ucb, dsize); -- cgit v1.2.3 From 0a46c8549f8c775ed6afac57a8b9fd7c4b4d156f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:19:30 +0900 Subject: tracing: probeevent: Append traceprobe_ for exported function Append traceprobe_ for exported function set_print_fmt() as same as other functions. Link: http://lkml.kernel.org/r/152465877071.26224.11143125027282999726.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 4 ++-- kernel/trace/trace_probe.c | 2 +- kernel/trace/trace_probe.h | 2 +- kernel/trace/trace_uprobe.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7e5064f8ab8f..4895ca85ec79 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1371,7 +1371,7 @@ static int register_kprobe_event(struct trace_kprobe *tk) init_trace_event_call(tk, call); - if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) + if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) return -ENOMEM; ret = register_trace_event(&call->event); if (!ret) { @@ -1428,7 +1428,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, init_trace_event_call(tk, &tk->tp.call); - if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) { + if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) { ret = -ENOMEM; goto error; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d06e67cca3e1..d119bf8c3b4f 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -490,7 +490,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, return pos; } -int set_print_fmt(struct trace_probe *tp, bool is_return) +int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return) { int len; char *print_fmt; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index b6bdd82fa485..c4e9d3d3216d 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -254,7 +254,7 @@ extern void traceprobe_free_probe_arg(struct probe_arg *arg); extern int traceprobe_split_symbol_offset(char *symbol, long *offset); -extern int set_print_fmt(struct trace_probe *tp, bool is_return); +extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return); #ifdef CONFIG_PERF_EVENTS extern struct trace_event_call * diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 08ad51c8ebc0..912cb2093944 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1370,7 +1370,7 @@ static int register_uprobe_event(struct trace_uprobe *tu) init_trace_event_call(tu, call); - if (set_print_fmt(&tu->tp, 
is_ret_probe(tu)) < 0) + if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) return -ENOMEM; ret = register_trace_event(&call->event); @@ -1443,7 +1443,7 @@ create_local_trace_uprobe(char *name, unsigned long offs, tu->filename = kstrdup(name, GFP_KERNEL); init_trace_event_call(tu, &tu->tp.call); - if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) { + if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) { ret = -ENOMEM; goto error; } -- cgit v1.2.3 From 9b960a38835fcaf977f20dcc34ce9e54ff9563bd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:19:59 +0900 Subject: tracing: probeevent: Unify fetch_insn processing common part Unify the fetch_insn bottom process (from stage 2: dereference indirect data) from kprobe and uprobe events, since those are mostly same. Link: http://lkml.kernel.org/r/152465879965.26224.8547240824606804815.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 47 ++++++----------------------------- kernel/trace/trace_probe_tmpl.h | 55 ++++++++++++++++++++++++++++++++++++++++- kernel/trace/trace_uprobe.c | 43 ++------------------------------ 3 files changed, 63 insertions(+), 82 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4895ca85ec79..fdd43f2f1fd1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -899,13 +899,18 @@ fetch_store_string(unsigned long addr, void *dest, void *base) return ret; } +static nokprobe_inline int +probe_mem_read(void *dest, void *src, size_t size) +{ + return probe_kernel_read(dest, src, size); +} + /* Note that we don't verify it, since the code does not come from user space */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, void *base) { unsigned long val; - int ret = 0; /* 1st stage: get value from context */ switch (code->op) { @@ -932,45 +937,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, } code++; - /* 2nd stage: dereference memory if needed */ - while (code->op == FETCH_OP_DEREF) { - ret = probe_kernel_read(&val, (void *)val + code->offset, - sizeof(val)); - if (ret) - return ret; - code++; - } - - /* 3rd stage: store value to buffer */ - if (unlikely(!dest)) { - if (code->op == FETCH_OP_ST_STRING) - return fetch_store_strlen(val + code->offset); - else - return -EILSEQ; - } - - switch (code->op) { - case FETCH_OP_ST_RAW: - fetch_store_raw(val, code, dest); - break; - case FETCH_OP_ST_MEM: - probe_kernel_read(dest, (void *)val + code->offset, code->size); - break; - case FETCH_OP_ST_STRING: - ret = fetch_store_string(val + code->offset, dest, base); - break; - default: - return -EILSEQ; - } - code++; - - /* 4th stage: modify stored value if needed */ - if (code->op == FETCH_OP_MOD_BF) { - fetch_apply_bitfield(code, dest); - code++; - } - - return code->op == FETCH_OP_END ? ret : -EILSEQ; + return process_fetch_insn_bottom(code, val, dest, base); } NOKPROBE_SYMBOL(process_fetch_insn) diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 3b4aba6f84cc..b4075f3e3a29 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -49,13 +49,66 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf) } /* - * This must be defined for each callsite. + * These functions must be defined for each callsite. * Return consumed dynamic data size (>= 0), or error (< 0). 
* If dest is NULL, don't store result and return required dynamic data size. */ static int process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, void *base); +static nokprobe_inline int fetch_store_strlen(unsigned long addr); +static nokprobe_inline int +fetch_store_string(unsigned long addr, void *dest, void *base); +static nokprobe_inline int +probe_mem_read(void *dest, void *src, size_t size); + +/* From the 2nd stage, routine is same */ +static nokprobe_inline int +process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, + void *dest, void *base) +{ + int ret = 0; + + /* 2nd stage: dereference memory if needed */ + while (code->op == FETCH_OP_DEREF) { + ret = probe_mem_read(&val, (void *)val + code->offset, + sizeof(val)); + if (ret) + return ret; + code++; + } + + /* 3rd stage: store value to buffer */ + if (unlikely(!dest)) { + if (code->op == FETCH_OP_ST_STRING) + return fetch_store_strlen(val + code->offset); + else + return -EILSEQ; + } + + switch (code->op) { + case FETCH_OP_ST_RAW: + fetch_store_raw(val, code, dest); + break; + case FETCH_OP_ST_MEM: + probe_mem_read(dest, (void *)val + code->offset, code->size); + break; + case FETCH_OP_ST_STRING: + ret = fetch_store_string(val + code->offset, dest, base); + break; + default: + return -EILSEQ; + } + code++; + + /* 4th stage: modify stored value if needed */ + if (code->op == FETCH_OP_MOD_BF) { + fetch_apply_bitfield(code, dest); + code++; + } + + return code->op == FETCH_OP_END ? ret : -EILSEQ; +} /* Sum up total data length for dynamic arraies (strings) */ static nokprobe_inline int diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 912cb2093944..7154473ffaa4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -101,7 +101,7 @@ static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) * Uprobes-specific fetch functions */ static nokprobe_inline int -probe_user_read(void *dest, void *src, size_t size) +probe_mem_read(void *dest, void *src, size_t size) { void __user *vaddr = (void __force __user *)src; @@ -162,7 +162,6 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, void *base) { unsigned long val; - int ret = 0; /* 1st stage: get value from context */ switch (code->op) { @@ -189,45 +188,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, } code++; - /* 2nd stage: dereference memory if needed */ - while (code->op == FETCH_OP_DEREF) { - ret = probe_user_read(&val, (void *)val + code->offset, - sizeof(val)); - if (ret) - return ret; - code++; - } - - /* 3rd stage: store value to buffer */ - if (unlikely(!dest)) { - if (code->op == FETCH_OP_ST_STRING) - return fetch_store_strlen(val + code->offset); - else - return -EILSEQ; - } - - switch (code->op) { - case FETCH_OP_ST_RAW: - fetch_store_raw(val, code, dest); - break; - case FETCH_OP_ST_MEM: - probe_kernel_read(dest, (void *)val + code->offset, code->size); - break; - case FETCH_OP_ST_STRING: - ret = fetch_store_string(val + code->offset, dest, base); - break; - default: - return -EILSEQ; - } - code++; - - /* 4th stage: modify stored value if needed */ - if (code->op == FETCH_OP_MOD_BF) { - fetch_apply_bitfield(code, dest); - code++; - } - - return code->op == FETCH_OP_END ? 
ret : -EILSEQ; + return process_fetch_insn_bottom(code, val, dest, base); } NOKPROBE_SYMBOL(process_fetch_insn) -- cgit v1.2.3 From 60c2e0cebfd01bd1bc5e8843f063264148d6b2bb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:20:28 +0900 Subject: tracing: probeevent: Add symbol type Add "symbol" type to probeevent, which is an alias of u32 or u64 (depends on BITS_PER_LONG). This shows the result value in symbol+offset style. This type is only available with kprobe events. Link: http://lkml.kernel.org/r/152465882860.26224.14779072294412467338.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/kprobetrace.rst | 2 ++ kernel/trace/trace.c | 2 +- kernel/trace/trace_probe.c | 8 ++++++++ kernel/trace/trace_probe.h | 12 +++++++++--- 4 files changed, 20 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index 8bfc75c90806..6224ddf34508 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -72,6 +72,8 @@ offset, and container-size (usually 32). The syntax is:: b@/ +Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG) +which shows given pointer in "symbol+offset" style. For $comm, the default type is "string"; any other type is invalid. diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 147be8523560..1e3f28b1fa07 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4626,7 +4626,7 @@ static const char readme_msg[] = "\t args: =fetcharg[:type]\n" "\t fetcharg: %, @
, @[+|-],\n" "\t $stack, $stack, $retval, $comm\n" - "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n" + "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b@/\n" #endif " events/\t\t- Directory containing all trace event subsystems:\n" diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d119bf8c3b4f..1e7e0618577d 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -46,6 +46,13 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(x16, u16, "0x%x") DEFINE_BASIC_PRINT_TYPE_FUNC(x32, u32, "0x%x") DEFINE_BASIC_PRINT_TYPE_FUNC(x64, u64, "0x%Lx") +int PRINT_TYPE_FUNC_NAME(symbol)(struct trace_seq *s, void *data, void *ent) +{ + trace_seq_printf(s, "%pS", (void *)*(unsigned long *)data); + return !trace_seq_has_overflowed(s); +} +const char PRINT_TYPE_FMT_NAME(symbol)[] = "%pS"; + /* Print type function for string type */ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) { @@ -79,6 +86,7 @@ static const struct fetch_type probe_fetch_types[] = { ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0), ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0), ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0), + ASSIGN_FETCH_TYPE_ALIAS(symbol, ADDR_FETCH_TYPE, ADDR_FETCH_TYPE, 0), ASSIGN_FETCH_TYPE_END }; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index c4e9d3d3216d..469110e0790b 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -145,6 +145,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(x32); DECLARE_BASIC_PRINT_TYPE_FUNC(x64); DECLARE_BASIC_PRINT_TYPE_FUNC(string); +DECLARE_BASIC_PRINT_TYPE_FUNC(symbol); /* Default (unsigned long) fetch type */ #define __DEFAULT_FETCH_TYPE(t) x##t @@ -152,6 +153,10 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(string); #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) +#define __ADDR_FETCH_TYPE(t) u##t +#define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t) +#define ADDR_FETCH_TYPE _ADDR_FETCH_TYPE(BITS_PER_LONG) + #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ {.name = _name, \ .size = _size, \ @@ -160,13 +165,14 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(string); .fmt = PRINT_TYPE_FMT_NAME(ptype), \ .fmttype = _fmttype, \ } - +#define _ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ + __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, #_fmttype) #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ - __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) + _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, ptype) /* If ptype is an alias of atype, use this macro (show atype in format) */ #define ASSIGN_FETCH_TYPE_ALIAS(ptype, atype, ftype, sign) \ - __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #atype) + _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, atype) #define ASSIGN_FETCH_TYPE_END {} -- cgit v1.2.3 From 40b53b771806b1770837169cd32d1bf167fbccaf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:21:55 +0900 Subject: tracing: probeevent: Add array type support Add array type support for probe events. This allows user to get arraied types from memory address. The array type syntax is TYPE[N] Where TYPE is one of types (u8/16/32/64,s8/16/32/64, x8/16/32/64, symbol, string) and N is a fixed value less than 64. The string array type is a bit different from other types. For other base types, [1] is equal to (e.g. +0(%di):x32[1] is same as +0(%di):x32.) But string[1] is not equal to string. 
The string type itself represents "char array", but string array type represents "char * array". So, for example, +0(%di):string[1] is equal to +0(+0(%di)):string. Link: http://lkml.kernel.org/r/152465891533.26224.6150658225601339931.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/kprobetrace.rst | 11 +++ kernel/trace/trace.c | 3 +- kernel/trace/trace_probe.c | 130 +++++++++++++++++++++++++++--------- kernel/trace/trace_probe.h | 14 ++++ kernel/trace/trace_probe_tmpl.h | 63 ++++++++++++++--- 5 files changed, 181 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index 6224ddf34508..2dfed7a1ea6f 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -64,9 +64,20 @@ respectively. 'x' prefix implies it is unsigned. Traced arguments are shown in decimal ('s' and 'u') or hexadecimal ('x'). Without type casting, 'x32' or 'x64' is used depends on the architecture (e.g. x86-32 uses x32, and x86-64 uses x64). +These value types can be an array. To record array data, you can add '[N]' +(where N is a fixed number, less than 64) to the base type. +E.g. 'x16[4]' means an array of x16 (2bytes hex) with 4 elements. +Note that the array can be applied to memory type fetchargs, you can not +apply it to registers/stack-entries etc. (for example, '$stack1:x8[8]' is +wrong, but '+8($stack):x8[8]' is OK.) String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. +The string array type is a bit different from other types. For other base +types, [1] is equal to (e.g. +0(%di):x32[1] is same +as +0(%di):x32.) But string[1] is not equal to string. The string type itself +represents "char array", but string array type represents "char * array". +So, for example, +0(%di):string[1] is equal to +0(+0(%di)):string. Bitfield is another special type, which takes 3 parameters, bit-width, bit- offset, and container-size (usually 32). The syntax is:: diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1e3f28b1fa07..e7f99f513959 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4627,7 +4627,8 @@ static const char readme_msg[] = "\t fetcharg: %, @
, @[+|-],\n" "\t $stack, $stack, $retval, $comm\n" "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" - "\t b@/\n" + "\t b@/,\n" + "\t \\[\\]\n" #endif " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 1e7e0618577d..dfd096031305 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -341,9 +341,9 @@ static int __parse_bitfield_probe_arg(const char *bf, int traceprobe_parse_probe_arg(char *arg, ssize_t *size, struct probe_arg *parg, bool is_return, bool is_kprobe) { - struct fetch_insn *code, *tmp = NULL; - const char *t; - int ret; + struct fetch_insn *code, *scode, *tmp = NULL; + char *t, *t2; + int ret, len; if (strlen(arg) > MAX_ARGSTR_LEN) { pr_info("Argument is too long.: %s\n", arg); @@ -354,24 +354,42 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, pr_info("Failed to allocate memory for command '%s'.\n", arg); return -ENOMEM; } - t = strchr(parg->comm, ':'); + t = strchr(arg, ':'); if (t) { - arg[t - parg->comm] = '\0'; - t++; + *t = '\0'; + t2 = strchr(++t, '['); + if (t2) { + *t2 = '\0'; + parg->count = simple_strtoul(t2 + 1, &t2, 0); + if (strcmp(t2, "]") || parg->count == 0) + return -EINVAL; + if (parg->count > MAX_ARRAY_LEN) + return -E2BIG; + } } /* * The default type of $comm should be "string", and it can't be * dereferenced. */ if (!t && strcmp(arg, "$comm") == 0) - t = "string"; - parg->type = find_fetch_type(t); + parg->type = find_fetch_type("string"); + else + parg->type = find_fetch_type(t); if (!parg->type) { pr_info("Unsupported type: %s\n", t); return -EINVAL; } parg->offset = *size; - *size += parg->type->size; + *size += parg->type->size * (parg->count ?: 1); + + if (parg->count) { + len = strlen(parg->type->fmttype) + 6; + parg->fmt = kmalloc(len, GFP_KERNEL); + if (!parg->fmt) + return -ENOMEM; + snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype, + parg->count); + } code = tmp = kzalloc(sizeof(*code) * FETCH_INSN_MAX, GFP_KERNEL); if (!code) @@ -391,10 +409,20 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, ret = -EINVAL; goto fail; } - /* Since IMM or COMM must be the 1st insn, this is safe */ - if (code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) + if (code->op != FETCH_OP_DEREF || parg->count) { + /* + * IMM and COMM is pointing actual address, those must + * be kept, and if parg->count != 0, this is an array + * of string pointers instead of string address itself. 
+ */ code++; + if (code->op != FETCH_OP_NOP) { + ret = -E2BIG; + goto fail; + } + } code->op = FETCH_OP_ST_STRING; /* In DEREF case, replace it */ + code->size = parg->type->size; parg->dynamic = true; } else if (code->op == FETCH_OP_DEREF) { code->op = FETCH_OP_ST_MEM; @@ -408,12 +436,29 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, code->op = FETCH_OP_ST_RAW; code->size = parg->type->size; } + scode = code; /* Modify operation */ if (t != NULL) { ret = __parse_bitfield_probe_arg(t, parg->type, &code); if (ret) goto fail; } + /* Loop(Array) operation */ + if (parg->count) { + if (scode->op != FETCH_OP_ST_MEM && + scode->op != FETCH_OP_ST_STRING) { + pr_info("array only accepts memory or address\n"); + ret = -EINVAL; + goto fail; + } + code++; + if (code->op != FETCH_OP_NOP) { + ret = -E2BIG; + goto fail; + } + code->op = FETCH_OP_LP_ARRAY; + code->param = parg->count; + } code++; code->op = FETCH_OP_END; @@ -452,14 +497,17 @@ void traceprobe_free_probe_arg(struct probe_arg *arg) kfree(arg->code); kfree(arg->name); kfree(arg->comm); + kfree(arg->fmt); } +/* When len=0, we just calculate the needed length */ +#define LEN_OR_ZERO (len ? len - pos : 0) static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, bool is_return) { - int i; + struct probe_arg *parg; + int i, j; int pos = 0; - const char *fmt, *arg; if (!is_return) { @@ -470,33 +518,49 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; } - /* When len=0, we just calculate the needed length */ -#define LEN_OR_ZERO (len ? len - pos : 0) - pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt); for (i = 0; i < tp->nr_args; i++) { - pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s", - tp->args[i].name, tp->args[i].type->fmt); + parg = tp->args + i; + pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=", parg->name); + if (parg->count) { + pos += snprintf(buf + pos, LEN_OR_ZERO, "{%s", + parg->type->fmt); + for (j = 1; j < parg->count; j++) + pos += snprintf(buf + pos, LEN_OR_ZERO, ",%s", + parg->type->fmt); + pos += snprintf(buf + pos, LEN_OR_ZERO, "}"); + } else + pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", + parg->type->fmt); } pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); for (i = 0; i < tp->nr_args; i++) { - if (strcmp(tp->args[i].type->name, "string") == 0) + parg = tp->args + i; + if (parg->count) { + if (strcmp(parg->type->name, "string") == 0) + fmt = ", __get_str(%s[%d])"; + else + fmt = ", REC->%s[%d]"; + for (j = 0; j < parg->count; j++) + pos += snprintf(buf + pos, LEN_OR_ZERO, + fmt, parg->name, j); + } else { + if (strcmp(parg->type->name, "string") == 0) + fmt = ", __get_str(%s)"; + else + fmt = ", REC->%s"; pos += snprintf(buf + pos, LEN_OR_ZERO, - ", __get_str(%s)", - tp->args[i].name); - else - pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", - tp->args[i].name); + fmt, parg->name); + } } -#undef LEN_OR_ZERO - /* return the length of print_fmt */ return pos; } +#undef LEN_OR_ZERO int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return) { @@ -524,11 +588,15 @@ int traceprobe_define_arg_fields(struct trace_event_call *event_call, /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) { struct probe_arg *parg = &tp->args[i]; - - ret = trace_define_field(event_call, parg->type->fmttype, - parg->name, - offset + parg->offset, - parg->type->size, + const char *fmt = parg->type->fmttype; + int size = parg->type->size; + + if (parg->fmt) + fmt = parg->fmt; + if (parg->count) + size 
*= parg->count; + ret = trace_define_field(event_call, fmt, parg->name, + offset + parg->offset, size, parg->type->is_signed, FILTER_OTHER); if (ret) diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 469110e0790b..1f456fd82483 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -30,6 +30,7 @@ #define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 +#define MAX_ARRAY_LEN 64 #define MAX_STRING_SIZE PATH_MAX /* Reserved field names */ @@ -65,6 +66,14 @@ static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) return (u8 *)ent + get_loc_offs(*dl); } +static nokprobe_inline u32 update_data_loc(u32 loc, int consumed) +{ + u32 maxlen = get_loc_len(loc); + u32 offset = get_loc_offs(loc); + + return make_data_loc(maxlen - consumed, offset + consumed); +} + /* Printing function type */ typedef int (*print_type_func_t)(struct trace_seq *, void *, void *); @@ -86,6 +95,8 @@ enum fetch_op { FETCH_OP_ST_STRING, /* String: .offset, .size */ // Stage 4 (modify) op FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */ + // Stage 5 (loop) op + FETCH_OP_LP_ARRAY, /* Array: .param = loop count */ FETCH_OP_END, }; @@ -175,6 +186,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol); _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, atype) #define ASSIGN_FETCH_TYPE_END {} +#define MAX_ARRAY_LEN 64 #ifdef CONFIG_KPROBE_EVENTS bool trace_kprobe_on_func_entry(struct trace_event_call *call); @@ -195,8 +207,10 @@ struct probe_arg { struct fetch_insn *code; bool dynamic;/* Dynamic array (string) is used */ unsigned int offset; /* Offset from argument entry */ + unsigned int count; /* Array count */ const char *name; /* Name of this argument */ const char *comm; /* Command of this argument */ + char *fmt; /* Format string if needed */ const struct fetch_type *type; /* Type of this argument */ }; diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index b4075f3e3a29..5c56afc17cf8 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -67,10 +67,15 @@ static nokprobe_inline int process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, void *dest, void *base) { - int ret = 0; + struct fetch_insn *s3 = NULL; + int total = 0, ret = 0, i = 0; + u32 loc = 0; + unsigned long lval = val; +stage2: /* 2nd stage: dereference memory if needed */ while (code->op == FETCH_OP_DEREF) { + lval = val; ret = probe_mem_read(&val, (void *)val + code->offset, sizeof(val)); if (ret) @@ -78,11 +83,15 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, code++; } + s3 = code; +stage3: /* 3rd stage: store value to buffer */ if (unlikely(!dest)) { - if (code->op == FETCH_OP_ST_STRING) - return fetch_store_strlen(val + code->offset); - else + if (code->op == FETCH_OP_ST_STRING) { + ret += fetch_store_strlen(val + code->offset); + code++; + goto array; + } else return -EILSEQ; } @@ -94,6 +103,7 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, probe_mem_read(dest, (void *)val + code->offset, code->size); break; case FETCH_OP_ST_STRING: + loc = *(u32 *)dest; ret = fetch_store_string(val + code->offset, dest, base); break; default: @@ -107,6 +117,29 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, code++; } +array: + /* the last stage: Loop on array */ + if (code->op == FETCH_OP_LP_ARRAY) { + total += ret; + if (++i < code->param) { + code = s3; + if (s3->op != FETCH_OP_ST_STRING) { + dest += s3->size; + val += s3->size; + goto stage3; + } + code--; + val = 
lval + sizeof(char *); + if (dest) { + dest += sizeof(u32); + *(u32 *)dest = update_data_loc(loc, ret); + } + goto stage2; + } + code++; + ret = total; + } + return code->op == FETCH_OP_END ? ret : -EILSEQ; } @@ -158,12 +191,26 @@ static inline int print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args, u8 *data, void *field) { - int i; + void *p; + int i, j; for (i = 0; i < nr_args; i++) { - trace_seq_printf(s, " %s=", args[i].name); - if (!args[i].type->print(s, data + args[i].offset, field)) - return -ENOMEM; + struct probe_arg *a = args + i; + + trace_seq_printf(s, " %s=", a->name); + if (likely(!a->count)) { + if (!a->type->print(s, data + a->offset, field)) + return -ENOMEM; + continue; + } + trace_seq_putc(s, '{'); + p = data + a->offset; + for (j = 0; j < a->count; j++) { + if (!a->type->print(s, p, field)) + return -ENOMEM; + trace_seq_putc(s, j == a->count - 1 ? '}' : ','); + p += a->type->size; + } } return 0; } -- cgit v1.2.3 From a1303af5d79eb13a658633a9fb0ce3aed0f7decf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 25 Apr 2018 21:21:26 +0900 Subject: tracing: probeevent: Add $argN for accessing function args Add $argN special fetch variable for accessing function arguments. This allows user to trace the Nth argument easily at the function entry. Note that this returns most probably assignment of registers and stacks. In some case, it may not work well. If you need to access correct registers or stacks you should use perf-probe. Link: http://lkml.kernel.org/r/152465888632.26224.3412465701570253696.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- Documentation/trace/kprobetrace.rst | 10 ++++++---- kernel/trace/trace.c | 4 ++++ kernel/trace/trace_kprobe.c | 18 +++++++++++++----- kernel/trace/trace_probe.c | 36 +++++++++++++++++++++++------------- kernel/trace/trace_probe.h | 9 ++++++++- kernel/trace/trace_uprobe.c | 2 +- 6 files changed, 55 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index 2dfed7a1ea6f..47e765c2f2c3 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -45,16 +45,18 @@ Synopsis of kprobe_events @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) $stackN : Fetch Nth entry of stack (N >= 0) $stack : Fetch stack address. - $retval : Fetch return value.(*) + $argN : Fetch the Nth function argument. (N >= 1) (\*1) + $retval : Fetch return value.(\*2) $comm : Fetch current task comm. - +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**) + +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(\*3) NAME=FETCHARG : Set NAME as the argument name of FETCHARG. FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types (x8/x16/x32/x64), "string" and bitfield are supported. - (*) only for return probe. - (**) this is useful for fetching a field of data structures. + (\*1) only for the probe on function entry (offs == 0). + (\*2) only for return probe. + (\*3) this is useful for fetching a field of data structures. Types ----- diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index e7f99f513959..ec5b21778806 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4625,7 +4625,11 @@ static const char readme_msg[] = #endif "\t args: =fetcharg[:type]\n" "\t fetcharg: %, @
, @[+|-],\n" +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API + "\t $stack, $stack, $retval, $comm, $arg\n" +#else "\t $stack, $stack, $retval, $comm\n" +#endif "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b@/,\n" "\t \\[\\]\n" diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index fdd43f2f1fd1..3faaadbddf54 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -533,13 +533,15 @@ static int create_trace_kprobe(int argc, char **argv) long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; + unsigned int flags = TPARG_FL_KERNEL; /* argc must be >= 1 */ if (argv[0][0] == 'p') is_return = false; - else if (argv[0][0] == 'r') + else if (argv[0][0] == 'r') { is_return = true; - else if (argv[0][0] == '-') + flags |= TPARG_FL_RETURN; + } else if (argv[0][0] == '-') is_delete = true; else { pr_info("Probe definition must be started with 'p', 'r' or" @@ -625,8 +627,9 @@ static int create_trace_kprobe(int argc, char **argv) pr_info("Failed to parse either an address or a symbol.\n"); return ret; } - if (offset && is_return && - !kprobe_on_func_entry(NULL, symbol, offset)) { + if (kprobe_on_func_entry(NULL, symbol, offset)) + flags |= TPARG_FL_FENTRY; + if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { pr_info("Given offset is not valid for return probe.\n"); return -EINVAL; } @@ -696,7 +699,7 @@ static int create_trace_kprobe(int argc, char **argv) /* Parse fetch argument */ ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, - is_return, true); + flags); if (ret) { pr_info("Parse error at argument[%d]. (%d)\n", i, ret); goto error; @@ -932,6 +935,11 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, case FETCH_OP_COMM: val = (unsigned long)current->comm; break; +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API + case FETCH_OP_ARG: + val = regs_get_kernel_argument(regs, code->param); + break; +#endif default: return -EILSEQ; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index dfd096031305..333cda6d2633 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -157,14 +157,13 @@ int traceprobe_split_symbol_offset(char *symbol, long *offset) #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) static int parse_probe_vars(char *arg, const struct fetch_type *t, - struct fetch_insn *code, bool is_return, - bool is_kprobe) + struct fetch_insn *code, unsigned int flags) { int ret = 0; unsigned long param; if (strcmp(arg, "retval") == 0) { - if (is_return) + if (flags & TPARG_FL_RETURN) code->op = FETCH_OP_RETVAL; else ret = -EINVAL; @@ -173,7 +172,8 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, code->op = FETCH_OP_STACKP; } else if (isdigit(arg[5])) { ret = kstrtoul(arg + 5, 10, ¶m); - if (ret || (is_kprobe && param > PARAM_MAX_STACK)) + if (ret || ((flags & TPARG_FL_KERNEL) && + param > PARAM_MAX_STACK)) ret = -EINVAL; else { code->op = FETCH_OP_STACK; @@ -183,6 +183,18 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, ret = -EINVAL; } else if (strcmp(arg, "comm") == 0) { code->op = FETCH_OP_COMM; +#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API + } else if (((flags & TPARG_FL_MASK) == + (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) && + strncmp(arg, "arg", 3) == 0) { + if (!isdigit(arg[3])) + return -EINVAL; + ret = kstrtoul(arg + 3, 10, ¶m); + if (ret || !param || param > PARAM_MAX_STACK) + return -EINVAL; + code->op = FETCH_OP_ARG; + code->param = (unsigned int)param - 1; +#endif } else ret = -EINVAL; @@ 
-193,7 +205,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, static int parse_probe_arg(char *arg, const struct fetch_type *type, struct fetch_insn **pcode, struct fetch_insn *end, - bool is_return, bool is_kprobe) + unsigned int flags) { struct fetch_insn *code = *pcode; unsigned long param; @@ -203,8 +215,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type, switch (arg[0]) { case '$': - ret = parse_probe_vars(arg + 1, type, code, - is_return, is_kprobe); + ret = parse_probe_vars(arg + 1, type, code, flags); break; case '%': /* named register */ @@ -226,7 +237,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type, code->immediate = param; } else if (arg[1] == '+') { /* kprobes don't support file offsets */ - if (is_kprobe) + if (flags & TPARG_FL_KERNEL) return -EINVAL; ret = kstrtol(arg + 2, 0, &offset); @@ -237,7 +248,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type, code->immediate = (unsigned long)offset; // imm64? } else { /* uprobes don't support symbols */ - if (!is_kprobe) + if (!(flags & TPARG_FL_KERNEL)) return -EINVAL; ret = traceprobe_split_symbol_offset(arg + 1, &offset); @@ -278,8 +289,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type, const struct fetch_type *t2 = find_fetch_type(NULL); *tmp = '\0'; - ret = parse_probe_arg(arg, t2, &code, end, is_return, - is_kprobe); + ret = parse_probe_arg(arg, t2, &code, end, flags); if (ret) break; if (code->op == FETCH_OP_COMM) @@ -339,7 +349,7 @@ static int __parse_bitfield_probe_arg(const char *bf, /* String length checking wrapper */ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, - struct probe_arg *parg, bool is_return, bool is_kprobe) + struct probe_arg *parg, unsigned int flags) { struct fetch_insn *code, *scode, *tmp = NULL; char *t, *t2; @@ -397,7 +407,7 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], - is_return, is_kprobe); + flags); if (ret) goto fail; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 1f456fd82483..09f62171cc23 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "trace.h" @@ -86,6 +87,7 @@ enum fetch_op { FETCH_OP_RETVAL, /* Return value */ FETCH_OP_IMM, /* Immediate : .immediate */ FETCH_OP_COMM, /* Current comm */ + FETCH_OP_ARG, /* Function argument : .param */ FETCH_OP_FOFFS, /* File offset: .immediate */ // Stage 2 (dereference) op FETCH_OP_DEREF, /* Dereference: .offset */ @@ -263,8 +265,13 @@ find_event_file_link(struct trace_probe *tp, struct trace_event_file *file) return NULL; } +#define TPARG_FL_RETURN BIT(0) +#define TPARG_FL_KERNEL BIT(1) +#define TPARG_FL_FENTRY BIT(2) +#define TPARG_FL_MASK GENMASK(2, 0) + extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, - struct probe_arg *parg, bool is_return, bool is_kprobe); + struct probe_arg *parg, unsigned int flags); extern int traceprobe_conflict_field_name(const char *name, struct probe_arg *args, int narg); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 7154473ffaa4..394b93572506 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -557,7 +557,7 @@ static int create_trace_uprobe(int argc, char **argv) /* Parse fetch argument */ ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg, - is_return, false); + is_return ? 
TPARG_FL_RETURN : 0); if (ret) { pr_info("Parse error at argument[%d]. (%d)\n", i, ret); goto error; -- cgit v1.2.3 From f3f58935edbcb33fd529fc46d554162a0660fd2d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Aug 2018 01:17:47 +0900 Subject: tracing/uprobes: Fix to return -EFAULT if copy_from_user failed Fix probe_mem_read() to return -EFAULT if copy_from_user() failed. The copy_from_user() returns remaining bytes when it failed, but probe_mem_read() caller expects it returns error code like as probe_kernel_read(). Link: http://lkml.kernel.org/r/153547306719.26502.8353484532699160223.stgit@devbox Reported-by: Dan Carpenter Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_uprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 394b93572506..31ea48eceda1 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -105,7 +105,7 @@ probe_mem_read(void *dest, void *src, size_t size) { void __user *vaddr = (void __force __user *)src; - return copy_from_user(dest, vaddr, size); + return copy_from_user(dest, vaddr, size) ? -EFAULT : 0; } /* * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max -- cgit v1.2.3 From 59158ec4aef7d44be51a6f3e7e17fc64c32604eb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Aug 2018 01:18:15 +0900 Subject: tracing/kprobes: Check the probe on unloaded module correctly Current kprobe event doesn't checks correctly whether the given event is on unloaded module or not. It just checks the event has ":" in the name. That is not enough because if we define a probe on non-exist symbol on loaded module, it allows to define that (with warning message) To ensure it correctly, this searches the module name on loaded module list and only if there is not, it allows to define it. (this event will be available when the target module is loaded) Link: http://lkml.kernel.org/r/153547309528.26502.8300278470528281328.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3faaadbddf54..4727a13824f0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -62,9 +62,23 @@ static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk, return strncmp(mod->name, name, len) == 0 && name[len] == ':'; } -static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk) +static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk) { - return !!strchr(trace_kprobe_symbol(tk), ':'); + char *p; + bool ret; + + if (!tk->symbol) + return false; + p = strchr(tk->symbol, ':'); + if (!p) + return true; + *p = '\0'; + mutex_lock(&module_mutex); + ret = !!find_module(tk->symbol); + mutex_unlock(&module_mutex); + *p = ':'; + + return ret; } static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk) @@ -374,19 +388,13 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) else ret = register_kprobe(&tk->rp.kp); - if (ret == 0) + if (ret == 0) { tk->tp.flags |= TP_FLAG_REGISTERED; - else { - if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { - pr_warn("This probe might be able to register after target module is loaded. 
Continue.\n"); - ret = 0; - } else if (ret == -EILSEQ) { - pr_warn("Probing address(0x%p) is not an instruction boundary.\n", - tk->rp.kp.addr); - ret = -EINVAL; - } + } else if (ret == -EILSEQ) { + pr_warn("Probing address(0x%p) is not an instruction boundary.\n", + tk->rp.kp.addr); + ret = -EINVAL; } - return ret; } @@ -449,6 +457,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk) /* Register k*probe */ ret = __register_trace_kprobe(tk); + if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) { + pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); + ret = 0; + } + if (ret < 0) unregister_kprobe_event(tk); else -- cgit v1.2.3 From a6682814f37124ec1e708cca8f44968445fa9dd7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 29 Aug 2018 01:18:43 +0900 Subject: tracing/kprobes: Allow kprobe-events to record module symbol Allow kprobe-events to record module symbols. Since data symbols in a non-loaded module doesn't exist, it fails to define such symbol as an argument of kprobe-event. But if the kprobe event is defined on that module, we can defer to resolve the symbol address. Note that if given symbol is not found, the event is kept unavailable. User can enable it but the event is not recorded. Link: http://lkml.kernel.org/r/153547312336.26502.11432902826345374463.stgit@devbox Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_kprobe.c | 12 ++++++++- kernel/trace/trace_probe.c | 62 +++++++++++++++++++++++++++++++++++++++------ kernel/trace/trace_probe.h | 4 ++- 3 files changed, 68 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4727a13824f0..fec67188c4d2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -366,7 +366,7 @@ static bool within_notrace_func(struct trace_kprobe *tk) /* Internal register function - just handle k*probes and flags */ static int __register_trace_kprobe(struct trace_kprobe *tk) { - int ret; + int i, ret; if (trace_probe_is_registered(&tk->tp)) return -EINVAL; @@ -377,6 +377,12 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) return -EINVAL; } + for (i = 0; i < tk->tp.nr_args; i++) { + ret = traceprobe_update_arg(&tk->tp.args[i]); + if (ret) + return ret; + } + /* Set/clear disabled flag according to tp->flag */ if (trace_probe_is_enabled(&tk->tp)) tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; @@ -928,6 +934,7 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, { unsigned long val; +retry: /* 1st stage: get value from context */ switch (code->op) { case FETCH_OP_REG: @@ -953,6 +960,9 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, val = regs_get_kernel_argument(regs, code->param); break; #endif + case FETCH_NOP_SYMBOL: /* Ignore a place holder */ + code++; + goto retry; default: return -EILSEQ; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 333cda6d2633..5b3d573b3dcf 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -251,16 +251,16 @@ parse_probe_arg(char *arg, const struct fetch_type *type, if (!(flags & TPARG_FL_KERNEL)) return -EINVAL; - ret = traceprobe_split_symbol_offset(arg + 1, &offset); - if (ret) - break; + /* Preserve symbol for updating */ + code->op = FETCH_NOP_SYMBOL; + code->data = kstrdup(arg + 1, GFP_KERNEL); + if (!code->data) + return -ENOMEM; + if (++code == end) + return -E2BIG; code->op = FETCH_OP_IMM; - 
code->immediate = - (unsigned long)kallsyms_lookup_name(arg + 1); - if (!code->immediate) - return -ENOENT; - code->immediate += offset; + code->immediate = 0; } /* These are fetching from memory */ if (++code == end) @@ -480,6 +480,11 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, memcpy(parg->code, tmp, sizeof(*code) * (code - tmp + 1)); fail: + if (ret) { + for (code = tmp; code < tmp + FETCH_INSN_MAX; code++) + if (code->op == FETCH_NOP_SYMBOL) + kfree(code->data); + } kfree(tmp); return ret; @@ -504,12 +509,53 @@ int traceprobe_conflict_field_name(const char *name, void traceprobe_free_probe_arg(struct probe_arg *arg) { + struct fetch_insn *code = arg->code; + + while (code && code->op != FETCH_OP_END) { + if (code->op == FETCH_NOP_SYMBOL) + kfree(code->data); + code++; + } kfree(arg->code); kfree(arg->name); kfree(arg->comm); kfree(arg->fmt); } +int traceprobe_update_arg(struct probe_arg *arg) +{ + struct fetch_insn *code = arg->code; + long offset; + char *tmp; + char c; + int ret = 0; + + while (code && code->op != FETCH_OP_END) { + if (code->op == FETCH_NOP_SYMBOL) { + if (code[1].op != FETCH_OP_IMM) + return -EINVAL; + + tmp = strpbrk("+-", code->data); + if (tmp) + c = *tmp; + ret = traceprobe_split_symbol_offset(code->data, + &offset); + if (ret) + return ret; + + code[1].immediate = + (unsigned long)kallsyms_lookup_name(code->data); + if (tmp) + *tmp = c; + if (!code[1].immediate) + return -ENOENT; + code[1].immediate += offset; + } + code++; + } + return 0; +} + /* When len=0, we just calculate the needed length */ #define LEN_OR_ZERO (len ? len - pos : 0) static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 09f62171cc23..974afc1a3e73 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -100,6 +100,7 @@ enum fetch_op { // Stage 5 (loop) op FETCH_OP_LP_ARRAY, /* Array: .param = loop count */ FETCH_OP_END, + FETCH_NOP_SYMBOL, /* Unresolved Symbol holder */ }; struct fetch_insn { @@ -116,6 +117,7 @@ struct fetch_insn { unsigned char rshift; }; unsigned long immediate; + void *data; }; }; @@ -276,7 +278,7 @@ extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, extern int traceprobe_conflict_field_name(const char *name, struct probe_arg *args, int narg); -extern void traceprobe_update_arg(struct probe_arg *arg); +extern int traceprobe_update_arg(struct probe_arg *arg); extern void traceprobe_free_probe_arg(struct probe_arg *arg); extern int traceprobe_split_symbol_offset(char *symbol, long *offset); -- cgit v1.2.3 From cee271678d0e3177a25d0fcb2fa5e051d48e4262 Mon Sep 17 00:00:00 2001 From: Björn Töpel Date: Mon, 8 Oct 2018 19:40:16 +0200 Subject: xsk: do not call synchronize_net() under RCU read lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The XSKMAP update and delete functions called synchronize_net(), which can sleep. It is not allowed to sleep during an RCU read section. Instead we need to make sure that the sock sk_destruct (xsk_destruct) function is asynchronously called after an RCU grace period. Setting the SOCK_RCU_FREE flag for XDP sockets takes care of this. 
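
Condensed, the change described above drops the sleeping synchronize_net() from the XSKMAP update/delete paths and instead defers socket teardown through RCU at socket-creation time. The sketch below only restates the two relevant hunks of the diff that follows (error handling and locking elided), so it is an illustration of the patch rather than a replacement for it:

	/* xsk_map_update_elem() / xsk_map_delete_elem(): no synchronize_net()
	 * (which may sleep) inside the RCU read section -- just drop the ref.
	 */
	old_xs = xchg(&m->xsk_map[i], xs);
	if (old_xs)
		sock_put((struct sock *)old_xs);

	/* xsk_create(): mark the socket so that sk_destruct (xsk_destruct) only
	 * runs after an RCU grace period, which is what makes the xchg above safe.
	 */
	sock_set_flag(sk, SOCK_RCU_FREE);
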
Fixes: fbfc504a24f5 ("bpf: introduce new bpf AF_XDP map type BPF_MAP_TYPE_XSKMAP") Reported-by: Eric Dumazet Signed-off-by: Björn Töpel Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/xskmap.c | 10 ++-------- net/xdp/xsk.c | 2 ++ 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 9f8463afda9c..47147c9e184d 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value, sock_hold(sock->sk); old_xs = xchg(&m->xsk_map[i], xs); - if (old_xs) { - /* Make sure we've flushed everything. */ - synchronize_net(); + if (old_xs) sock_put((struct sock *)old_xs); - } sockfd_put(sock); return 0; @@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key) return -EINVAL; old_xs = xchg(&m->xsk_map[k], NULL); - if (old_xs) { - /* Make sure we've flushed everything. */ - synchronize_net(); + if (old_xs) sock_put((struct sock *)old_xs); - } return 0; } diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 4e937cd7c17d..661504042d30 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, sk->sk_destruct = xsk_destruct; sk_refcnt_debug_inc(sk); + sock_set_flag(sk, SOCK_RCU_FREE); + xs = xdp_sk(sk); mutex_init(&xs->mutex); spin_lock_init(&xs->tx_completion_lock); -- cgit v1.2.3 From baa9be4ffb55876923dc9716abc0a448e510ba30 Mon Sep 17 00:00:00 2001 From: Phil Auld Date: Mon, 8 Oct 2018 10:36:40 -0400 Subject: sched/fair: Fix throttle_list starvation with low CFS quota With a very low cpu.cfs_quota_us setting, such as the minimum of 1000, distribute_cfs_runtime may not empty the throttled_list before it runs out of runtime to distribute. In that case, due to the change from c06f04c7048 to put throttled entries at the head of the list, later entries on the list will starve. Essentially, the same X processes will get pulled off the list, given CPU time and then, when expired, get put back on the head of the list where distribute_cfs_runtime will give runtime to the same set of processes leaving the rest. Fix the issue by setting a bit in struct cfs_bandwidth when distribute_cfs_runtime is running, so that the code in throttle_cfs_rq can decide to put the throttled entry on the tail or the head of the list. The bit is set/cleared by the callers of distribute_cfs_runtime while they hold cfs_bandwidth->lock. This is easy to reproduce with a handful of CPU consumers. I use 'crash' on the live system. 
In some cases you can simply look at the throttled list and see the later entries are not changing: crash> list cfs_rq.throttled_list -H 0xffff90b54f6ade40 -s cfs_rq.runtime_remaining | paste - - | awk '{print $1" "$4}' | pr -t -n3 1 ffff90b56cb2d200 -976050 2 ffff90b56cb2cc00 -484925 3 ffff90b56cb2bc00 -658814 4 ffff90b56cb2ba00 -275365 5 ffff90b166a45600 -135138 6 ffff90b56cb2da00 -282505 7 ffff90b56cb2e000 -148065 8 ffff90b56cb2fa00 -872591 9 ffff90b56cb2c000 -84687 10 ffff90b56cb2f000 -87237 11 ffff90b166a40a00 -164582 crash> list cfs_rq.throttled_list -H 0xffff90b54f6ade40 -s cfs_rq.runtime_remaining | paste - - | awk '{print $1" "$4}' | pr -t -n3 1 ffff90b56cb2d200 -994147 2 ffff90b56cb2cc00 -306051 3 ffff90b56cb2bc00 -961321 4 ffff90b56cb2ba00 -24490 5 ffff90b166a45600 -135138 6 ffff90b56cb2da00 -282505 7 ffff90b56cb2e000 -148065 8 ffff90b56cb2fa00 -872591 9 ffff90b56cb2c000 -84687 10 ffff90b56cb2f000 -87237 11 ffff90b166a40a00 -164582 Sometimes it is easier to see by finding a process getting starved and looking at the sched_info: crash> task ffff8eb765994500 sched_info PID: 7800 TASK: ffff8eb765994500 CPU: 16 COMMAND: "cputest" sched_info = { pcount = 8, run_delay = 697094208, last_arrival = 240260125039, last_queued = 240260327513 }, crash> task ffff8eb765994500 sched_info PID: 7800 TASK: ffff8eb765994500 CPU: 16 COMMAND: "cputest" sched_info = { pcount = 8, run_delay = 697094208, last_arrival = 240260125039, last_queued = 240260327513 }, Signed-off-by: Phil Auld Reviewed-by: Ben Segall Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org Fixes: c06f04c70489 ("sched: Fix potential near-infinite distribute_cfs_runtime() loop") Link: http://lkml.kernel.org/r/20181008143639.GA4019@pauld.bos.csb Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 22 +++++++++++++++++++--- kernel/sched/sched.h | 2 ++ 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7fc4a371bdd2..f88e00705b55 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4476,9 +4476,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) /* * Add to the _head_ of the list, so that an already-started - * distribute_cfs_runtime will not see us + * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is + * not running add to the tail so that later runqueues don't get starved. */ - list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); + if (cfs_b->distribute_running) + list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); + else + list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); /* * If we're the first throttled task, make sure the bandwidth @@ -4622,14 +4626,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) * in us over-using our runtime if it is all used during this loop, but * only by limited amounts in that extreme case. 
*/ - while (throttled && cfs_b->runtime > 0) { + while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) { runtime = cfs_b->runtime; + cfs_b->distribute_running = 1; raw_spin_unlock(&cfs_b->lock); /* we can't nest cfs_b->lock while distributing bandwidth */ runtime = distribute_cfs_runtime(cfs_b, runtime, runtime_expires); raw_spin_lock(&cfs_b->lock); + cfs_b->distribute_running = 0; throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->runtime -= min(runtime, cfs_b->runtime); @@ -4740,6 +4746,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) /* confirm we're still not at a refresh boundary */ raw_spin_lock(&cfs_b->lock); + if (cfs_b->distribute_running) { + raw_spin_unlock(&cfs_b->lock); + return; + } + if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { raw_spin_unlock(&cfs_b->lock); return; @@ -4749,6 +4760,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) runtime = cfs_b->runtime; expires = cfs_b->runtime_expires; + if (runtime) + cfs_b->distribute_running = 1; + raw_spin_unlock(&cfs_b->lock); if (!runtime) @@ -4759,6 +4773,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) raw_spin_lock(&cfs_b->lock); if (expires == cfs_b->runtime_expires) cfs_b->runtime -= min(runtime, cfs_b->runtime); + cfs_b->distribute_running = 0; raw_spin_unlock(&cfs_b->lock); } @@ -4867,6 +4882,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->period_timer.function = sched_cfs_period_timer; hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cfs_b->slack_timer.function = sched_cfs_slack_timer; + cfs_b->distribute_running = 0; } static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 455fa330de04..9683f458aec7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -346,6 +346,8 @@ struct cfs_bandwidth { int nr_periods; int nr_throttled; u64 throttled_time; + + bool distribute_running; #endif }; -- cgit v1.2.3 From 9627808d2d409279cea3fb334212d04a83ff6371 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 2 Oct 2018 11:38:34 +0900 Subject: printk: keep kernel cont support always enabled Since commit 5c2992ee7fd8a29 ("printk: remove console flushing special cases for partial buffered lines") we don't print cont fragments to the consoles; cont lines are now proper log_buf entries and there is no "consecutive continuation flag" anymore: we either have 'c' entries that mark continuation lines without fragments; or '-' entries that mark normal logbuf entries. There are no '+' entries anymore. However, we still have a small leftover - presence of ext_console drivers disables kernel cont support and we flush each pr_cont() and store it as a separate log_buf entry. Previously, it worked because msg_print_ext_header() had that "an optional external merge of the records" functionality: if (msg->flags & LOG_CONT) cont = (prev_flags & LOG_CONT) ? '+' : 'c'; We don't do this as of now, so keep kernel cont always enabled. Note from pmladek: The original purpose was to get full information including the metadata and dictionary via extended console drivers, see commit 6fe29354befe4c46e ("printk: implement support for extended console drivers"). The dictionary probably was the most important part but it was actually lost: static void cont_flush(void) { [...] 
log_store(cont.facility, cont.level, cont.flags, cont.ts_nsec, NULL, 0, cont.buf, cont.len); Nobody noticed because the only dictionary user is dev_printk() and dev_cont() is _not_ defined. Link: http://lkml.kernel.org/r/20181002023836.4487-2-sergey.senozhatsky@gmail.com To: Steven Rostedt Cc: Andrew Morton Cc: Dmitriy Vyukov Cc: Tetsuo Handa Cc: Tejun Heo Cc: Peter Zijlstra Cc: LKML Cc: Sergey Senozhatsky Signed-off-by: Sergey Senozhatsky [pmladek@suse.com: Updated commit message] Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index fce696d80e09..f717656c0fac 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -194,16 +194,7 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, return 0; } -/* - * Number of registered extended console drivers. - * - * If extended consoles are present, in-kernel cont reassembly is disabled - * and each fragment is stored as a separate log entry with proper - * continuation flag so that every emitted message has full metadata. This - * doesn't change the result for regular consoles or /proc/kmsg. For - * /dev/kmsg, as long as the reader concatenates messages according to - * consecutive continuation flags, the end result should be the same too. - */ +/* Number of registered extended console drivers. */ static int nr_ext_console_drivers; /* @@ -1781,12 +1772,8 @@ static void cont_flush(void) static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len) { - /* - * If ext consoles are present, flush and skip in-kernel - * continuation. See nr_ext_console_drivers definition. Also, if - * the line gets too long, split it up in separate records. - */ - if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) { + /* If the line gets too long, split it up in separate records. */ + if (cont.len + len > sizeof(cont.buf)) { cont_flush(); return false; } @@ -2706,8 +2693,7 @@ void register_console(struct console *newcon) } if (newcon->flags & CON_EXTENDED) - if (!nr_ext_console_drivers++) - pr_info("continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n"); + nr_ext_console_drivers++; if (newcon->flags & CON_PRINTBUFFER) { /* -- cgit v1.2.3 From 3ac37a93fa9217e576bebfd4ba3e80edaaeb2289 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 2 Oct 2018 11:38:35 +0900 Subject: printk: lock/unlock console only for new logbuf entries Prior to commit 5c2992ee7fd8a29 ("printk: remove console flushing special cases for partial buffered lines") we would do console_cont_flush() for each pr_cont() to print cont fragments, so console_unlock() would actually print data: pr_cont(); console_lock(); console_unlock() console_cont_flush(); // print cont fragment ... pr_cont(); console_lock(); console_unlock() console_cont_flush(); // print cont fragment We don't do console_cont_flush() anymore, so when we do pr_cont() console_unlock() does nothing (unless we flushed the cont buffer): pr_cont(); console_lock(); console_unlock(); // noop ... pr_cont(); console_lock(); console_unlock(); // noop ... pr_cont(); cont_flush(); console_lock(); console_unlock(); // print data We also wakeup klogd purposelessly for pr_cont() output - un-flushed cont buffer is not stored in log_buf; there is nothing to pull. 
Thus we can console_lock()/console_unlock()/wake_up_klogd() only when we know that we log_store()-ed a message and there is something to print to the consoles/syslog. Link: http://lkml.kernel.org/r/20181002023836.4487-3-sergey.senozhatsky@gmail.com To: Steven Rostedt Cc: Andrew Morton Cc: Dmitriy Vyukov Cc: Tetsuo Handa Cc: Tejun Heo Cc: Peter Zijlstra Cc: LKML Cc: Sergey Senozhatsky Signed-off-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index f717656c0fac..e9a7e50ed60a 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1890,8 +1890,9 @@ asmlinkage int vprintk_emit(int facility, int level, const char *fmt, va_list args) { int printed_len; - bool in_sched = false; + bool in_sched = false, pending_output; unsigned long flags; + u64 curr_log_seq; if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; @@ -1903,11 +1904,13 @@ asmlinkage int vprintk_emit(int facility, int level, /* This stops the holder of console_sem just where we want him */ logbuf_lock_irqsave(flags); + curr_log_seq = log_next_seq; printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args); + pending_output = (curr_log_seq != log_next_seq); logbuf_unlock_irqrestore(flags); /* If called from the scheduler, we can not call up(). */ - if (!in_sched) { + if (!in_sched && pending_output) { /* * Disable preemption to avoid being preempted while holding * console_sem which would prevent anyone from printing to @@ -1924,7 +1927,8 @@ asmlinkage int vprintk_emit(int facility, int level, preempt_enable(); } - wake_up_klogd(); + if (pending_output) + wake_up_klogd(); return printed_len; } EXPORT_SYMBOL(vprintk_emit); -- cgit v1.2.3 From 0e96a19c4450253c3ddcff69140b1096f2c2adaf Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 2 Oct 2018 11:38:36 +0900 Subject: printk: do not preliminary split up cont buffer We have a proper 'overflow' check which tells us that we need to split up existing cont buffer in separate records: if (cont.len + len > sizeof(cont.buf)) cont_flush(); At the same time we also have one extra flush: "if cont buffer is 80% full then split it up" in cont_add(): if (cont.len > (sizeof(cont.buf) * 80) / 100) cont_flush(); This looks to be redundant, since the existing "overflow" check should work just fine, so remove this 80% check and wait for either a normal cont termination \n, for preliminary flush due to possible buffer overflow or for preliminary flush due to cont race. 
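For context, below is a minimal illustration of the pr_cont() pattern this printk series is concerned with; the driver name and messages are made up. Fragments accumulate in the cont buffer until the terminating newline (or one of the preliminary-flush conditions above) turns them into a single log_buf record.

#include <linux/printk.h>

static void frob_selftest_report(void)
{
	pr_info("frobnicator: self test:");	/* opens a cont line           */
	pr_cont(" phase1 ok");			/* appended to the cont buffer */
	pr_cont(" phase2 ok");
	pr_cont(" done\n");			/* '\n' terminates and flushes */
}
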
Link: http://lkml.kernel.org/r/20181002023836.4487-4-sergey.senozhatsky@gmail.com To: Steven Rostedt Cc: Andrew Morton Cc: Dmitriy Vyukov Cc: Tetsuo Handa Cc: Tejun Heo Cc: Peter Zijlstra Cc: LKML Cc: Sergey Senozhatsky Signed-off-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index e9a7e50ed60a..505663bb4f1f 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1796,9 +1796,6 @@ static bool cont_add(int facility, int level, enum log_flags flags, const char * cont_flush(); } - if (cont.len > (sizeof(cont.buf) * 80) / 100) - cont_flush(); - return true; } -- cgit v1.2.3 From d2130e82e9454304e9b91ba9da551b5989af8c27 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 10 Oct 2018 20:33:08 +0900 Subject: printk: fix integer overflow in setup_log_buf() The way we calculate logbuf free space percentage overflows signed integer: int free; free = __LOG_BUF_LEN - log_next_idx; pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); We support LOG_BUF_LEN of up to 1<<25 bytes. Since setup_log_buf() is called during early init, logbuf is mostly empty, so __LOG_BUF_LEN - log_next_idx is close to 1<<25. Thus when we multiply it by 100, we overflow signed integer value range: 100 is 2^6 + 2^5 + 2^2. Example, booting with LOG_BUF_LEN 1<<25 and log_buf_len=2G boot param: [ 0.075317] log_buf_len: -2147483648 bytes [ 0.075319] early log buf free: 33549896(-28%) Make "free" unsigned integer and use appropriate printk() specifier. Link: http://lkml.kernel.org/r/20181010113308.9337-1-sergey.senozhatsky@gmail.com To: Steven Rostedt Cc: linux-kernel@vger.kernel.org Cc: Sergey Senozhatsky Signed-off-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 505663bb4f1f..b77150ad1965 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1098,7 +1098,7 @@ void __init setup_log_buf(int early) { unsigned long flags; char *new_log_buf; - int free; + unsigned int free; if (log_buf != __log_buf) return; -- cgit v1.2.3 From bf173ca92da97863e1579a982d500da98f2e7a3f Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Fri, 12 Oct 2018 12:50:22 -0400 Subject: tracing: probeevent: Fix uninitialized used of offset in parse args Dan's smatch utility found an uninitialized use of offset in a path in parse_probe_args(). Unless an offset is specifically specified for commands that allow them, it should default to zero. 
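As a point of reference, the offset in question is the optional +|-offs part of a probe event fetch argument. A hedged userspace sketch follows; the tracefs path, event names, symbol and fetch types are illustrative only. The second definition gives no offset at all, which with this fix reliably means an offset of zero rather than whatever happened to be on the stack.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* tracefs may also be mounted at /sys/kernel/debug/tracing */
	int fd = open("/sys/kernel/tracing/kprobe_events",
		      O_WRONLY | O_APPEND);
	/* explicit offset: fetch memory at jiffies + 4 */
	const char *with_off = "p:sample/a do_sys_open j=@jiffies+4:u32\n";
	/* no offset given: parse_probe_arg() must treat it as +0 */
	const char *no_off   = "p:sample/b do_sys_open j=@jiffies:u32\n";

	if (fd < 0)
		return 1;
	if (write(fd, with_off, strlen(with_off)) < 0 ||
	    write(fd, no_off, strlen(no_off)) < 0)
		return 1;
	close(fd);
	return 0;
}
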
Link: http://lkml.kernel.org/r/20181012134246.5doqaobxunlqqs53@mwanda Fixes: 533059281ee5 ("tracing: probeevent: Introduce new argument fetching code") Reported-by: Dan Carpenter (smatch) Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 5b3d573b3dcf..3ef15a6683c0 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -209,7 +209,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type, { struct fetch_insn *code = *pcode; unsigned long param; - long offset; + long offset = 0; char *tmp; int ret = 0; -- cgit v1.2.3 From 1ae80cf31938c8f77c37a29bbe29e7f1cd492be8 Mon Sep 17 00:00:00 2001 From: Daniel Colascione Date: Fri, 12 Oct 2018 03:54:27 -0700 Subject: bpf: wait for running BPF programs when updating map-in-map The map-in-map frequently serves as a mechanism for atomic snapshotting of state that a BPF program might record. The current implementation is dangerous to use in this way, however, since userspace has no way of knowing when all programs that might have retrieved the "old" value of the map may have completed. This change ensures that map update operations on map-in-map map types always wait for all references to the old map to drop before returning to userspace. Signed-off-by: Daniel Colascione Reviewed-by: Joel Fernandes (Google) Signed-off-by: Alexei Starovoitov --- kernel/bpf/syscall.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 4f416234251f..53968f82b919 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -748,6 +748,17 @@ err_put: return err; } +static void maybe_wait_bpf_programs(struct bpf_map *map) +{ + /* Wait for any running BPF programs to complete so that + * userspace, when we return to it, knows that all programs + * that could be running use the new map value. + */ + if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || + map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) + synchronize_rcu(); +} + #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags static int map_update_elem(union bpf_attr *attr) @@ -842,6 +853,7 @@ static int map_update_elem(union bpf_attr *attr) } __this_cpu_dec(bpf_prog_active); preempt_enable(); + maybe_wait_bpf_programs(map); out: free_value: kfree(value); @@ -894,6 +906,7 @@ static int map_delete_elem(union bpf_attr *attr) rcu_read_unlock(); __this_cpu_dec(bpf_prog_active); preempt_enable(); + maybe_wait_bpf_programs(map); out: kfree(key); err_put: -- cgit v1.2.3 From 1243a51f6c05ecbb2c5c9e02fdcc1e7a06f76f26 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 13 Oct 2018 02:45:57 +0200 Subject: tcp, ulp: remove ulp bits from sockmap In order to prepare sockmap logic to be used in combination with kTLS we need to detangle it from ULP, and further split it in later commits into a generic API. Joint work with John. 
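(Referring back to the map-in-map change a little further up.) The usual userspace side of that atomic-snapshot pattern looks roughly like the sketch below. It assumes libbpf's low-level bpf_map_update_elem() wrapper and an outer BPF_MAP_TYPE_ARRAY_OF_MAPS whose slot 0 the attached programs write through; the function and variable names are illustrative.

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Swap the inner map under slot 0 and hand back the previous one for
 * reading. With the synchronize_rcu() added above, bpf_map_update_elem()
 * only returns once no program can still be using the old inner map, so
 * the caller may safely drain it afterwards.
 */
static int snapshot_swap(int outer_fd, int cur_inner_fd, int spare_inner_fd)
{
	__u32 key = 0;

	if (bpf_map_update_elem(outer_fd, &key, &spare_inner_fd, BPF_ANY))
		return -1;
	return cur_inner_fd;	/* now quiescent; read it, then recycle it */
}
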
Signed-off-by: Daniel Borkmann Signed-off-by: John Fastabend Signed-off-by: Alexei Starovoitov --- include/net/tcp.h | 1 - kernel/bpf/sockmap.c | 39 ++++++++--------------------- net/ipv4/tcp_ulp.c | 71 ++++++++++------------------------------------------ 3 files changed, 23 insertions(+), 88 deletions(-) (limited to 'kernel') diff --git a/include/net/tcp.h b/include/net/tcp.h index 0d2929223c70..8f5cef67fd35 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2057,7 +2057,6 @@ struct tcp_ulp_ops { int tcp_register_ulp(struct tcp_ulp_ops *type); void tcp_unregister_ulp(struct tcp_ulp_ops *type); int tcp_set_ulp(struct sock *sk, const char *name); -int tcp_set_ulp_id(struct sock *sk, const int ulp); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 5d0677d808ae..de6f7a65c72b 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -182,6 +182,7 @@ enum { static struct proto *saved_tcpv6_prot __read_mostly; static DEFINE_SPINLOCK(tcpv6_prot_lock); static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; + static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], struct proto *base) { @@ -239,6 +240,13 @@ static int bpf_tcp_init(struct sock *sk) return 0; } +static int __init bpf_sock_init(void) +{ + build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); + return 0; +} +core_initcall(bpf_sock_init); + static void smap_release_sock(struct smap_psock *psock, struct sock *sock); static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); @@ -413,15 +421,6 @@ enum __sk_action { __SK_NONE, }; -static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = { - .name = "bpf_tcp", - .uid = TCP_ULP_BPF, - .user_visible = false, - .owner = NULL, - .init = bpf_tcp_init, - .release = bpf_tcp_release, -}; - static int memcopy_from_iter(struct sock *sk, struct sk_msg_buff *md, struct iov_iter *from, int bytes) @@ -1236,16 +1235,6 @@ static void bpf_tcp_msg_add(struct smap_psock *psock, bpf_prog_put(orig_tx_msg); } -static int bpf_tcp_ulp_register(void) -{ - build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); - /* Once BPF TX ULP is registered it is never unregistered. It - * will be in the ULP list for the lifetime of the system. Doing - * duplicate registers is not a problem. 
- */ - return tcp_register_ulp(&bpf_tcp_ulp_ops); -} - static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) { struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); @@ -1491,7 +1480,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock) { if (refcount_dec_and_test(&psock->refcnt)) { if (psock_is_smap_sk(sock)) - tcp_cleanup_ulp(sock); + bpf_tcp_release(sock); write_lock_bh(&sock->sk_callback_lock); smap_stop_sock(psock, sock); write_unlock_bh(&sock->sk_callback_lock); @@ -1666,10 +1655,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); - err = bpf_tcp_ulp_register(); - if (err && err != -EEXIST) - return ERR_PTR(err); - stab = kzalloc(sizeof(*stab), GFP_USER); if (!stab) return ERR_PTR(-ENOMEM); @@ -1951,7 +1936,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, if (tx_msg) bpf_tcp_msg_add(psock, sock, tx_msg); if (new) { - err = tcp_set_ulp_id(sock, TCP_ULP_BPF); + err = bpf_tcp_init(sock); if (err) goto out_free; } @@ -2187,10 +2172,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) */ return ERR_PTR(-E2BIG); - err = bpf_tcp_ulp_register(); - if (err && err != -EEXIST) - return ERR_PTR(err); - htab = kzalloc(sizeof(*htab), GFP_USER); if (!htab) return ERR_PTR(-ENOMEM); diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 34e96353f115..a9162aa11af9 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -6,7 +6,7 @@ * */ -#include +#include #include #include #include @@ -29,18 +29,6 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name) return NULL; } -static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp) -{ - struct tcp_ulp_ops *e; - - list_for_each_entry_rcu(e, &tcp_ulp_list, list) { - if (e->uid == ulp) - return e; - } - - return NULL; -} - static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name) { const struct tcp_ulp_ops *ulp = NULL; @@ -63,18 +51,6 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name) return ulp; } -static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid) -{ - const struct tcp_ulp_ops *ulp; - - rcu_read_lock(); - ulp = tcp_ulp_find_id(uid); - if (!ulp || !try_module_get(ulp->owner)) - ulp = NULL; - rcu_read_unlock(); - return ulp; -} - /* Attach new upper layer protocol to the list * of available protocols. 
*/ @@ -135,56 +111,35 @@ void tcp_cleanup_ulp(struct sock *sk) icsk->icsk_ulp_ops = NULL; } -/* Change upper layer protocol for socket */ -int tcp_set_ulp(struct sock *sk, const char *name) +static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops) { struct inet_connection_sock *icsk = inet_csk(sk); - const struct tcp_ulp_ops *ulp_ops; - int err = 0; + int err; - sock_owned_by_me(sk); + err = -EEXIST; if (icsk->icsk_ulp_ops) - return -EEXIST; - - ulp_ops = __tcp_ulp_find_autoload(name); - if (!ulp_ops) - return -ENOENT; - - if (!ulp_ops->user_visible) { - module_put(ulp_ops->owner); - return -ENOENT; - } + goto out_err; err = ulp_ops->init(sk); - if (err) { - module_put(ulp_ops->owner); - return err; - } + if (err) + goto out_err; icsk->icsk_ulp_ops = ulp_ops; return 0; +out_err: + module_put(ulp_ops->owner); + return err; } -int tcp_set_ulp_id(struct sock *sk, int ulp) +int tcp_set_ulp(struct sock *sk, const char *name) { - struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_ulp_ops *ulp_ops; - int err; sock_owned_by_me(sk); - if (icsk->icsk_ulp_ops) - return -EEXIST; - ulp_ops = __tcp_ulp_lookup(ulp); + ulp_ops = __tcp_ulp_find_autoload(name); if (!ulp_ops) return -ENOENT; - err = ulp_ops->init(sk); - if (err) { - module_put(ulp_ops->owner); - return err; - } - - icsk->icsk_ulp_ops = ulp_ops; - return 0; + return __tcp_set_ulp(sk, ulp_ops); } -- cgit v1.2.3 From 604326b41a6fb9b4a78b6179335decee0365cd8c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 13 Oct 2018 02:45:58 +0200 Subject: bpf, sockmap: convert to generic sk_msg interface Add a generic sk_msg layer, and convert current sockmap and later kTLS over to make use of it. While sk_buff handles network packet representation from netdevice up to socket, sk_msg handles data representation from application to socket layer. This means that sk_msg framework spans across ULP users in the kernel, and enables features such as introspection or filtering of data with the help of BPF programs that operate on this data structure. Latter becomes in particular useful for kTLS where data encryption is deferred into the kernel, and as such enabling the kernel to perform L7 introspection and policy based on BPF for TLS connections where the record is being encrypted after BPF has run and came to a verdict. In order to get there, first step is to transform open coding of scatter-gather list handling into a common core framework that subsystems can use. The code itself has been split and refactored into three bigger pieces: i) the generic sk_msg API which deals with managing the scatter gather ring, providing helpers for walking and mangling, transferring application data from user space into it, and preparing it for BPF pre/post-processing, ii) the plain sock map itself where sockets can be attached to or detached from; these bits are independent of i) which can now be used also without sock map, and iii) the integration with plain TCP as one protocol to be used for processing L7 application data (later this could e.g. also be extended to other protocols like UDP). The semantics are the same with the old sock map code and therefore no change of user facing behavior or APIs. While pursuing this work it also helped finding a number of bugs in the old sockmap code that we've fixed already in earlier commits. The test_sockmap kselftest suite passes through fine as well. Joint work with John. 
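To give a feel for the new core, here is a small sketch of how the scatter-gather ring introduced in include/linux/skmsg.h (see the hunk below) is meant to be walked. It assumes a message that is neither empty nor fully wrapped and leaves out locking and error handling; the function name is illustrative.

#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <linux/skmsg.h>

static void dump_msg_frags(struct sk_msg *msg)
{
	u32 i = msg->sg.start;

	while (i != msg->sg.end) {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		/* each element covers one page fragment of application data */
		pr_debug("frag %u: len=%u off=%u\n", i, sge->length, sge->offset);
		sk_msg_iter_var_next(i);	/* wraps at MAX_MSG_FRAGS */
	}
}
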
Signed-off-by: Daniel Borkmann Signed-off-by: John Fastabend Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 33 +- include/linux/bpf_types.h | 2 +- include/linux/filter.h | 21 - include/linux/skmsg.h | 371 +++++++ include/net/tcp.h | 27 + kernel/bpf/Makefile | 5 - kernel/bpf/core.c | 2 - kernel/bpf/sockmap.c | 2610 --------------------------------------------- kernel/bpf/syscall.c | 6 +- net/Kconfig | 11 + net/core/Makefile | 2 + net/core/filter.c | 270 ++--- net/core/skmsg.c | 763 +++++++++++++ net/core/sock_map.c | 1002 +++++++++++++++++ net/ipv4/Makefile | 1 + net/ipv4/tcp_bpf.c | 655 ++++++++++++ net/strparser/Kconfig | 4 +- 17 files changed, 2925 insertions(+), 2860 deletions(-) create mode 100644 include/linux/skmsg.h delete mode 100644 kernel/bpf/sockmap.c create mode 100644 net/core/skmsg.c create mode 100644 net/core/sock_map.c create mode 100644 net/ipv4/tcp_bpf.c (limited to 'kernel') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9b558713447f..e60fff48288b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -737,33 +737,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) } #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) -struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); -struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); -int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); -int sockmap_get_from_fd(const union bpf_attr *attr, int type, - struct bpf_prog *prog); +#if defined(CONFIG_BPF_STREAM_PARSER) +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); #else -static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) -{ - return NULL; -} - -static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map, - void *key) -{ - return NULL; -} - -static inline int sock_map_prog(struct bpf_map *map, - struct bpf_prog *prog, - u32 type) +static inline int sock_map_prog_update(struct bpf_map *map, + struct bpf_prog *prog, u32 which) { return -EOPNOTSUPP; } -static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, - struct bpf_prog *prog) +static inline int sock_map_get_from_fd(const union bpf_attr *attr, + struct bpf_prog *prog) { return -EINVAL; } @@ -839,6 +824,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; +extern const struct bpf_func_proto bpf_msg_redirect_map_proto; +extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; +extern const struct bpf_func_proto bpf_sk_redirect_map_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5432f4c9f50e..fa48343a5ea1 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -57,7 +57,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) +#if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, 
sock_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) #endif diff --git a/include/linux/filter.h b/include/linux/filter.h index 6791a0ac0139..5771874bc01e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -520,24 +520,6 @@ struct bpf_skb_data_end { void *data_end; }; -struct sk_msg_buff { - void *data; - void *data_end; - __u32 apply_bytes; - __u32 cork_bytes; - int sg_copybreak; - int sg_start; - int sg_curr; - int sg_end; - struct scatterlist sg_data[MAX_SKB_FRAGS]; - bool sg_copy[MAX_SKB_FRAGS]; - __u32 flags; - struct sock *sk_redir; - struct sock *sk; - struct sk_buff *skb; - struct list_head list; -}; - struct bpf_redirect_info { u32 ifindex; u32 flags; @@ -833,9 +815,6 @@ void xdp_do_flush_map(void); void bpf_warn_invalid_xdp_action(u32 act); -struct sock *do_sk_redirect_map(struct sk_buff *skb); -struct sock *do_msg_redirect_map(struct sk_msg_buff *md); - #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h new file mode 100644 index 000000000000..95678103c4a0 --- /dev/null +++ b/include/linux/skmsg.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ + +#ifndef _LINUX_SKMSG_H +#define _LINUX_SKMSG_H + +#include +#include +#include +#include + +#include +#include +#include + +#define MAX_MSG_FRAGS MAX_SKB_FRAGS + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS, + __SK_REDIRECT, + __SK_NONE, +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + bool copy[MAX_MSG_FRAGS]; + struct scatterlist data[MAX_MSG_FRAGS]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *skb_parser; + struct bpf_prog *skb_verdict; +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct sk_psock_parser { + struct strparser strp; + bool enabled; + void (*saved_data_ready)(struct sock *sk); +}; + +struct sk_psock_work_state { + struct sk_buff *skb; + u32 len; + u32 off; +}; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_psock_parser parser; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + unsigned long state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *sk); + void (*saved_close)(struct sock *sk, long timeout); + void (*saved_write_space)(struct sock *sk); + struct proto *sk_proto; + struct sk_psock_work_state work_state; + struct work_struct work; + union { + struct rcu_head rcu; + struct work_struct gc; + }; +}; + +int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, + int elem_first_coalesce); +void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); +int sk_msg_free(struct sock *sk, struct sk_msg *msg); +int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); +void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); +void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, + u32 bytes); + +void 
sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); + +int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes); +int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes); + +static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) +{ + WARN_ON(i == msg->sg.end && bytes); +} + +static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) +{ + if (psock->apply_bytes) { + if (psock->apply_bytes < bytes) + psock->apply_bytes = 0; + else + psock->apply_bytes -= bytes; + } +} + +#define sk_msg_iter_var_prev(var) \ + do { \ + if (var == 0) \ + var = MAX_MSG_FRAGS - 1; \ + else \ + var--; \ + } while (0) + +#define sk_msg_iter_var_next(var) \ + do { \ + var++; \ + if (var == MAX_MSG_FRAGS) \ + var = 0; \ + } while (0) + +#define sk_msg_iter_prev(msg, which) \ + sk_msg_iter_var_prev(msg->sg.which) + +#define sk_msg_iter_next(msg, which) \ + sk_msg_iter_var_next(msg->sg.which) + +static inline void sk_msg_clear_meta(struct sk_msg *msg) +{ + memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy)); +} + +static inline void sk_msg_init(struct sk_msg *msg) +{ + memset(msg, 0, sizeof(*msg)); + sg_init_marker(msg->sg.data, ARRAY_SIZE(msg->sg.data)); +} + +static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, + int which, u32 size) +{ + dst->sg.data[which] = src->sg.data[which]; + dst->sg.data[which].length = size; + src->sg.data[which].length -= size; + src->sg.data[which].offset += size; +} + +static inline u32 sk_msg_elem_used(const struct sk_msg *msg) +{ + return msg->sg.end >= msg->sg.start ? + msg->sg.end - msg->sg.start : + msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start); +} + +static inline bool sk_msg_full(const struct sk_msg *msg) +{ + return (msg->sg.end == msg->sg.start) && msg->sg.size; +} + +static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) +{ + return &msg->sg.data[which]; +} + +static inline struct page *sk_msg_page(struct sk_msg *msg, int which) +{ + return sg_page(sk_msg_elem(msg, which)); +} + +static inline bool sk_msg_to_ingress(const struct sk_msg *msg) +{ + return msg->flags & BPF_F_INGRESS; +} + +static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) +{ + struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); + + if (msg->sg.copy[msg->sg.start]) { + msg->data = NULL; + msg->data_end = NULL; + } else { + msg->data = sg_virt(sge); + msg->data_end = msg->data + sge->length; + } +} + +static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, + u32 len, u32 offset) +{ + struct scatterlist *sge; + + get_page(page); + sge = sk_msg_elem(msg, msg->sg.end); + sg_set_page(sge, page, len, offset); + sg_unmark_end(sge); + + msg->sg.copy[msg->sg.end] = true; + msg->sg.size += len; + sk_msg_iter_next(msg, end); +} + +static inline struct sk_psock *sk_psock(const struct sock *sk) +{ + return rcu_dereference_sk_user_data(sk); +} + +static inline bool sk_has_psock(struct sock *sk) +{ + return sk_psock(sk) != NULL && sk->sk_prot->recvmsg == tcp_bpf_recvmsg; +} + +static inline void sk_psock_queue_msg(struct sk_psock *psock, + struct sk_msg *msg) +{ + list_add_tail(&msg->list, &psock->ingress_msg); +} + +static inline void sk_psock_report_error(struct sk_psock *psock, int err) +{ + struct sock *sk = psock->sk; + + sk->sk_err = err; + sk->sk_error_report(sk); +} + +struct sk_psock *sk_psock_init(struct sock *sk, int node); + +int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); +void 
sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); +void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); + +int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg); + +static inline struct sk_psock_link *sk_psock_init_link(void) +{ + return kzalloc(sizeof(struct sk_psock_link), + GFP_ATOMIC | __GFP_NOWARN); +} + +static inline void sk_psock_free_link(struct sk_psock_link *link) +{ + kfree(link); +} + +struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); +#if defined(CONFIG_BPF_STREAM_PARSER) +void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link); +#else +static inline void sk_psock_unlink(struct sock *sk, + struct sk_psock_link *link) +{ +} +#endif + +void __sk_psock_purge_ingress_msg(struct sk_psock *psock); + +static inline void sk_psock_cork_free(struct sk_psock *psock) +{ + if (psock->cork) { + sk_msg_free(psock->sk, psock->cork); + kfree(psock->cork); + psock->cork = NULL; + } +} + +static inline void sk_psock_update_proto(struct sock *sk, + struct sk_psock *psock, + struct proto *ops) +{ + psock->saved_unhash = sk->sk_prot->unhash; + psock->saved_close = sk->sk_prot->close; + psock->saved_write_space = sk->sk_write_space; + + psock->sk_proto = sk->sk_prot; + sk->sk_prot = ops; +} + +static inline void sk_psock_restore_proto(struct sock *sk, + struct sk_psock *psock) +{ + if (psock->sk_proto) { + sk->sk_prot = psock->sk_proto; + psock->sk_proto = NULL; + } +} + +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline struct sk_psock *sk_psock_get(struct sock *sk) +{ + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (psock && !refcount_inc_not_zero(&psock->refcnt)) + psock = NULL; + rcu_read_unlock(); + return psock; +} + +void sk_psock_stop(struct sock *sk, struct sk_psock *psock); +void sk_psock_destroy(struct rcu_head *rcu); +void sk_psock_drop(struct sock *sk, struct sk_psock *psock); + +static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) +{ + if (refcount_dec_and_test(&psock->refcnt)) + sk_psock_drop(sk, psock); +} + +static inline void psock_set_prog(struct bpf_prog **pprog, + struct bpf_prog *prog) +{ + prog = xchg(pprog, prog); + if (prog) + bpf_prog_put(prog); +} + +static inline void psock_progs_drop(struct sk_psock_progs *progs) +{ + psock_set_prog(&progs->msg_parser, NULL); + psock_set_prog(&progs->skb_parser, NULL); + psock_set_prog(&progs->skb_verdict, NULL); +} + +#endif /* _LINUX_SKMSG_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 8f5cef67fd35..3600ae0f25c3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -858,6 +858,21 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); } +static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS; +} + +static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->bpf.sk_redir; +} + +static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) +{ + TCP_SKB_CB(skb)->bpf.sk_redir = NULL; +} + #if IS_ENABLED(CONFIG_IPV6) /* This 
is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[] @@ -2064,6 +2079,18 @@ void tcp_cleanup_ulp(struct sock *sk); __MODULE_INFO(alias, alias_userspace, name); \ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name) +struct sk_msg; +struct sk_psock; + +int tcp_bpf_init(struct sock *sk); +void tcp_bpf_reinit(struct sock *sk); +int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, + int flags); +int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int nonblock, int flags, int *addr_len); +int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, + struct msghdr *msg, int len); + /* Call BPF_SOCK_OPS program that returns an int. If the return value * is < 0, then the BPF op failed (for example if the loaded BPF * program does not support the chosen operation or there is no BPF diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 0488b8258321..ff8262626b8f 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -13,11 +13,6 @@ ifeq ($(CONFIG_XDP_SOCKETS),y) obj-$(CONFIG_BPF_SYSCALL) += xskmap.o endif obj-$(CONFIG_BPF_SYSCALL) += offload.o -ifeq ($(CONFIG_STREAM_PARSER),y) -ifeq ($(CONFIG_INET),y) -obj-$(CONFIG_BPF_SYSCALL) += sockmap.o -endif -endif endif ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 3f5bf1af0826..defcf4df6d91 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1792,8 +1792,6 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; const struct bpf_func_proto bpf_get_current_comm_proto __weak; -const struct bpf_func_proto bpf_sock_map_update_proto __weak; -const struct bpf_func_proto bpf_sock_hash_update_proto __weak; const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; const struct bpf_func_proto bpf_get_local_storage_proto __weak; diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c deleted file mode 100644 index de6f7a65c72b..000000000000 --- a/kernel/bpf/sockmap.c +++ /dev/null @@ -1,2610 +0,0 @@ -/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* A BPF sock_map is used to store sock objects. This is primarly used - * for doing socket redirect with BPF helper routines. - * - * A sock map may have BPF programs attached to it, currently a program - * used to parse packets and a program to provide a verdict and redirect - * decision on the packet are supported. Any programs attached to a sock - * map are inherited by sock objects when they are added to the map. If - * no BPF programs are attached the sock object may only be used for sock - * redirect. - * - * A sock object may be in multiple maps, but can only inherit a single - * parse or verdict program. If adding a sock object to a map would result - * in having multiple parsing programs the update will return an EBUSY error. 
- * - * For reference this program is similar to devmap used in XDP context - * reviewing these together may be useful. For an example please review - * ./samples/bpf/sockmap/. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define SOCK_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) - -struct bpf_sock_progs { - struct bpf_prog *bpf_tx_msg; - struct bpf_prog *bpf_parse; - struct bpf_prog *bpf_verdict; -}; - -struct bpf_stab { - struct bpf_map map; - struct sock **sock_map; - struct bpf_sock_progs progs; - raw_spinlock_t lock; -}; - -struct bucket { - struct hlist_head head; - raw_spinlock_t lock; -}; - -struct bpf_htab { - struct bpf_map map; - struct bucket *buckets; - atomic_t count; - u32 n_buckets; - u32 elem_size; - struct bpf_sock_progs progs; - struct rcu_head rcu; -}; - -struct htab_elem { - struct rcu_head rcu; - struct hlist_node hash_node; - u32 hash; - struct sock *sk; - char key[0]; -}; - -enum smap_psock_state { - SMAP_TX_RUNNING, -}; - -struct smap_psock_map_entry { - struct list_head list; - struct bpf_map *map; - struct sock **entry; - struct htab_elem __rcu *hash_link; -}; - -struct smap_psock { - struct rcu_head rcu; - refcount_t refcnt; - - /* datapath variables */ - struct sk_buff_head rxqueue; - bool strp_enabled; - - /* datapath error path cache across tx work invocations */ - int save_rem; - int save_off; - struct sk_buff *save_skb; - - /* datapath variables for tx_msg ULP */ - struct sock *sk_redir; - int apply_bytes; - int cork_bytes; - int sg_size; - int eval; - struct sk_msg_buff *cork; - struct list_head ingress; - - struct strparser strp; - struct bpf_prog *bpf_tx_msg; - struct bpf_prog *bpf_parse; - struct bpf_prog *bpf_verdict; - struct list_head maps; - spinlock_t maps_lock; - - /* Back reference used when sock callback trigger sockmap operations */ - struct sock *sock; - unsigned long state; - - struct work_struct tx_work; - struct work_struct gc_work; - - struct proto *sk_proto; - void (*save_unhash)(struct sock *sk); - void (*save_close)(struct sock *sk, long timeout); - void (*save_data_ready)(struct sock *sk); - void (*save_write_space)(struct sock *sk); -}; - -static void smap_release_sock(struct smap_psock *psock, struct sock *sock); -static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len); -static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); -static int bpf_tcp_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -static void bpf_tcp_unhash(struct sock *sk); -static void bpf_tcp_close(struct sock *sk, long timeout); - -static inline struct smap_psock *smap_psock_sk(const struct sock *sk) -{ - return rcu_dereference_sk_user_data(sk); -} - -static bool bpf_tcp_stream_read(const struct sock *sk) -{ - struct smap_psock *psock; - bool empty = true; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) - goto out; - empty = list_empty(&psock->ingress); -out: - rcu_read_unlock(); - return !empty; -} - -enum { - SOCKMAP_IPV4, - SOCKMAP_IPV6, - SOCKMAP_NUM_PROTS, -}; - -enum { - SOCKMAP_BASE, - SOCKMAP_TX, - SOCKMAP_NUM_CONFIGS, -}; - -static struct proto *saved_tcpv6_prot __read_mostly; -static DEFINE_SPINLOCK(tcpv6_prot_lock); -static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS]; - -static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], - struct 
proto *base) -{ - prot[SOCKMAP_BASE] = *base; - prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash; - prot[SOCKMAP_BASE].close = bpf_tcp_close; - prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; - prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; - - prot[SOCKMAP_TX] = prot[SOCKMAP_BASE]; - prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg; - prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage; -} - -static void update_sk_prot(struct sock *sk, struct smap_psock *psock) -{ - int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4; - int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE; - - sk->sk_prot = &bpf_tcp_prots[family][conf]; -} - -static int bpf_tcp_init(struct sock *sk) -{ - struct smap_psock *psock; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - return -EINVAL; - } - - if (unlikely(psock->sk_proto)) { - rcu_read_unlock(); - return -EBUSY; - } - - psock->save_unhash = sk->sk_prot->unhash; - psock->save_close = sk->sk_prot->close; - psock->sk_proto = sk->sk_prot; - - /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */ - if (sk->sk_family == AF_INET6 && - unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { - spin_lock_bh(&tcpv6_prot_lock); - if (likely(sk->sk_prot != saved_tcpv6_prot)) { - build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot); - smp_store_release(&saved_tcpv6_prot, sk->sk_prot); - } - spin_unlock_bh(&tcpv6_prot_lock); - } - update_sk_prot(sk, psock); - rcu_read_unlock(); - return 0; -} - -static int __init bpf_sock_init(void) -{ - build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot); - return 0; -} -core_initcall(bpf_sock_init); - -static void smap_release_sock(struct smap_psock *psock, struct sock *sock); -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); - -static void bpf_tcp_release(struct sock *sk) -{ - struct smap_psock *psock; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) - goto out; - - if (psock->cork) { - free_start_sg(psock->sock, psock->cork, true); - kfree(psock->cork); - psock->cork = NULL; - } - - if (psock->sk_proto) { - sk->sk_prot = psock->sk_proto; - psock->sk_proto = NULL; - } -out: - rcu_read_unlock(); -} - -static struct htab_elem *lookup_elem_raw(struct hlist_head *head, - u32 hash, void *key, u32 key_size) -{ - struct htab_elem *l; - - hlist_for_each_entry_rcu(l, head, hash_node) { - if (l->hash == hash && !memcmp(&l->key, key, key_size)) - return l; - } - - return NULL; -} - -static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) -{ - return &htab->buckets[hash & (htab->n_buckets - 1)]; -} - -static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) -{ - return &__select_bucket(htab, hash)->head; -} - -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) -{ - atomic_dec(&htab->count); - kfree_rcu(l, rcu); -} - -static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, - struct smap_psock *psock) -{ - struct smap_psock_map_entry *e; - - spin_lock_bh(&psock->maps_lock); - e = list_first_entry_or_null(&psock->maps, - struct smap_psock_map_entry, - list); - if (e) - list_del(&e->list); - spin_unlock_bh(&psock->maps_lock); - return e; -} - -static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock) -{ - struct smap_psock_map_entry *e; - struct sk_msg_buff *md, *mtmp; - struct sock *osk; - - if (psock->cork) { - free_start_sg(psock->sock, psock->cork, true); - kfree(psock->cork); - psock->cork = NULL; - } - - 
list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { - list_del(&md->list); - free_start_sg(psock->sock, md, true); - kfree(md); - } - - e = psock_map_pop(sk, psock); - while (e) { - if (e->entry) { - struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map); - - raw_spin_lock_bh(&stab->lock); - osk = *e->entry; - if (osk == sk) { - *e->entry = NULL; - smap_release_sock(psock, sk); - } - raw_spin_unlock_bh(&stab->lock); - } else { - struct htab_elem *link = rcu_dereference(e->hash_link); - struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map); - struct hlist_head *head; - struct htab_elem *l; - struct bucket *b; - - b = __select_bucket(htab, link->hash); - head = &b->head; - raw_spin_lock_bh(&b->lock); - l = lookup_elem_raw(head, - link->hash, link->key, - htab->map.key_size); - /* If another thread deleted this object skip deletion. - * The refcnt on psock may or may not be zero. - */ - if (l && l == link) { - hlist_del_rcu(&link->hash_node); - smap_release_sock(psock, link->sk); - free_htab_elem(htab, link); - } - raw_spin_unlock_bh(&b->lock); - } - kfree(e); - e = psock_map_pop(sk, psock); - } -} - -static void bpf_tcp_unhash(struct sock *sk) -{ - void (*unhash_fun)(struct sock *sk); - struct smap_psock *psock; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - if (sk->sk_prot->unhash) - sk->sk_prot->unhash(sk); - return; - } - unhash_fun = psock->save_unhash; - bpf_tcp_remove(sk, psock); - rcu_read_unlock(); - unhash_fun(sk); -} - -static void bpf_tcp_close(struct sock *sk, long timeout) -{ - void (*close_fun)(struct sock *sk, long timeout); - struct smap_psock *psock; - - lock_sock(sk); - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - release_sock(sk); - return sk->sk_prot->close(sk, timeout); - } - close_fun = psock->save_close; - bpf_tcp_remove(sk, psock); - rcu_read_unlock(); - release_sock(sk); - close_fun(sk, timeout); -} - -enum __sk_action { - __SK_DROP = 0, - __SK_PASS, - __SK_REDIRECT, - __SK_NONE, -}; - -static int memcopy_from_iter(struct sock *sk, - struct sk_msg_buff *md, - struct iov_iter *from, int bytes) -{ - struct scatterlist *sg = md->sg_data; - int i = md->sg_curr, rc = -ENOSPC; - - do { - int copy; - char *to; - - if (md->sg_copybreak >= sg[i].length) { - md->sg_copybreak = 0; - - if (++i == MAX_SKB_FRAGS) - i = 0; - - if (i == md->sg_end) - break; - } - - copy = sg[i].length - md->sg_copybreak; - to = sg_virt(&sg[i]) + md->sg_copybreak; - md->sg_copybreak += copy; - - if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) - rc = copy_from_iter_nocache(to, copy, from); - else - rc = copy_from_iter(to, copy, from); - - if (rc != copy) { - rc = -EFAULT; - goto out; - } - - bytes -= copy; - if (!bytes) - break; - - md->sg_copybreak = 0; - if (++i == MAX_SKB_FRAGS) - i = 0; - } while (i != md->sg_end); -out: - md->sg_curr = i; - return rc; -} - -static int bpf_tcp_push(struct sock *sk, int apply_bytes, - struct sk_msg_buff *md, - int flags, bool uncharge) -{ - bool apply = apply_bytes; - struct scatterlist *sg; - int offset, ret = 0; - struct page *p; - size_t size; - - while (1) { - sg = md->sg_data + md->sg_start; - size = (apply && apply_bytes < sg->length) ? 
- apply_bytes : sg->length; - offset = sg->offset; - - tcp_rate_check_app_limited(sk); - p = sg_page(sg); -retry: - ret = do_tcp_sendpages(sk, p, offset, size, flags); - if (ret != size) { - if (ret > 0) { - if (apply) - apply_bytes -= ret; - - sg->offset += ret; - sg->length -= ret; - size -= ret; - offset += ret; - if (uncharge) - sk_mem_uncharge(sk, ret); - goto retry; - } - - return ret; - } - - if (apply) - apply_bytes -= ret; - sg->offset += ret; - sg->length -= ret; - if (uncharge) - sk_mem_uncharge(sk, ret); - - if (!sg->length) { - put_page(p); - md->sg_start++; - if (md->sg_start == MAX_SKB_FRAGS) - md->sg_start = 0; - sg_init_table(sg, 1); - - if (md->sg_start == md->sg_end) - break; - } - - if (apply && !apply_bytes) - break; - } - return 0; -} - -static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md) -{ - struct scatterlist *sg = md->sg_data + md->sg_start; - - if (md->sg_copy[md->sg_start]) { - md->data = md->data_end = 0; - } else { - md->data = sg_virt(sg); - md->data_end = md->data + sg->length; - } -} - -static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) -{ - struct scatterlist *sg = md->sg_data; - int i = md->sg_start; - - do { - int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length; - - sk_mem_uncharge(sk, uncharge); - bytes -= uncharge; - if (!bytes) - break; - i++; - if (i == MAX_SKB_FRAGS) - i = 0; - } while (i != md->sg_end); -} - -static void free_bytes_sg(struct sock *sk, int bytes, - struct sk_msg_buff *md, bool charge) -{ - struct scatterlist *sg = md->sg_data; - int i = md->sg_start, free; - - while (bytes && sg[i].length) { - free = sg[i].length; - if (bytes < free) { - sg[i].length -= bytes; - sg[i].offset += bytes; - if (charge) - sk_mem_uncharge(sk, bytes); - break; - } - - if (charge) - sk_mem_uncharge(sk, sg[i].length); - put_page(sg_page(&sg[i])); - bytes -= sg[i].length; - sg[i].length = 0; - sg[i].page_link = 0; - sg[i].offset = 0; - i++; - - if (i == MAX_SKB_FRAGS) - i = 0; - } - md->sg_start = i; -} - -static int free_sg(struct sock *sk, int start, - struct sk_msg_buff *md, bool charge) -{ - struct scatterlist *sg = md->sg_data; - int i = start, free = 0; - - while (sg[i].length) { - free += sg[i].length; - if (charge) - sk_mem_uncharge(sk, sg[i].length); - if (!md->skb) - put_page(sg_page(&sg[i])); - sg[i].length = 0; - sg[i].page_link = 0; - sg[i].offset = 0; - i++; - - if (i == MAX_SKB_FRAGS) - i = 0; - } - consume_skb(md->skb); - - return free; -} - -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge) -{ - int free = free_sg(sk, md->sg_start, md, charge); - - md->sg_start = md->sg_end; - return free; -} - -static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) -{ - return free_sg(sk, md->sg_curr, md, true); -} - -static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) -{ - return ((_rc == SK_PASS) ? - (md->sk_redir ? 
__SK_REDIRECT : __SK_PASS) : - __SK_DROP); -} - -static unsigned int smap_do_tx_msg(struct sock *sk, - struct smap_psock *psock, - struct sk_msg_buff *md) -{ - struct bpf_prog *prog; - unsigned int rc, _rc; - - preempt_disable(); - rcu_read_lock(); - - /* If the policy was removed mid-send then default to 'accept' */ - prog = READ_ONCE(psock->bpf_tx_msg); - if (unlikely(!prog)) { - _rc = SK_PASS; - goto verdict; - } - - bpf_compute_data_pointers_sg(md); - md->sk = sk; - rc = (*prog->bpf_func)(md, prog->insnsi); - psock->apply_bytes = md->apply_bytes; - - /* Moving return codes from UAPI namespace into internal namespace */ - _rc = bpf_map_msg_verdict(rc, md); - - /* The psock has a refcount on the sock but not on the map and because - * we need to drop rcu read lock here its possible the map could be - * removed between here and when we need it to execute the sock - * redirect. So do the map lookup now for future use. - */ - if (_rc == __SK_REDIRECT) { - if (psock->sk_redir) - sock_put(psock->sk_redir); - psock->sk_redir = do_msg_redirect_map(md); - if (!psock->sk_redir) { - _rc = __SK_DROP; - goto verdict; - } - sock_hold(psock->sk_redir); - } -verdict: - rcu_read_unlock(); - preempt_enable(); - - return _rc; -} - -static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, - struct smap_psock *psock, - struct sk_msg_buff *md, int flags) -{ - bool apply = apply_bytes; - size_t size, copied = 0; - struct sk_msg_buff *r; - int err = 0, i; - - r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL); - if (unlikely(!r)) - return -ENOMEM; - - lock_sock(sk); - r->sg_start = md->sg_start; - i = md->sg_start; - - do { - size = (apply && apply_bytes < md->sg_data[i].length) ? - apply_bytes : md->sg_data[i].length; - - if (!sk_wmem_schedule(sk, size)) { - if (!copied) - err = -ENOMEM; - break; - } - - sk_mem_charge(sk, size); - r->sg_data[i] = md->sg_data[i]; - r->sg_data[i].length = size; - md->sg_data[i].length -= size; - md->sg_data[i].offset += size; - copied += size; - - if (md->sg_data[i].length) { - get_page(sg_page(&r->sg_data[i])); - r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 
0 : i + 1; - } else { - i++; - if (i == MAX_SKB_FRAGS) - i = 0; - r->sg_end = i; - } - - if (apply) { - apply_bytes -= size; - if (!apply_bytes) - break; - } - } while (i != md->sg_end); - - md->sg_start = i; - - if (!err) { - list_add_tail(&r->list, &psock->ingress); - sk->sk_data_ready(sk); - } else { - free_start_sg(sk, r, true); - kfree(r); - } - - release_sock(sk); - return err; -} - -static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, - struct sk_msg_buff *md, - int flags) -{ - bool ingress = !!(md->flags & BPF_F_INGRESS); - struct smap_psock *psock; - int err = 0; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) - goto out_rcu; - - if (!refcount_inc_not_zero(&psock->refcnt)) - goto out_rcu; - - rcu_read_unlock(); - - if (ingress) { - err = bpf_tcp_ingress(sk, send, psock, md, flags); - } else { - lock_sock(sk); - err = bpf_tcp_push(sk, send, md, flags, false); - release_sock(sk); - } - smap_release_sock(psock, sk); - return err; -out_rcu: - rcu_read_unlock(); - return 0; -} - -static inline void bpf_md_init(struct smap_psock *psock) -{ - if (!psock->apply_bytes) { - psock->eval = __SK_NONE; - if (psock->sk_redir) { - sock_put(psock->sk_redir); - psock->sk_redir = NULL; - } - } -} - -static void apply_bytes_dec(struct smap_psock *psock, int i) -{ - if (psock->apply_bytes) { - if (psock->apply_bytes < i) - psock->apply_bytes = 0; - else - psock->apply_bytes -= i; - } -} - -static int bpf_exec_tx_verdict(struct smap_psock *psock, - struct sk_msg_buff *m, - struct sock *sk, - int *copied, int flags) -{ - bool cork = false, enospc = (m->sg_start == m->sg_end); - struct sock *redir; - int err = 0; - int send; - -more_data: - if (psock->eval == __SK_NONE) - psock->eval = smap_do_tx_msg(sk, psock, m); - - if (m->cork_bytes && - m->cork_bytes > psock->sg_size && !enospc) { - psock->cork_bytes = m->cork_bytes - psock->sg_size; - if (!psock->cork) { - psock->cork = kcalloc(1, - sizeof(struct sk_msg_buff), - GFP_ATOMIC | __GFP_NOWARN); - - if (!psock->cork) { - err = -ENOMEM; - goto out_err; - } - } - memcpy(psock->cork, m, sizeof(*m)); - goto out_err; - } - - send = psock->sg_size; - if (psock->apply_bytes && psock->apply_bytes < send) - send = psock->apply_bytes; - - switch (psock->eval) { - case __SK_PASS: - err = bpf_tcp_push(sk, send, m, flags, true); - if (unlikely(err)) { - *copied -= free_start_sg(sk, m, true); - break; - } - - apply_bytes_dec(psock, send); - psock->sg_size -= send; - break; - case __SK_REDIRECT: - redir = psock->sk_redir; - apply_bytes_dec(psock, send); - - if (psock->cork) { - cork = true; - psock->cork = NULL; - } - - return_mem_sg(sk, send, m); - release_sock(sk); - - err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); - lock_sock(sk); - - if (unlikely(err < 0)) { - int free = free_start_sg(sk, m, false); - - psock->sg_size = 0; - if (!cork) - *copied -= free; - } else { - psock->sg_size -= send; - } - - if (cork) { - free_start_sg(sk, m, true); - psock->sg_size = 0; - kfree(m); - m = NULL; - err = 0; - } - break; - case __SK_DROP: - default: - free_bytes_sg(sk, send, m, true); - apply_bytes_dec(psock, send); - *copied -= send; - psock->sg_size -= send; - err = -EACCES; - break; - } - - if (likely(!err)) { - bpf_md_init(psock); - if (m && - m->sg_data[m->sg_start].page_link && - m->sg_data[m->sg_start].length) - goto more_data; - } - -out_err: - return err; -} - -static int bpf_wait_data(struct sock *sk, - struct smap_psock *psk, int flags, - long timeo, int *err) -{ - int rc; - - DEFINE_WAIT_FUNC(wait, 
woken_wake_function); - - add_wait_queue(sk_sleep(sk), &wait); - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); - rc = sk_wait_event(sk, &timeo, - !list_empty(&psk->ingress) || - !skb_queue_empty(&sk->sk_receive_queue), - &wait); - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); - remove_wait_queue(sk_sleep(sk), &wait); - - return rc; -} - -static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) -{ - struct iov_iter *iter = &msg->msg_iter; - struct smap_psock *psock; - int copied = 0; - - if (unlikely(flags & MSG_ERRQUEUE)) - return inet_recv_error(sk, msg, len, addr_len); - if (!skb_queue_empty(&sk->sk_receive_queue)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) - goto out; - - if (unlikely(!refcount_inc_not_zero(&psock->refcnt))) - goto out; - rcu_read_unlock(); - - lock_sock(sk); -bytes_ready: - while (copied != len) { - struct scatterlist *sg; - struct sk_msg_buff *md; - int i; - - md = list_first_entry_or_null(&psock->ingress, - struct sk_msg_buff, list); - if (unlikely(!md)) - break; - i = md->sg_start; - do { - struct page *page; - int n, copy; - - sg = &md->sg_data[i]; - copy = sg->length; - page = sg_page(sg); - - if (copied + copy > len) - copy = len - copied; - - n = copy_page_to_iter(page, sg->offset, copy, iter); - if (n != copy) { - md->sg_start = i; - release_sock(sk); - smap_release_sock(psock, sk); - return -EFAULT; - } - - copied += copy; - sg->offset += copy; - sg->length -= copy; - sk_mem_uncharge(sk, copy); - - if (!sg->length) { - i++; - if (i == MAX_SKB_FRAGS) - i = 0; - if (!md->skb) - put_page(page); - } - if (copied == len) - break; - } while (i != md->sg_end); - md->sg_start = i; - - if (!sg->length && md->sg_start == md->sg_end) { - list_del(&md->list); - consume_skb(md->skb); - kfree(md); - } - } - - if (!copied) { - long timeo; - int data; - int err = 0; - - timeo = sock_rcvtimeo(sk, nonblock); - data = bpf_wait_data(sk, psock, flags, timeo, &err); - - if (data) { - if (!skb_queue_empty(&sk->sk_receive_queue)) { - release_sock(sk); - smap_release_sock(psock, sk); - copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - return copied; - } - goto bytes_ready; - } - - if (err) - copied = err; - } - - release_sock(sk); - smap_release_sock(psock, sk); - return copied; -out: - rcu_read_unlock(); - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); -} - - -static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) -{ - int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS; - struct sk_msg_buff md = {0}; - unsigned int sg_copy = 0; - struct smap_psock *psock; - int copied = 0, err = 0; - struct scatterlist *sg; - long timeo; - - /* Its possible a sock event or user removed the psock _but_ the ops - * have not been reprogrammed yet so we get here. In this case fallback - * to tcp_sendmsg. Note this only works because we _only_ ever allow - * a single ULP there is no hierarchy here. - */ - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - return tcp_sendmsg(sk, msg, size); - } - - /* Increment the psock refcnt to ensure its not released while sending a - * message. Required because sk lookup and bpf programs are used in - * separate rcu critical sections. Its OK if we lose the map entry - * but we can't lose the sock reference. 
- */ - if (!refcount_inc_not_zero(&psock->refcnt)) { - rcu_read_unlock(); - return tcp_sendmsg(sk, msg, size); - } - - sg = md.sg_data; - sg_init_marker(sg, MAX_SKB_FRAGS); - rcu_read_unlock(); - - lock_sock(sk); - timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); - - while (msg_data_left(msg)) { - struct sk_msg_buff *m = NULL; - bool enospc = false; - int copy; - - if (sk->sk_err) { - err = -sk->sk_err; - goto out_err; - } - - copy = msg_data_left(msg); - if (!sk_stream_memory_free(sk)) - goto wait_for_sndbuf; - - m = psock->cork_bytes ? psock->cork : &md; - m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end; - err = sk_alloc_sg(sk, copy, m->sg_data, - m->sg_start, &m->sg_end, &sg_copy, - m->sg_end - 1); - if (err) { - if (err != -ENOSPC) - goto wait_for_memory; - enospc = true; - copy = sg_copy; - } - - err = memcopy_from_iter(sk, m, &msg->msg_iter, copy); - if (err < 0) { - free_curr_sg(sk, m); - goto out_err; - } - - psock->sg_size += copy; - copied += copy; - sg_copy = 0; - - /* When bytes are being corked skip running BPF program and - * applying verdict unless there is no more buffer space. In - * the ENOSPC case simply run BPF prorgram with currently - * accumulated data. We don't have much choice at this point - * we could try extending the page frags or chaining complex - * frags but even in these cases _eventually_ we will hit an - * OOM scenario. More complex recovery schemes may be - * implemented in the future, but BPF programs must handle - * the case where apply_cork requests are not honored. The - * canonical method to verify this is to check data length. - */ - if (psock->cork_bytes) { - if (copy > psock->cork_bytes) - psock->cork_bytes = 0; - else - psock->cork_bytes -= copy; - - if (psock->cork_bytes && !enospc) - goto out_cork; - - /* All cork bytes accounted for re-run filter */ - psock->eval = __SK_NONE; - psock->cork_bytes = 0; - } - - err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags); - if (unlikely(err < 0)) - goto out_err; - continue; -wait_for_sndbuf: - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); -wait_for_memory: - err = sk_stream_wait_memory(sk, &timeo); - if (err) { - if (m && m != psock->cork) - free_start_sg(sk, m, true); - goto out_err; - } - } -out_err: - if (err < 0) - err = sk_stream_error(sk, msg->msg_flags, err); -out_cork: - release_sock(sk); - smap_release_sock(psock, sk); - return copied ? copied : err; -} - -static int bpf_tcp_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - struct sk_msg_buff md = {0}, *m = NULL; - int err = 0, copied = 0; - struct smap_psock *psock; - struct scatterlist *sg; - bool enospc = false; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) - goto accept; - - if (!refcount_inc_not_zero(&psock->refcnt)) - goto accept; - rcu_read_unlock(); - - lock_sock(sk); - - if (psock->cork_bytes) { - m = psock->cork; - sg = &m->sg_data[m->sg_end]; - } else { - m = &md; - sg = m->sg_data; - sg_init_marker(sg, MAX_SKB_FRAGS); - } - - /* Catch case where ring is full and sendpage is stalled. 
*/ - if (unlikely(m->sg_end == m->sg_start && - m->sg_data[m->sg_end].length)) - goto out_err; - - psock->sg_size += size; - sg_set_page(sg, page, size, offset); - get_page(page); - m->sg_copy[m->sg_end] = true; - sk_mem_charge(sk, size); - m->sg_end++; - copied = size; - - if (m->sg_end == MAX_SKB_FRAGS) - m->sg_end = 0; - - if (m->sg_end == m->sg_start) - enospc = true; - - if (psock->cork_bytes) { - if (size > psock->cork_bytes) - psock->cork_bytes = 0; - else - psock->cork_bytes -= size; - - if (psock->cork_bytes && !enospc) - goto out_err; - - /* All cork bytes accounted for re-run filter */ - psock->eval = __SK_NONE; - psock->cork_bytes = 0; - } - - err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags); -out_err: - release_sock(sk); - smap_release_sock(psock, sk); - return copied ? copied : err; -accept: - rcu_read_unlock(); - return tcp_sendpage(sk, page, offset, size, flags); -} - -static void bpf_tcp_msg_add(struct smap_psock *psock, - struct sock *sk, - struct bpf_prog *tx_msg) -{ - struct bpf_prog *orig_tx_msg; - - orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg); - if (orig_tx_msg) - bpf_prog_put(orig_tx_msg); -} - -static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) -{ - struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); - int rc; - - if (unlikely(!prog)) - return __SK_DROP; - - skb_orphan(skb); - /* We need to ensure that BPF metadata for maps is also cleared - * when we orphan the skb so that we don't have the possibility - * to reference a stale map. - */ - TCP_SKB_CB(skb)->bpf.sk_redir = NULL; - skb->sk = psock->sock; - bpf_compute_data_end_sk_skb(skb); - preempt_disable(); - rc = (*prog->bpf_func)(skb, prog->insnsi); - preempt_enable(); - skb->sk = NULL; - - /* Moving return codes from UAPI namespace into internal namespace */ - return rc == SK_PASS ? - (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) : - __SK_DROP; -} - -static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb) -{ - struct sock *sk = psock->sock; - int copied = 0, num_sg; - struct sk_msg_buff *r; - - r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC); - if (unlikely(!r)) - return -EAGAIN; - - if (!sk_rmem_schedule(sk, skb, skb->len)) { - kfree(r); - return -EAGAIN; - } - - sg_init_table(r->sg_data, MAX_SKB_FRAGS); - num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len); - if (unlikely(num_sg < 0)) { - kfree(r); - return num_sg; - } - sk_mem_charge(sk, skb->len); - copied = skb->len; - r->sg_start = 0; - r->sg_end = num_sg == MAX_SKB_FRAGS ? 
0 : num_sg; - r->skb = skb; - list_add_tail(&r->list, &psock->ingress); - sk->sk_data_ready(sk); - return copied; -} - -static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) -{ - struct smap_psock *peer; - struct sock *sk; - __u32 in; - int rc; - - rc = smap_verdict_func(psock, skb); - switch (rc) { - case __SK_REDIRECT: - sk = do_sk_redirect_map(skb); - if (!sk) { - kfree_skb(skb); - break; - } - - peer = smap_psock_sk(sk); - in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS; - - if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) || - !test_bit(SMAP_TX_RUNNING, &peer->state))) { - kfree_skb(skb); - break; - } - - if (!in && sock_writeable(sk)) { - skb_set_owner_w(skb, sk); - skb_queue_tail(&peer->rxqueue, skb); - schedule_work(&peer->tx_work); - break; - } else if (in && - atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { - skb_queue_tail(&peer->rxqueue, skb); - schedule_work(&peer->tx_work); - break; - } - /* Fall through and free skb otherwise */ - case __SK_DROP: - default: - kfree_skb(skb); - } -} - -static void smap_report_sk_error(struct smap_psock *psock, int err) -{ - struct sock *sk = psock->sock; - - sk->sk_err = err; - sk->sk_error_report(sk); -} - -static void smap_read_sock_strparser(struct strparser *strp, - struct sk_buff *skb) -{ - struct smap_psock *psock; - - rcu_read_lock(); - psock = container_of(strp, struct smap_psock, strp); - smap_do_verdict(psock, skb); - rcu_read_unlock(); -} - -/* Called with lock held on socket */ -static void smap_data_ready(struct sock *sk) -{ - struct smap_psock *psock; - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (likely(psock)) { - write_lock_bh(&sk->sk_callback_lock); - strp_data_ready(&psock->strp); - write_unlock_bh(&sk->sk_callback_lock); - } - rcu_read_unlock(); -} - -static void smap_tx_work(struct work_struct *w) -{ - struct smap_psock *psock; - struct sk_buff *skb; - int rem, off, n; - - psock = container_of(w, struct smap_psock, tx_work); - - /* lock sock to avoid losing sk_socket at some point during loop */ - lock_sock(psock->sock); - if (psock->save_skb) { - skb = psock->save_skb; - rem = psock->save_rem; - off = psock->save_off; - psock->save_skb = NULL; - goto start; - } - - while ((skb = skb_dequeue(&psock->rxqueue))) { - __u32 flags; - - rem = skb->len; - off = 0; -start: - flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS; - do { - if (likely(psock->sock->sk_socket)) { - if (flags) - n = smap_do_ingress(psock, skb); - else - n = skb_send_sock_locked(psock->sock, - skb, off, rem); - } else { - n = -EINVAL; - } - - if (n <= 0) { - if (n == -EAGAIN) { - /* Retry when space is available */ - psock->save_skb = skb; - psock->save_rem = rem; - psock->save_off = off; - goto out; - } - /* Hard errors break pipe and stop xmit */ - smap_report_sk_error(psock, n ? 
-n : EPIPE); - clear_bit(SMAP_TX_RUNNING, &psock->state); - kfree_skb(skb); - goto out; - } - rem -= n; - off += n; - } while (rem); - - if (!flags) - kfree_skb(skb); - } -out: - release_sock(psock->sock); -} - -static void smap_write_space(struct sock *sk) -{ - struct smap_psock *psock; - void (*write_space)(struct sock *sk); - - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) - schedule_work(&psock->tx_work); - write_space = psock->save_write_space; - rcu_read_unlock(); - write_space(sk); -} - -static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) -{ - if (!psock->strp_enabled) - return; - sk->sk_data_ready = psock->save_data_ready; - sk->sk_write_space = psock->save_write_space; - psock->save_data_ready = NULL; - psock->save_write_space = NULL; - strp_stop(&psock->strp); - psock->strp_enabled = false; -} - -static void smap_destroy_psock(struct rcu_head *rcu) -{ - struct smap_psock *psock = container_of(rcu, - struct smap_psock, rcu); - - /* Now that a grace period has passed there is no longer - * any reference to this sock in the sockmap so we can - * destroy the psock, strparser, and bpf programs. But, - * because we use workqueue sync operations we can not - * do it in rcu context - */ - schedule_work(&psock->gc_work); -} - -static bool psock_is_smap_sk(struct sock *sk) -{ - return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops; -} - -static void smap_release_sock(struct smap_psock *psock, struct sock *sock) -{ - if (refcount_dec_and_test(&psock->refcnt)) { - if (psock_is_smap_sk(sock)) - bpf_tcp_release(sock); - write_lock_bh(&sock->sk_callback_lock); - smap_stop_sock(psock, sock); - write_unlock_bh(&sock->sk_callback_lock); - clear_bit(SMAP_TX_RUNNING, &psock->state); - rcu_assign_sk_user_data(sock, NULL); - call_rcu_sched(&psock->rcu, smap_destroy_psock); - } -} - -static int smap_parse_func_strparser(struct strparser *strp, - struct sk_buff *skb) -{ - struct smap_psock *psock; - struct bpf_prog *prog; - int rc; - - rcu_read_lock(); - psock = container_of(strp, struct smap_psock, strp); - prog = READ_ONCE(psock->bpf_parse); - - if (unlikely(!prog)) { - rcu_read_unlock(); - return skb->len; - } - - /* Attach socket for bpf program to use if needed we can do this - * because strparser clones the skb before handing it to a upper - * layer, meaning skb_orphan has been called. We NULL sk on the - * way out to ensure we don't trigger a BUG_ON in skb/sk operations - * later and because we are not charging the memory of this skb to - * any socket yet. 
- */ - skb->sk = psock->sock; - bpf_compute_data_end_sk_skb(skb); - rc = (*prog->bpf_func)(skb, prog->insnsi); - skb->sk = NULL; - rcu_read_unlock(); - return rc; -} - -static int smap_read_sock_done(struct strparser *strp, int err) -{ - return err; -} - -static int smap_init_sock(struct smap_psock *psock, - struct sock *sk) -{ - static const struct strp_callbacks cb = { - .rcv_msg = smap_read_sock_strparser, - .parse_msg = smap_parse_func_strparser, - .read_sock_done = smap_read_sock_done, - }; - - return strp_init(&psock->strp, sk, &cb); -} - -static void smap_init_progs(struct smap_psock *psock, - struct bpf_prog *verdict, - struct bpf_prog *parse) -{ - struct bpf_prog *orig_parse, *orig_verdict; - - orig_parse = xchg(&psock->bpf_parse, parse); - orig_verdict = xchg(&psock->bpf_verdict, verdict); - - if (orig_verdict) - bpf_prog_put(orig_verdict); - if (orig_parse) - bpf_prog_put(orig_parse); -} - -static void smap_start_sock(struct smap_psock *psock, struct sock *sk) -{ - if (sk->sk_data_ready == smap_data_ready) - return; - psock->save_data_ready = sk->sk_data_ready; - psock->save_write_space = sk->sk_write_space; - sk->sk_data_ready = smap_data_ready; - sk->sk_write_space = smap_write_space; - psock->strp_enabled = true; -} - -static void sock_map_remove_complete(struct bpf_stab *stab) -{ - bpf_map_area_free(stab->sock_map); - kfree(stab); -} - -static void smap_gc_work(struct work_struct *w) -{ - struct smap_psock_map_entry *e, *tmp; - struct sk_msg_buff *md, *mtmp; - struct smap_psock *psock; - - psock = container_of(w, struct smap_psock, gc_work); - - /* no callback lock needed because we already detached sockmap ops */ - if (psock->strp_enabled) - strp_done(&psock->strp); - - cancel_work_sync(&psock->tx_work); - __skb_queue_purge(&psock->rxqueue); - - /* At this point all strparser and xmit work must be complete */ - if (psock->bpf_parse) - bpf_prog_put(psock->bpf_parse); - if (psock->bpf_verdict) - bpf_prog_put(psock->bpf_verdict); - if (psock->bpf_tx_msg) - bpf_prog_put(psock->bpf_tx_msg); - - if (psock->cork) { - free_start_sg(psock->sock, psock->cork, true); - kfree(psock->cork); - } - - list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { - list_del(&md->list); - free_start_sg(psock->sock, md, true); - kfree(md); - } - - list_for_each_entry_safe(e, tmp, &psock->maps, list) { - list_del(&e->list); - kfree(e); - } - - if (psock->sk_redir) - sock_put(psock->sk_redir); - - sock_put(psock->sock); - kfree(psock); -} - -static struct smap_psock *smap_init_psock(struct sock *sock, int node) -{ - struct smap_psock *psock; - - psock = kzalloc_node(sizeof(struct smap_psock), - GFP_ATOMIC | __GFP_NOWARN, - node); - if (!psock) - return ERR_PTR(-ENOMEM); - - psock->eval = __SK_NONE; - psock->sock = sock; - skb_queue_head_init(&psock->rxqueue); - INIT_WORK(&psock->tx_work, smap_tx_work); - INIT_WORK(&psock->gc_work, smap_gc_work); - INIT_LIST_HEAD(&psock->maps); - INIT_LIST_HEAD(&psock->ingress); - refcount_set(&psock->refcnt, 1); - spin_lock_init(&psock->maps_lock); - - rcu_assign_sk_user_data(sock, psock); - sock_hold(sock); - return psock; -} - -static struct bpf_map *sock_map_alloc(union bpf_attr *attr) -{ - struct bpf_stab *stab; - u64 cost; - int err; - - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); - - /* check sanity of attributes */ - if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) - return ERR_PTR(-EINVAL); - - stab = kzalloc(sizeof(*stab), GFP_USER); - if (!stab) - return 
ERR_PTR(-ENOMEM); - - bpf_map_init_from_attr(&stab->map, attr); - raw_spin_lock_init(&stab->lock); - - /* make sure page count doesn't overflow */ - cost = (u64) stab->map.max_entries * sizeof(struct sock *); - err = -EINVAL; - if (cost >= U32_MAX - PAGE_SIZE) - goto free_stab; - - stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - - /* if map size is larger than memlock limit, reject it early */ - err = bpf_map_precharge_memlock(stab->map.pages); - if (err) - goto free_stab; - - err = -ENOMEM; - stab->sock_map = bpf_map_area_alloc(stab->map.max_entries * - sizeof(struct sock *), - stab->map.numa_node); - if (!stab->sock_map) - goto free_stab; - - return &stab->map; -free_stab: - kfree(stab); - return ERR_PTR(err); -} - -static void smap_list_map_remove(struct smap_psock *psock, - struct sock **entry) -{ - struct smap_psock_map_entry *e, *tmp; - - spin_lock_bh(&psock->maps_lock); - list_for_each_entry_safe(e, tmp, &psock->maps, list) { - if (e->entry == entry) { - list_del(&e->list); - kfree(e); - } - } - spin_unlock_bh(&psock->maps_lock); -} - -static void smap_list_hash_remove(struct smap_psock *psock, - struct htab_elem *hash_link) -{ - struct smap_psock_map_entry *e, *tmp; - - spin_lock_bh(&psock->maps_lock); - list_for_each_entry_safe(e, tmp, &psock->maps, list) { - struct htab_elem *c = rcu_dereference(e->hash_link); - - if (c == hash_link) { - list_del(&e->list); - kfree(e); - } - } - spin_unlock_bh(&psock->maps_lock); -} - -static void sock_map_free(struct bpf_map *map) -{ - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - int i; - - synchronize_rcu(); - - /* At this point no update, lookup or delete operations can happen. - * However, be aware we can still get a socket state event updates, - * and data ready callabacks that reference the psock from sk_user_data - * Also psock worker threads are still in-flight. So smap_release_sock - * will only free the psock after cancel_sync on the worker threads - * and a grace period expire to ensure psock is really safe to remove. - */ - rcu_read_lock(); - raw_spin_lock_bh(&stab->lock); - for (i = 0; i < stab->map.max_entries; i++) { - struct smap_psock *psock; - struct sock *sock; - - sock = stab->sock_map[i]; - if (!sock) - continue; - stab->sock_map[i] = NULL; - psock = smap_psock_sk(sock); - /* This check handles a racing sock event that can get the - * sk_callback_lock before this case but after xchg happens - * causing the refcnt to hit zero and sock user data (psock) - * to be null and queued for garbage collection. - */ - if (likely(psock)) { - smap_list_map_remove(psock, &stab->sock_map[i]); - smap_release_sock(psock, sock); - } - } - raw_spin_unlock_bh(&stab->lock); - rcu_read_unlock(); - - sock_map_remove_complete(stab); -} - -static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key) -{ - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - u32 i = key ? 
*(u32 *)key : U32_MAX; - u32 *next = (u32 *)next_key; - - if (i >= stab->map.max_entries) { - *next = 0; - return 0; - } - - if (i == stab->map.max_entries - 1) - return -ENOENT; - - *next = i + 1; - return 0; -} - -struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) -{ - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - - if (key >= map->max_entries) - return NULL; - - return READ_ONCE(stab->sock_map[key]); -} - -static int sock_map_delete_elem(struct bpf_map *map, void *key) -{ - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - struct smap_psock *psock; - int k = *(u32 *)key; - struct sock *sock; - - if (k >= map->max_entries) - return -EINVAL; - - raw_spin_lock_bh(&stab->lock); - sock = stab->sock_map[k]; - stab->sock_map[k] = NULL; - raw_spin_unlock_bh(&stab->lock); - if (!sock) - return -EINVAL; - - psock = smap_psock_sk(sock); - if (!psock) - return 0; - if (psock->bpf_parse) { - write_lock_bh(&sock->sk_callback_lock); - smap_stop_sock(psock, sock); - write_unlock_bh(&sock->sk_callback_lock); - } - smap_list_map_remove(psock, &stab->sock_map[k]); - smap_release_sock(psock, sock); - return 0; -} - -/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are - * done inside rcu critical sections. This ensures on updates that the psock - * will not be released via smap_release_sock() until concurrent updates/deletes - * complete. All operations operate on sock_map using cmpxchg and xchg - * operations to ensure we do not get stale references. Any reads into the - * map must be done with READ_ONCE() because of this. - * - * A psock is destroyed via call_rcu and after any worker threads are cancelled - * and syncd so we are certain all references from the update/lookup/delete - * operations as well as references in the data path are no longer in use. - * - * Psocks may exist in multiple maps, but only a single set of parse/verdict - * programs may be inherited from the maps it belongs to. A reference count - * is kept with the total number of references to the psock from all maps. The - * psock will not be released until this reaches zero. The psock and sock - * user data data use the sk_callback_lock to protect critical data structures - * from concurrent access. This allows us to avoid two updates from modifying - * the user data in sock and the lock is required anyways for modifying - * callbacks, we simply increase its scope slightly. - * - * Rules to follow, - * - psock must always be read inside RCU critical section - * - sk_user_data must only be modified inside sk_callback_lock and read - * inside RCU critical section. - * - psock->maps list must only be read & modified inside sk_callback_lock - * - sock_map must use READ_ONCE and (cmp)xchg operations - * - BPF verdict/parse programs must use READ_ONCE and xchg operations - */ - -static int __sock_map_ctx_update_elem(struct bpf_map *map, - struct bpf_sock_progs *progs, - struct sock *sock, - void *key) -{ - struct bpf_prog *verdict, *parse, *tx_msg; - struct smap_psock *psock; - bool new = false; - int err = 0; - - /* 1. If sock map has BPF programs those will be inherited by the - * sock being added. If the sock is already attached to BPF programs - * this results in an error. 
- */ - verdict = READ_ONCE(progs->bpf_verdict); - parse = READ_ONCE(progs->bpf_parse); - tx_msg = READ_ONCE(progs->bpf_tx_msg); - - if (parse && verdict) { - /* bpf prog refcnt may be zero if a concurrent attach operation - * removes the program after the above READ_ONCE() but before - * we increment the refcnt. If this is the case abort with an - * error. - */ - verdict = bpf_prog_inc_not_zero(verdict); - if (IS_ERR(verdict)) - return PTR_ERR(verdict); - - parse = bpf_prog_inc_not_zero(parse); - if (IS_ERR(parse)) { - bpf_prog_put(verdict); - return PTR_ERR(parse); - } - } - - if (tx_msg) { - tx_msg = bpf_prog_inc_not_zero(tx_msg); - if (IS_ERR(tx_msg)) { - if (parse && verdict) { - bpf_prog_put(parse); - bpf_prog_put(verdict); - } - return PTR_ERR(tx_msg); - } - } - - psock = smap_psock_sk(sock); - - /* 2. Do not allow inheriting programs if psock exists and has - * already inherited programs. This would create confusion on - * which parser/verdict program is running. If no psock exists - * create one. Inside sk_callback_lock to ensure concurrent create - * doesn't update user data. - */ - if (psock) { - if (!psock_is_smap_sk(sock)) { - err = -EBUSY; - goto out_progs; - } - if (READ_ONCE(psock->bpf_parse) && parse) { - err = -EBUSY; - goto out_progs; - } - if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) { - err = -EBUSY; - goto out_progs; - } - if (!refcount_inc_not_zero(&psock->refcnt)) { - err = -EAGAIN; - goto out_progs; - } - } else { - psock = smap_init_psock(sock, map->numa_node); - if (IS_ERR(psock)) { - err = PTR_ERR(psock); - goto out_progs; - } - - set_bit(SMAP_TX_RUNNING, &psock->state); - new = true; - } - - /* 3. At this point we have a reference to a valid psock that is - * running. Attach any BPF programs needed. - */ - if (tx_msg) - bpf_tcp_msg_add(psock, sock, tx_msg); - if (new) { - err = bpf_tcp_init(sock); - if (err) - goto out_free; - } - - if (parse && verdict && !psock->strp_enabled) { - err = smap_init_sock(psock, sock); - if (err) - goto out_free; - smap_init_progs(psock, verdict, parse); - write_lock_bh(&sock->sk_callback_lock); - smap_start_sock(psock, sock); - write_unlock_bh(&sock->sk_callback_lock); - } - - return err; -out_free: - smap_release_sock(psock, sock); -out_progs: - if (parse && verdict) { - bpf_prog_put(parse); - bpf_prog_put(verdict); - } - if (tx_msg) - bpf_prog_put(tx_msg); - return err; -} - -static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, - struct bpf_map *map, - void *key, u64 flags) -{ - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - struct bpf_sock_progs *progs = &stab->progs; - struct sock *osock, *sock = skops->sk; - struct smap_psock_map_entry *e; - struct smap_psock *psock; - u32 i = *(u32 *)key; - int err; - - if (unlikely(flags > BPF_EXIST)) - return -EINVAL; - if (unlikely(i >= stab->map.max_entries)) - return -E2BIG; - - e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); - if (!e) - return -ENOMEM; - - err = __sock_map_ctx_update_elem(map, progs, sock, key); - if (err) - goto out; - - /* psock guaranteed to be present. 
*/ - psock = smap_psock_sk(sock); - raw_spin_lock_bh(&stab->lock); - osock = stab->sock_map[i]; - if (osock && flags == BPF_NOEXIST) { - err = -EEXIST; - goto out_unlock; - } - if (!osock && flags == BPF_EXIST) { - err = -ENOENT; - goto out_unlock; - } - - e->entry = &stab->sock_map[i]; - e->map = map; - spin_lock_bh(&psock->maps_lock); - list_add_tail(&e->list, &psock->maps); - spin_unlock_bh(&psock->maps_lock); - - stab->sock_map[i] = sock; - if (osock) { - psock = smap_psock_sk(osock); - smap_list_map_remove(psock, &stab->sock_map[i]); - smap_release_sock(psock, osock); - } - raw_spin_unlock_bh(&stab->lock); - return 0; -out_unlock: - smap_release_sock(psock, sock); - raw_spin_unlock_bh(&stab->lock); -out: - kfree(e); - return err; -} - -int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type) -{ - struct bpf_sock_progs *progs; - struct bpf_prog *orig; - - if (map->map_type == BPF_MAP_TYPE_SOCKMAP) { - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - - progs = &stab->progs; - } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - - progs = &htab->progs; - } else { - return -EINVAL; - } - - switch (type) { - case BPF_SK_MSG_VERDICT: - orig = xchg(&progs->bpf_tx_msg, prog); - break; - case BPF_SK_SKB_STREAM_PARSER: - orig = xchg(&progs->bpf_parse, prog); - break; - case BPF_SK_SKB_STREAM_VERDICT: - orig = xchg(&progs->bpf_verdict, prog); - break; - default: - return -EOPNOTSUPP; - } - - if (orig) - bpf_prog_put(orig); - - return 0; -} - -int sockmap_get_from_fd(const union bpf_attr *attr, int type, - struct bpf_prog *prog) -{ - int ufd = attr->target_fd; - struct bpf_map *map; - struct fd f; - int err; - - f = fdget(ufd); - map = __bpf_map_get(f); - if (IS_ERR(map)) - return PTR_ERR(map); - - err = sock_map_prog(map, prog, attr->attach_type); - fdput(f); - return err; -} - -static void *sock_map_lookup(struct bpf_map *map, void *key) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static int sock_map_update_elem(struct bpf_map *map, - void *key, void *value, u64 flags) -{ - struct bpf_sock_ops_kern skops; - u32 fd = *(u32 *)value; - struct socket *socket; - int err; - - socket = sockfd_lookup(fd, &err); - if (!socket) - return err; - - skops.sk = socket->sk; - if (!skops.sk) { - fput(socket->file); - return -EINVAL; - } - - /* ULPs are currently supported only for TCP sockets in ESTABLISHED - * state. 
- */ - if (skops.sk->sk_type != SOCK_STREAM || - skops.sk->sk_protocol != IPPROTO_TCP || - skops.sk->sk_state != TCP_ESTABLISHED) { - fput(socket->file); - return -EOPNOTSUPP; - } - - lock_sock(skops.sk); - preempt_disable(); - rcu_read_lock(); - err = sock_map_ctx_update_elem(&skops, map, key, flags); - rcu_read_unlock(); - preempt_enable(); - release_sock(skops.sk); - fput(socket->file); - return err; -} - -static void sock_map_release(struct bpf_map *map) -{ - struct bpf_sock_progs *progs; - struct bpf_prog *orig; - - if (map->map_type == BPF_MAP_TYPE_SOCKMAP) { - struct bpf_stab *stab = container_of(map, struct bpf_stab, map); - - progs = &stab->progs; - } else { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - - progs = &htab->progs; - } - - orig = xchg(&progs->bpf_parse, NULL); - if (orig) - bpf_prog_put(orig); - orig = xchg(&progs->bpf_verdict, NULL); - if (orig) - bpf_prog_put(orig); - - orig = xchg(&progs->bpf_tx_msg, NULL); - if (orig) - bpf_prog_put(orig); -} - -static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) -{ - struct bpf_htab *htab; - int i, err; - u64 cost; - - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); - - /* check sanity of attributes */ - if (attr->max_entries == 0 || - attr->key_size == 0 || - attr->value_size != 4 || - attr->map_flags & ~SOCK_CREATE_FLAG_MASK) - return ERR_PTR(-EINVAL); - - if (attr->key_size > MAX_BPF_STACK) - /* eBPF programs initialize keys on stack, so they cannot be - * larger than max stack size - */ - return ERR_PTR(-E2BIG); - - htab = kzalloc(sizeof(*htab), GFP_USER); - if (!htab) - return ERR_PTR(-ENOMEM); - - bpf_map_init_from_attr(&htab->map, attr); - - htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); - htab->elem_size = sizeof(struct htab_elem) + - round_up(htab->map.key_size, 8); - err = -EINVAL; - if (htab->n_buckets == 0 || - htab->n_buckets > U32_MAX / sizeof(struct bucket)) - goto free_htab; - - cost = (u64) htab->n_buckets * sizeof(struct bucket) + - (u64) htab->elem_size * htab->map.max_entries; - - if (cost >= U32_MAX - PAGE_SIZE) - goto free_htab; - - htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; - err = bpf_map_precharge_memlock(htab->map.pages); - if (err) - goto free_htab; - - err = -ENOMEM; - htab->buckets = bpf_map_area_alloc( - htab->n_buckets * sizeof(struct bucket), - htab->map.numa_node); - if (!htab->buckets) - goto free_htab; - - for (i = 0; i < htab->n_buckets; i++) { - INIT_HLIST_HEAD(&htab->buckets[i].head); - raw_spin_lock_init(&htab->buckets[i].lock); - } - - return &htab->map; -free_htab: - kfree(htab); - return ERR_PTR(err); -} - -static void __bpf_htab_free(struct rcu_head *rcu) -{ - struct bpf_htab *htab; - - htab = container_of(rcu, struct bpf_htab, rcu); - bpf_map_area_free(htab->buckets); - kfree(htab); -} - -static void sock_hash_free(struct bpf_map *map) -{ - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - int i; - - synchronize_rcu(); - - /* At this point no update, lookup or delete operations can happen. - * However, be aware we can still get a socket state event updates, - * and data ready callabacks that reference the psock from sk_user_data - * Also psock worker threads are still in-flight. So smap_release_sock - * will only free the psock after cancel_sync on the worker threads - * and a grace period expire to ensure psock is really safe to remove. 
- */ - rcu_read_lock(); - for (i = 0; i < htab->n_buckets; i++) { - struct bucket *b = __select_bucket(htab, i); - struct hlist_head *head; - struct hlist_node *n; - struct htab_elem *l; - - raw_spin_lock_bh(&b->lock); - head = &b->head; - hlist_for_each_entry_safe(l, n, head, hash_node) { - struct sock *sock = l->sk; - struct smap_psock *psock; - - hlist_del_rcu(&l->hash_node); - psock = smap_psock_sk(sock); - /* This check handles a racing sock event that can get - * the sk_callback_lock before this case but after xchg - * causing the refcnt to hit zero and sock user data - * (psock) to be null and queued for garbage collection. - */ - if (likely(psock)) { - smap_list_hash_remove(psock, l); - smap_release_sock(psock, sock); - } - free_htab_elem(htab, l); - } - raw_spin_unlock_bh(&b->lock); - } - rcu_read_unlock(); - call_rcu(&htab->rcu, __bpf_htab_free); -} - -static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, - void *key, u32 key_size, u32 hash, - struct sock *sk, - struct htab_elem *old_elem) -{ - struct htab_elem *l_new; - - if (atomic_inc_return(&htab->count) > htab->map.max_entries) { - if (!old_elem) { - atomic_dec(&htab->count); - return ERR_PTR(-E2BIG); - } - } - l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, - htab->map.numa_node); - if (!l_new) { - atomic_dec(&htab->count); - return ERR_PTR(-ENOMEM); - } - - memcpy(l_new->key, key, key_size); - l_new->sk = sk; - l_new->hash = hash; - return l_new; -} - -static inline u32 htab_map_hash(const void *key, u32 key_len) -{ - return jhash(key, key_len, 0); -} - -static int sock_hash_get_next_key(struct bpf_map *map, - void *key, void *next_key) -{ - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct htab_elem *l, *next_l; - struct hlist_head *h; - u32 hash, key_size; - int i = 0; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - key_size = map->key_size; - if (!key) - goto find_first_elem; - hash = htab_map_hash(key, key_size); - h = select_bucket(htab, hash); - - l = lookup_elem_raw(h, hash, key, key_size); - if (!l) - goto find_first_elem; - next_l = hlist_entry_safe( - rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), - struct htab_elem, hash_node); - if (next_l) { - memcpy(next_key, next_l->key, key_size); - return 0; - } - - /* no more elements in this hash list, go to the next bucket */ - i = hash & (htab->n_buckets - 1); - i++; - -find_first_elem: - /* iterate over buckets */ - for (; i < htab->n_buckets; i++) { - h = select_bucket(htab, i); - - /* pick first element in the bucket */ - next_l = hlist_entry_safe( - rcu_dereference_raw(hlist_first_rcu(h)), - struct htab_elem, hash_node); - if (next_l) { - /* if it's not empty, just return it */ - memcpy(next_key, next_l->key, key_size); - return 0; - } - } - - /* iterated over all buckets and all elements */ - return -ENOENT; -} - -static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops, - struct bpf_map *map, - void *key, u64 map_flags) -{ - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_sock_progs *progs = &htab->progs; - struct htab_elem *l_new = NULL, *l_old; - struct smap_psock_map_entry *e = NULL; - struct hlist_head *head; - struct smap_psock *psock; - u32 key_size, hash; - struct sock *sock; - struct bucket *b; - int err; - - sock = skops->sk; - - if (sock->sk_type != SOCK_STREAM || - sock->sk_protocol != IPPROTO_TCP) - return -EOPNOTSUPP; - - if (unlikely(map_flags > BPF_EXIST)) - return -EINVAL; - - e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN); - if (!e) - return 
-ENOMEM; - - WARN_ON_ONCE(!rcu_read_lock_held()); - key_size = map->key_size; - hash = htab_map_hash(key, key_size); - b = __select_bucket(htab, hash); - head = &b->head; - - err = __sock_map_ctx_update_elem(map, progs, sock, key); - if (err) - goto err; - - /* psock is valid here because otherwise above *ctx_update_elem would - * have thrown an error. It is safe to skip error check. - */ - psock = smap_psock_sk(sock); - raw_spin_lock_bh(&b->lock); - l_old = lookup_elem_raw(head, hash, key, key_size); - if (l_old && map_flags == BPF_NOEXIST) { - err = -EEXIST; - goto bucket_err; - } - if (!l_old && map_flags == BPF_EXIST) { - err = -ENOENT; - goto bucket_err; - } - - l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old); - if (IS_ERR(l_new)) { - err = PTR_ERR(l_new); - goto bucket_err; - } - - rcu_assign_pointer(e->hash_link, l_new); - e->map = map; - spin_lock_bh(&psock->maps_lock); - list_add_tail(&e->list, &psock->maps); - spin_unlock_bh(&psock->maps_lock); - - /* add new element to the head of the list, so that - * concurrent search will find it before old elem - */ - hlist_add_head_rcu(&l_new->hash_node, head); - if (l_old) { - psock = smap_psock_sk(l_old->sk); - - hlist_del_rcu(&l_old->hash_node); - smap_list_hash_remove(psock, l_old); - smap_release_sock(psock, l_old->sk); - free_htab_elem(htab, l_old); - } - raw_spin_unlock_bh(&b->lock); - return 0; -bucket_err: - smap_release_sock(psock, sock); - raw_spin_unlock_bh(&b->lock); -err: - kfree(e); - return err; -} - -static int sock_hash_update_elem(struct bpf_map *map, - void *key, void *value, u64 flags) -{ - struct bpf_sock_ops_kern skops; - u32 fd = *(u32 *)value; - struct socket *socket; - int err; - - socket = sockfd_lookup(fd, &err); - if (!socket) - return err; - - skops.sk = socket->sk; - if (!skops.sk) { - fput(socket->file); - return -EINVAL; - } - - /* ULPs are currently supported only for TCP sockets in ESTABLISHED - * state. - */ - if (skops.sk->sk_type != SOCK_STREAM || - skops.sk->sk_protocol != IPPROTO_TCP || - skops.sk->sk_state != TCP_ESTABLISHED) { - fput(socket->file); - return -EOPNOTSUPP; - } - - lock_sock(skops.sk); - preempt_disable(); - rcu_read_lock(); - err = sock_hash_ctx_update_elem(&skops, map, key, flags); - rcu_read_unlock(); - preempt_enable(); - release_sock(skops.sk); - fput(socket->file); - return err; -} - -static int sock_hash_delete_elem(struct bpf_map *map, void *key) -{ - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; - struct bucket *b; - struct htab_elem *l; - u32 hash, key_size; - int ret = -ENOENT; - - key_size = map->key_size; - hash = htab_map_hash(key, key_size); - b = __select_bucket(htab, hash); - head = &b->head; - - raw_spin_lock_bh(&b->lock); - l = lookup_elem_raw(head, hash, key, key_size); - if (l) { - struct sock *sock = l->sk; - struct smap_psock *psock; - - hlist_del_rcu(&l->hash_node); - psock = smap_psock_sk(sock); - /* This check handles a racing sock event that can get the - * sk_callback_lock before this case but after xchg happens - * causing the refcnt to hit zero and sock user data (psock) - * to be null and queued for garbage collection. 
- */ - if (likely(psock)) { - smap_list_hash_remove(psock, l); - smap_release_sock(psock, sock); - } - free_htab_elem(htab, l); - ret = 0; - } - raw_spin_unlock_bh(&b->lock); - return ret; -} - -struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) -{ - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct hlist_head *head; - struct htab_elem *l; - u32 key_size, hash; - struct bucket *b; - struct sock *sk; - - key_size = map->key_size; - hash = htab_map_hash(key, key_size); - b = __select_bucket(htab, hash); - head = &b->head; - - l = lookup_elem_raw(head, hash, key, key_size); - sk = l ? l->sk : NULL; - return sk; -} - -const struct bpf_map_ops sock_map_ops = { - .map_alloc = sock_map_alloc, - .map_free = sock_map_free, - .map_lookup_elem = sock_map_lookup, - .map_get_next_key = sock_map_get_next_key, - .map_update_elem = sock_map_update_elem, - .map_delete_elem = sock_map_delete_elem, - .map_release_uref = sock_map_release, - .map_check_btf = map_check_no_btf, -}; - -const struct bpf_map_ops sock_hash_ops = { - .map_alloc = sock_hash_alloc, - .map_free = sock_hash_free, - .map_lookup_elem = sock_map_lookup, - .map_get_next_key = sock_hash_get_next_key, - .map_update_elem = sock_hash_update_elem, - .map_delete_elem = sock_hash_delete_elem, - .map_release_uref = sock_map_release, - .map_check_btf = map_check_no_btf, -}; - -static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops) -{ - return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || - ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; -} -BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, - struct bpf_map *, map, void *, key, u64, flags) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - - /* ULPs are currently supported only for TCP sockets in ESTABLISHED - * state. This checks that the sock ops triggering the update is - * one indicating we are (or will be soon) in an ESTABLISHED state. 
- */ - if (!bpf_is_valid_sock_op(bpf_sock)) - return -EOPNOTSUPP; - return sock_map_ctx_update_elem(bpf_sock, map, key, flags); -} - -const struct bpf_func_proto bpf_sock_map_update_proto = { - .func = bpf_sock_map_update, - .gpl_only = false, - .pkt_access = true, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_PTR_TO_MAP_KEY, - .arg4_type = ARG_ANYTHING, -}; - -BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock, - struct bpf_map *, map, void *, key, u64, flags) -{ - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (!bpf_is_valid_sock_op(bpf_sock)) - return -EOPNOTSUPP; - return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); -} - -const struct bpf_func_proto bpf_sock_hash_update_proto = { - .func = bpf_sock_hash_update, - .gpl_only = false, - .pkt_access = true, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_PTR_TO_MAP_KEY, - .arg4_type = ARG_ANYTHING, -}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 53968f82b919..f4ecd6ed2252 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1664,7 +1664,7 @@ static int bpf_prog_attach(const union bpf_attr *attr) switch (ptype) { case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: - ret = sockmap_get_from_fd(attr, ptype, prog); + ret = sock_map_get_from_fd(attr, prog); break; case BPF_PROG_TYPE_LIRC_MODE2: ret = lirc_prog_attach(attr, prog); @@ -1718,10 +1718,10 @@ static int bpf_prog_detach(const union bpf_attr *attr) ptype = BPF_PROG_TYPE_CGROUP_DEVICE; break; case BPF_SK_MSG_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL); + return sock_map_get_from_fd(attr, NULL); case BPF_SK_SKB_STREAM_PARSER: case BPF_SK_SKB_STREAM_VERDICT: - return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL); + return sock_map_get_from_fd(attr, NULL); case BPF_LIRC_MODE2: return lirc_prog_detach(attr); case BPF_FLOW_DISSECTOR: diff --git a/net/Kconfig b/net/Kconfig index 228dfa382eec..f235edb593ba 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -300,8 +300,11 @@ config BPF_JIT config BPF_STREAM_PARSER bool "enable BPF STREAM_PARSER" + depends on INET depends on BPF_SYSCALL + depends on CGROUP_BPF select STREAM_PARSER + select NET_SOCK_MSG ---help--- Enabling this allows a stream parser to be used with BPF_MAP_TYPE_SOCKMAP. @@ -413,6 +416,14 @@ config GRO_CELLS config SOCK_VALIDATE_XMIT bool +config NET_SOCK_MSG + bool + default n + help + The NET_SOCK_MSG provides a framework for plain sockets (e.g. TCP) or + ULPs (upper layer modules, e.g. TLS) to process L7 application data + with the help of BPF programs. 
+ config NET_DEVLINK tristate "Network physical/parent device Netlink interface" help diff --git a/net/core/Makefile b/net/core/Makefile index 80175e6a2eb8..fccd31e0e7f7 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -16,6 +16,7 @@ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ obj-y += net-sysfs.o obj-$(CONFIG_PAGE_POOL) += page_pool.o obj-$(CONFIG_PROC_FS) += net-procfs.o +obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o obj-$(CONFIG_NET_PKTGEN) += pktgen.o obj-$(CONFIG_NETPOLL) += netpoll.o obj-$(CONFIG_FIB_RULES) += fib_rules.o @@ -27,6 +28,7 @@ obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o obj-$(CONFIG_LWTUNNEL) += lwtunnel.o obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o +obj-$(CONFIG_BPF_STREAM_PARSER) += sock_map.o obj-$(CONFIG_DST_CACHE) += dst_cache.o obj-$(CONFIG_HWBM) += hwbm.o obj-$(CONFIG_NET_DEVLINK) += devlink.o diff --git a/net/core/filter.c b/net/core/filter.c index b844761b5d4c..0f5260b04bfe 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -2142,123 +2143,7 @@ static const struct bpf_func_proto bpf_redirect_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, - struct bpf_map *, map, void *, key, u64, flags) -{ - struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - - /* If user passes invalid input drop the packet. */ - if (unlikely(flags & ~(BPF_F_INGRESS))) - return SK_DROP; - - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) - return SK_DROP; - - return SK_PASS; -} - -static const struct bpf_func_proto bpf_sk_redirect_hash_proto = { - .func = bpf_sk_redirect_hash, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_PTR_TO_MAP_KEY, - .arg4_type = ARG_ANYTHING, -}; - -BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, - struct bpf_map *, map, u32, key, u64, flags) -{ - struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - - /* If user passes invalid input drop the packet. */ - if (unlikely(flags & ~(BPF_F_INGRESS))) - return SK_DROP; - - tcb->bpf.flags = flags; - tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key); - if (!tcb->bpf.sk_redir) - return SK_DROP; - - return SK_PASS; -} - -struct sock *do_sk_redirect_map(struct sk_buff *skb) -{ - struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - - return tcb->bpf.sk_redir; -} - -static const struct bpf_func_proto bpf_sk_redirect_map_proto = { - .func = bpf_sk_redirect_map, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_ANYTHING, -}; - -BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg_buff *, msg, - struct bpf_map *, map, void *, key, u64, flags) -{ - /* If user passes invalid input drop the packet. 
*/ - if (unlikely(flags & ~(BPF_F_INGRESS))) - return SK_DROP; - - msg->flags = flags; - msg->sk_redir = __sock_hash_lookup_elem(map, key); - if (!msg->sk_redir) - return SK_DROP; - - return SK_PASS; -} - -static const struct bpf_func_proto bpf_msg_redirect_hash_proto = { - .func = bpf_msg_redirect_hash, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_PTR_TO_MAP_KEY, - .arg4_type = ARG_ANYTHING, -}; - -BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg, - struct bpf_map *, map, u32, key, u64, flags) -{ - /* If user passes invalid input drop the packet. */ - if (unlikely(flags & ~(BPF_F_INGRESS))) - return SK_DROP; - - msg->flags = flags; - msg->sk_redir = __sock_map_lookup_elem(map, key); - if (!msg->sk_redir) - return SK_DROP; - - return SK_PASS; -} - -struct sock *do_msg_redirect_map(struct sk_msg_buff *msg) -{ - return msg->sk_redir; -} - -static const struct bpf_func_proto bpf_msg_redirect_map_proto = { - .func = bpf_msg_redirect_map, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_ANYTHING, -}; - -BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes) +BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) { msg->apply_bytes = bytes; return 0; @@ -2272,7 +2157,7 @@ static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes) +BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) { msg->cork_bytes = bytes; return 0; @@ -2286,45 +2171,37 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { .arg2_type = ARG_ANYTHING, }; -#define sk_msg_iter_var(var) \ - do { \ - var++; \ - if (var == MAX_SKB_FRAGS) \ - var = 0; \ - } while (0) - -BPF_CALL_4(bpf_msg_pull_data, - struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) +BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, + u32, end, u64, flags) { - unsigned int len = 0, offset = 0, copy = 0, poffset = 0; - int bytes = end - start, bytes_sg_total; - struct scatterlist *sg = msg->sg_data; - int first_sg, last_sg, i, shift; - unsigned char *p, *to, *from; + u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; + u32 first_sge, last_sge, i, shift, bytes_sg_total; + struct scatterlist *sge; + u8 *raw, *to, *from; struct page *page; if (unlikely(flags || end <= start)) return -EINVAL; /* First find the starting scatterlist element */ - i = msg->sg_start; + i = msg->sg.start; do { - len = sg[i].length; + len = sk_msg_elem(msg, i)->length; if (start < offset + len) break; offset += len; - sk_msg_iter_var(i); - } while (i != msg->sg_end); + sk_msg_iter_var_next(i); + } while (i != msg->sg.end); if (unlikely(start >= offset + len)) return -EINVAL; - first_sg = i; + first_sge = i; /* The start may point into the sg element so we need to also * account for the headroom. */ bytes_sg_total = start - offset + bytes; - if (!msg->sg_copy[i] && bytes_sg_total <= len) + if (!msg->sg.copy[i] && bytes_sg_total <= len) goto out; /* At this point we need to linearize multiple scatterlist @@ -2338,12 +2215,12 @@ BPF_CALL_4(bpf_msg_pull_data, * will copy the entire sg entry. 
*/ do { - copy += sg[i].length; - sk_msg_iter_var(i); + copy += sk_msg_elem(msg, i)->length; + sk_msg_iter_var_next(i); if (bytes_sg_total <= copy) break; - } while (i != msg->sg_end); - last_sg = i; + } while (i != msg->sg.end); + last_sge = i; if (unlikely(bytes_sg_total > copy)) return -EINVAL; @@ -2352,63 +2229,61 @@ BPF_CALL_4(bpf_msg_pull_data, get_order(copy)); if (unlikely(!page)) return -ENOMEM; - p = page_address(page); - i = first_sg; + raw = page_address(page); + i = first_sge; do { - from = sg_virt(&sg[i]); - len = sg[i].length; - to = p + poffset; + sge = sk_msg_elem(msg, i); + from = sg_virt(sge); + len = sge->length; + to = raw + poffset; memcpy(to, from, len); poffset += len; - sg[i].length = 0; - put_page(sg_page(&sg[i])); + sge->length = 0; + put_page(sg_page(sge)); - sk_msg_iter_var(i); - } while (i != last_sg); + sk_msg_iter_var_next(i); + } while (i != last_sge); - sg[first_sg].length = copy; - sg_set_page(&sg[first_sg], page, copy, 0); + sg_set_page(&msg->sg.data[first_sge], page, copy, 0); /* To repair sg ring we need to shift entries. If we only * had a single entry though we can just replace it and * be done. Otherwise walk the ring and shift the entries. */ - WARN_ON_ONCE(last_sg == first_sg); - shift = last_sg > first_sg ? - last_sg - first_sg - 1 : - MAX_SKB_FRAGS - first_sg + last_sg - 1; + WARN_ON_ONCE(last_sge == first_sge); + shift = last_sge > first_sge ? + last_sge - first_sge - 1 : + MAX_SKB_FRAGS - first_sge + last_sge - 1; if (!shift) goto out; - i = first_sg; - sk_msg_iter_var(i); + i = first_sge; + sk_msg_iter_var_next(i); do { - int move_from; + u32 move_from; - if (i + shift >= MAX_SKB_FRAGS) - move_from = i + shift - MAX_SKB_FRAGS; + if (i + shift >= MAX_MSG_FRAGS) + move_from = i + shift - MAX_MSG_FRAGS; else move_from = i + shift; - - if (move_from == msg->sg_end) + if (move_from == msg->sg.end) break; - sg[i] = sg[move_from]; - sg[move_from].length = 0; - sg[move_from].page_link = 0; - sg[move_from].offset = 0; - - sk_msg_iter_var(i); + msg->sg.data[i] = msg->sg.data[move_from]; + msg->sg.data[move_from].length = 0; + msg->sg.data[move_from].page_link = 0; + msg->sg.data[move_from].offset = 0; + sk_msg_iter_var_next(i); } while (1); - msg->sg_end -= shift; - if (msg->sg_end < 0) - msg->sg_end += MAX_SKB_FRAGS; + + msg->sg.end = msg->sg.end - shift > msg->sg.end ? 
+ msg->sg.end - shift + MAX_MSG_FRAGS : + msg->sg.end - shift; out: - msg->data = sg_virt(&sg[first_sg]) + start - offset; + msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; msg->data_end = msg->data + bytes; - return 0; } @@ -5203,6 +5078,9 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +const struct bpf_func_proto bpf_sock_map_update_proto __weak; +const struct bpf_func_proto bpf_sock_hash_update_proto __weak; + static const struct bpf_func_proto * sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -5226,6 +5104,9 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; +const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; + static const struct bpf_func_proto * sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -5247,6 +5128,9 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; +const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; + static const struct bpf_func_proto * sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -7001,22 +6885,22 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, switch (si->off) { case offsetof(struct sk_msg_md, data): - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data), + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, data)); + offsetof(struct sk_msg, data)); break; case offsetof(struct sk_msg_md, data_end): - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end), + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, data_end)); + offsetof(struct sk_msg, data_end)); break; case offsetof(struct sk_msg_md, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_family)); break; @@ -7025,9 +6909,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_daddr)); break; @@ -7037,9 +6921,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_rcv_saddr)); @@ -7054,9 +6938,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, off = si->off; off -= offsetof(struct sk_msg_md, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, 
si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + @@ -7075,9 +6959,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, off = si->off; off -= offsetof(struct sk_msg_md, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + @@ -7091,9 +6975,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_dport)); #ifndef __BIG_ENDIAN_BITFIELD @@ -7105,9 +6989,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct sk_msg_buff, sk), + struct sk_msg, sk), si->dst_reg, si->src_reg, - offsetof(struct sk_msg_buff, sk)); + offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_num)); break; diff --git a/net/core/skmsg.c b/net/core/skmsg.c new file mode 100644 index 000000000000..ae2b281c9c57 --- /dev/null +++ b/net/core/skmsg.c @@ -0,0 +1,763 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ + +#include +#include +#include + +#include +#include + +static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce) +{ + if (msg->sg.end > msg->sg.start && + elem_first_coalesce < msg->sg.end) + return true; + + if (msg->sg.end < msg->sg.start && + (elem_first_coalesce > msg->sg.start || + elem_first_coalesce < msg->sg.end)) + return true; + + return false; +} + +int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, + int elem_first_coalesce) +{ + struct page_frag *pfrag = sk_page_frag(sk); + int ret = 0; + + len -= msg->sg.size; + while (len > 0) { + struct scatterlist *sge; + u32 orig_offset; + int use, i; + + if (!sk_page_frag_refill(sk, pfrag)) + return -ENOMEM; + + orig_offset = pfrag->offset; + use = min_t(int, len, pfrag->size - orig_offset); + if (!sk_wmem_schedule(sk, use)) + return -ENOMEM; + + i = msg->sg.end; + sk_msg_iter_var_prev(i); + sge = &msg->sg.data[i]; + + if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) && + sg_page(sge) == pfrag->page && + sge->offset + sge->length == orig_offset) { + sge->length += use; + } else { + if (sk_msg_full(msg)) { + ret = -ENOSPC; + break; + } + + sge = &msg->sg.data[msg->sg.end]; + sg_unmark_end(sge); + sg_set_page(sge, pfrag->page, use, orig_offset); + get_page(pfrag->page); + sk_msg_iter_next(msg, end); + } + + sk_mem_charge(sk, use); + msg->sg.size += use; + pfrag->offset += use; + len -= use; + } + + return ret; +} +EXPORT_SYMBOL_GPL(sk_msg_alloc); + +void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes) +{ + int i = msg->sg.start; + + do { + struct scatterlist *sge = sk_msg_elem(msg, i); + + if (bytes < sge->length) { + sge->length -= bytes; + sge->offset += bytes; + sk_mem_uncharge(sk, bytes); + break; + } + + sk_mem_uncharge(sk, sge->length); + bytes -= sge->length; + sge->length = 0; + sge->offset = 0; + sk_msg_iter_var_next(i); + 
} while (bytes && i != msg->sg.end); + msg->sg.start = i; +} +EXPORT_SYMBOL_GPL(sk_msg_return_zero); + +void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes) +{ + int i = msg->sg.start; + + do { + struct scatterlist *sge = &msg->sg.data[i]; + int uncharge = (bytes < sge->length) ? bytes : sge->length; + + sk_mem_uncharge(sk, uncharge); + bytes -= uncharge; + sk_msg_iter_var_next(i); + } while (i != msg->sg.end); +} +EXPORT_SYMBOL_GPL(sk_msg_return); + +static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i, + bool charge) +{ + struct scatterlist *sge = sk_msg_elem(msg, i); + u32 len = sge->length; + + if (charge) + sk_mem_uncharge(sk, len); + if (!msg->skb) + put_page(sg_page(sge)); + memset(sge, 0, sizeof(*sge)); + return len; +} + +static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i, + bool charge) +{ + struct scatterlist *sge = sk_msg_elem(msg, i); + int freed = 0; + + while (msg->sg.size) { + msg->sg.size -= sge->length; + freed += sk_msg_free_elem(sk, msg, i, charge); + sk_msg_iter_var_next(i); + sk_msg_check_to_free(msg, i, msg->sg.size); + sge = sk_msg_elem(msg, i); + } + if (msg->skb) + consume_skb(msg->skb); + sk_msg_init(msg); + return freed; +} + +int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg) +{ + return __sk_msg_free(sk, msg, msg->sg.start, false); +} +EXPORT_SYMBOL_GPL(sk_msg_free_nocharge); + +int sk_msg_free(struct sock *sk, struct sk_msg *msg) +{ + return __sk_msg_free(sk, msg, msg->sg.start, true); +} +EXPORT_SYMBOL_GPL(sk_msg_free); + +static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, + u32 bytes, bool charge) +{ + struct scatterlist *sge; + u32 i = msg->sg.start; + + while (bytes) { + sge = sk_msg_elem(msg, i); + if (!sge->length) + break; + if (bytes < sge->length) { + if (charge) + sk_mem_uncharge(sk, bytes); + sge->length -= bytes; + sge->offset += bytes; + msg->sg.size -= bytes; + break; + } + + msg->sg.size -= sge->length; + bytes -= sge->length; + sk_msg_free_elem(sk, msg, i, charge); + sk_msg_iter_var_next(i); + sk_msg_check_to_free(msg, i, bytes); + } + msg->sg.start = i; +} + +void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes) +{ + __sk_msg_free_partial(sk, msg, bytes, true); +} +EXPORT_SYMBOL_GPL(sk_msg_free_partial); + +void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, + u32 bytes) +{ + __sk_msg_free_partial(sk, msg, bytes, false); +} + +void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len) +{ + int trim = msg->sg.size - len; + u32 i = msg->sg.end; + + if (trim <= 0) { + WARN_ON(trim < 0); + return; + } + + sk_msg_iter_var_prev(i); + msg->sg.size = len; + while (msg->sg.data[i].length && + trim >= msg->sg.data[i].length) { + trim -= msg->sg.data[i].length; + sk_msg_free_elem(sk, msg, i, true); + sk_msg_iter_var_prev(i); + if (!trim) + goto out; + } + + msg->sg.data[i].length -= trim; + sk_mem_uncharge(sk, trim); +out: + /* If we trim data before curr pointer update copybreak and current + * so that any future copy operations start at new copy location. + * However trimed data that has not yet been used in a copy op + * does not require an update. 
+ */ + if (msg->sg.curr >= i) { + msg->sg.curr = i; + msg->sg.copybreak = msg->sg.data[i].length; + } + sk_msg_iter_var_next(i); + msg->sg.end = i; +} +EXPORT_SYMBOL_GPL(sk_msg_trim); + +int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes) +{ + int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg); + const int to_max_pages = MAX_MSG_FRAGS; + struct page *pages[MAX_MSG_FRAGS]; + ssize_t orig, copied, use, offset; + + orig = msg->sg.size; + while (bytes > 0) { + i = 0; + maxpages = to_max_pages - num_elems; + if (maxpages == 0) { + ret = -EFAULT; + goto out; + } + + copied = iov_iter_get_pages(from, pages, bytes, maxpages, + &offset); + if (copied <= 0) { + ret = -EFAULT; + goto out; + } + + iov_iter_advance(from, copied); + bytes -= copied; + msg->sg.size += copied; + + while (copied) { + use = min_t(int, copied, PAGE_SIZE - offset); + sg_set_page(&msg->sg.data[msg->sg.end], + pages[i], use, offset); + sg_unmark_end(&msg->sg.data[msg->sg.end]); + sk_mem_charge(sk, use); + + offset = 0; + copied -= use; + sk_msg_iter_next(msg, end); + num_elems++; + i++; + } + /* When zerocopy is mixed with sk_msg_*copy* operations we + * may have a copybreak set in this case clear and prefer + * zerocopy remainder when possible. + */ + msg->sg.copybreak = 0; + msg->sg.curr = msg->sg.end; + } +out: + /* Revert iov_iter updates, msg will need to use 'trim' later if it + * also needs to be cleared. + */ + if (ret) + iov_iter_revert(from, msg->sg.size - orig); + return ret; +} +EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter); + +int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes) +{ + int ret = -ENOSPC, i = msg->sg.curr; + struct scatterlist *sge; + u32 copy, buf_size; + void *to; + + do { + sge = sk_msg_elem(msg, i); + /* This is possible if a trim operation shrunk the buffer */ + if (msg->sg.copybreak >= sge->length) { + msg->sg.copybreak = 0; + sk_msg_iter_var_next(i); + if (i == msg->sg.end) + break; + sge = sk_msg_elem(msg, i); + } + + buf_size = sge->length - msg->sg.copybreak; + copy = (buf_size > bytes) ? bytes : buf_size; + to = sg_virt(sge) + msg->sg.copybreak; + msg->sg.copybreak += copy; + if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) + ret = copy_from_iter_nocache(to, copy, from); + else + ret = copy_from_iter(to, copy, from); + if (ret != copy) { + ret = -EFAULT; + goto out; + } + bytes -= copy; + if (!bytes) + break; + msg->sg.copybreak = 0; + sk_msg_iter_var_next(i); + } while (i != msg->sg.end); +out: + msg->sg.curr = i; + return ret; +} +EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter); + +static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) +{ + struct sock *sk = psock->sk; + int copied = 0, num_sge; + struct sk_msg *msg; + + msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); + if (unlikely(!msg)) + return -EAGAIN; + if (!sk_rmem_schedule(sk, skb, skb->len)) { + kfree(msg); + return -EAGAIN; + } + + sk_msg_init(msg); + num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); + if (unlikely(num_sge < 0)) { + kfree(msg); + return num_sge; + } + + sk_mem_charge(sk, skb->len); + copied = skb->len; + msg->sg.start = 0; + msg->sg.end = num_sge == MAX_MSG_FRAGS ? 
0 : num_sge; + msg->skb = skb; + + sk_psock_queue_msg(psock, msg); + sk->sk_data_ready(sk); + return copied; +} + +static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, + u32 off, u32 len, bool ingress) +{ + if (ingress) + return sk_psock_skb_ingress(psock, skb); + else + return skb_send_sock_locked(psock->sk, skb, off, len); +} + +static void sk_psock_backlog(struct work_struct *work) +{ + struct sk_psock *psock = container_of(work, struct sk_psock, work); + struct sk_psock_work_state *state = &psock->work_state; + struct sk_buff *skb; + bool ingress; + u32 len, off; + int ret; + + /* Lock sock to avoid losing sk_socket during loop. */ + lock_sock(psock->sk); + if (state->skb) { + skb = state->skb; + len = state->len; + off = state->off; + state->skb = NULL; + goto start; + } + + while ((skb = skb_dequeue(&psock->ingress_skb))) { + len = skb->len; + off = 0; +start: + ingress = tcp_skb_bpf_ingress(skb); + do { + ret = -EIO; + if (likely(psock->sk->sk_socket)) + ret = sk_psock_handle_skb(psock, skb, off, + len, ingress); + if (ret <= 0) { + if (ret == -EAGAIN) { + state->skb = skb; + state->len = len; + state->off = off; + goto end; + } + /* Hard errors break pipe and stop xmit. */ + sk_psock_report_error(psock, ret ? -ret : EPIPE); + sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); + kfree_skb(skb); + goto end; + } + off += ret; + len -= ret; + } while (len); + + if (!ingress) + kfree_skb(skb); + } +end: + release_sock(psock->sk); +} + +struct sk_psock *sk_psock_init(struct sock *sk, int node) +{ + struct sk_psock *psock = kzalloc_node(sizeof(*psock), + GFP_ATOMIC | __GFP_NOWARN, + node); + if (!psock) + return NULL; + + psock->sk = sk; + psock->eval = __SK_NONE; + + INIT_LIST_HEAD(&psock->link); + spin_lock_init(&psock->link_lock); + + INIT_WORK(&psock->work, sk_psock_backlog); + INIT_LIST_HEAD(&psock->ingress_msg); + skb_queue_head_init(&psock->ingress_skb); + + sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED); + refcount_set(&psock->refcnt, 1); + + rcu_assign_sk_user_data(sk, psock); + sock_hold(sk); + + return psock; +} +EXPORT_SYMBOL_GPL(sk_psock_init); + +struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock) +{ + struct sk_psock_link *link; + + spin_lock_bh(&psock->link_lock); + link = list_first_entry_or_null(&psock->link, struct sk_psock_link, + list); + if (link) + list_del(&link->list); + spin_unlock_bh(&psock->link_lock); + return link; +} + +void __sk_psock_purge_ingress_msg(struct sk_psock *psock) +{ + struct sk_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) { + list_del(&msg->list); + sk_msg_free(psock->sk, msg); + kfree(msg); + } +} + +static void sk_psock_zap_ingress(struct sk_psock *psock) +{ + __skb_queue_purge(&psock->ingress_skb); + __sk_psock_purge_ingress_msg(psock); +} + +static void sk_psock_link_destroy(struct sk_psock *psock) +{ + struct sk_psock_link *link, *tmp; + + list_for_each_entry_safe(link, tmp, &psock->link, list) { + list_del(&link->list); + sk_psock_free_link(link); + } +} + +static void sk_psock_destroy_deferred(struct work_struct *gc) +{ + struct sk_psock *psock = container_of(gc, struct sk_psock, gc); + + /* No sk_callback_lock since already detached. 
*/ + if (psock->parser.enabled) + strp_done(&psock->parser.strp); + + cancel_work_sync(&psock->work); + + psock_progs_drop(&psock->progs); + + sk_psock_link_destroy(psock); + sk_psock_cork_free(psock); + sk_psock_zap_ingress(psock); + + if (psock->sk_redir) + sock_put(psock->sk_redir); + sock_put(psock->sk); + kfree(psock); +} + +void sk_psock_destroy(struct rcu_head *rcu) +{ + struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu); + + INIT_WORK(&psock->gc, sk_psock_destroy_deferred); + schedule_work(&psock->gc); +} +EXPORT_SYMBOL_GPL(sk_psock_destroy); + +void sk_psock_drop(struct sock *sk, struct sk_psock *psock) +{ + rcu_assign_sk_user_data(sk, NULL); + sk_psock_cork_free(psock); + sk_psock_restore_proto(sk, psock); + + write_lock_bh(&sk->sk_callback_lock); + if (psock->progs.skb_parser) + sk_psock_stop_strp(sk, psock); + write_unlock_bh(&sk->sk_callback_lock); + sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); + + call_rcu_sched(&psock->rcu, sk_psock_destroy); +} +EXPORT_SYMBOL_GPL(sk_psock_drop); + +static int sk_psock_map_verd(int verdict, bool redir) +{ + switch (verdict) { + case SK_PASS: + return redir ? __SK_REDIRECT : __SK_PASS; + case SK_DROP: + default: + break; + } + + return __SK_DROP; +} + +int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg) +{ + struct bpf_prog *prog; + int ret; + + preempt_disable(); + rcu_read_lock(); + prog = READ_ONCE(psock->progs.msg_parser); + if (unlikely(!prog)) { + ret = __SK_PASS; + goto out; + } + + sk_msg_compute_data_pointers(msg); + msg->sk = sk; + ret = BPF_PROG_RUN(prog, msg); + ret = sk_psock_map_verd(ret, msg->sk_redir); + psock->apply_bytes = msg->apply_bytes; + if (ret == __SK_REDIRECT) { + if (psock->sk_redir) + sock_put(psock->sk_redir); + psock->sk_redir = msg->sk_redir; + if (!psock->sk_redir) { + ret = __SK_DROP; + goto out; + } + sock_hold(psock->sk_redir); + } +out: + rcu_read_unlock(); + preempt_enable(); + return ret; +} +EXPORT_SYMBOL_GPL(sk_psock_msg_verdict); + +static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog, + struct sk_buff *skb) +{ + int ret; + + skb->sk = psock->sk; + bpf_compute_data_end_sk_skb(skb); + preempt_disable(); + ret = BPF_PROG_RUN(prog, skb); + preempt_enable(); + /* strparser clones the skb before handing it to a upper layer, + * meaning skb_orphan has been called. We NULL sk on the way out + * to ensure we don't trigger a BUG_ON() in skb/sk operations + * later and because we are not charging the memory of this skb + * to any socket yet. 
+ */ + skb->sk = NULL; + return ret; +} + +static struct sk_psock *sk_psock_from_strp(struct strparser *strp) +{ + struct sk_psock_parser *parser; + + parser = container_of(strp, struct sk_psock_parser, strp); + return container_of(parser, struct sk_psock, parser); +} + +static void sk_psock_verdict_apply(struct sk_psock *psock, + struct sk_buff *skb, int verdict) +{ + struct sk_psock *psock_other; + struct sock *sk_other; + bool ingress; + + switch (verdict) { + case __SK_REDIRECT: + sk_other = tcp_skb_bpf_redirect_fetch(skb); + if (unlikely(!sk_other)) + goto out_free; + psock_other = sk_psock(sk_other); + if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || + !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) + goto out_free; + ingress = tcp_skb_bpf_ingress(skb); + if ((!ingress && sock_writeable(sk_other)) || + (ingress && + atomic_read(&sk_other->sk_rmem_alloc) <= + sk_other->sk_rcvbuf)) { + if (!ingress) + skb_set_owner_w(skb, sk_other); + skb_queue_tail(&psock_other->ingress_skb, skb); + schedule_work(&psock_other->work); + break; + } + /* fall-through */ + case __SK_DROP: + /* fall-through */ + default: +out_free: + kfree_skb(skb); + } +} + +static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) +{ + struct sk_psock *psock = sk_psock_from_strp(strp); + struct bpf_prog *prog; + int ret = __SK_DROP; + + rcu_read_lock(); + prog = READ_ONCE(psock->progs.skb_verdict); + if (likely(prog)) { + skb_orphan(skb); + tcp_skb_bpf_redirect_clear(skb); + ret = sk_psock_bpf_run(psock, prog, skb); + ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); + } + rcu_read_unlock(); + sk_psock_verdict_apply(psock, skb, ret); +} + +static int sk_psock_strp_read_done(struct strparser *strp, int err) +{ + return err; +} + +static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb) +{ + struct sk_psock *psock = sk_psock_from_strp(strp); + struct bpf_prog *prog; + int ret = skb->len; + + rcu_read_lock(); + prog = READ_ONCE(psock->progs.skb_parser); + if (likely(prog)) + ret = sk_psock_bpf_run(psock, prog, skb); + rcu_read_unlock(); + return ret; +} + +/* Called with socket lock held. 
*/ +static void sk_psock_data_ready(struct sock *sk) +{ + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (likely(psock)) { + write_lock_bh(&sk->sk_callback_lock); + strp_data_ready(&psock->parser.strp); + write_unlock_bh(&sk->sk_callback_lock); + } + rcu_read_unlock(); +} + +static void sk_psock_write_space(struct sock *sk) +{ + struct sk_psock *psock; + void (*write_space)(struct sock *sk); + + rcu_read_lock(); + psock = sk_psock(sk); + if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))) + schedule_work(&psock->work); + write_space = psock->saved_write_space; + rcu_read_unlock(); + write_space(sk); +} + +int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) +{ + static const struct strp_callbacks cb = { + .rcv_msg = sk_psock_strp_read, + .read_sock_done = sk_psock_strp_read_done, + .parse_msg = sk_psock_strp_parse, + }; + + psock->parser.enabled = false; + return strp_init(&psock->parser.strp, sk, &cb); +} + +void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) +{ + struct sk_psock_parser *parser = &psock->parser; + + if (parser->enabled) + return; + + parser->saved_data_ready = sk->sk_data_ready; + sk->sk_data_ready = sk_psock_data_ready; + sk->sk_write_space = sk_psock_write_space; + parser->enabled = true; +} + +void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) +{ + struct sk_psock_parser *parser = &psock->parser; + + if (!parser->enabled) + return; + + sk->sk_data_ready = parser->saved_data_ready; + parser->saved_data_ready = NULL; + strp_stop(&parser->strp); + parser->enabled = false; +} diff --git a/net/core/sock_map.c b/net/core/sock_map.c new file mode 100644 index 000000000000..3c0e44cb811a --- /dev/null +++ b/net/core/sock_map.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct bpf_stab { + struct bpf_map map; + struct sock **sks; + struct sk_psock_progs progs; + raw_spinlock_t lock; +}; + +#define SOCK_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + +static struct bpf_map *sock_map_alloc(union bpf_attr *attr) +{ + struct bpf_stab *stab; + u64 cost; + int err; + + if (!capable(CAP_NET_ADMIN)) + return ERR_PTR(-EPERM); + if (attr->max_entries == 0 || + attr->key_size != 4 || + attr->value_size != 4 || + attr->map_flags & ~SOCK_CREATE_FLAG_MASK) + return ERR_PTR(-EINVAL); + + stab = kzalloc(sizeof(*stab), GFP_USER); + if (!stab) + return ERR_PTR(-ENOMEM); + + bpf_map_init_from_attr(&stab->map, attr); + raw_spin_lock_init(&stab->lock); + + /* Make sure page count doesn't overflow. 
*/ + cost = (u64) stab->map.max_entries * sizeof(struct sock *); + if (cost >= U32_MAX - PAGE_SIZE) { + err = -EINVAL; + goto free_stab; + } + + stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + err = bpf_map_precharge_memlock(stab->map.pages); + if (err) + goto free_stab; + + stab->sks = bpf_map_area_alloc(stab->map.max_entries * + sizeof(struct sock *), + stab->map.numa_node); + if (stab->sks) + return &stab->map; + err = -ENOMEM; +free_stab: + kfree(stab); + return ERR_PTR(err); +} + +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) +{ + u32 ufd = attr->target_fd; + struct bpf_map *map; + struct fd f; + int ret; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + ret = sock_map_prog_update(map, prog, attr->attach_type); + fdput(f); + return ret; +} + +static void sock_map_sk_acquire(struct sock *sk) + __acquires(&sk->sk_lock.slock) +{ + lock_sock(sk); + preempt_disable(); + rcu_read_lock(); +} + +static void sock_map_sk_release(struct sock *sk) + __releases(&sk->sk_lock.slock) +{ + rcu_read_unlock(); + preempt_enable(); + release_sock(sk); +} + +static void sock_map_add_link(struct sk_psock *psock, + struct sk_psock_link *link, + struct bpf_map *map, void *link_raw) +{ + link->link_raw = link_raw; + link->map = map; + spin_lock_bh(&psock->link_lock); + list_add_tail(&link->list, &psock->link); + spin_unlock_bh(&psock->link_lock); +} + +static void sock_map_del_link(struct sock *sk, + struct sk_psock *psock, void *link_raw) +{ + struct sk_psock_link *link, *tmp; + bool strp_stop = false; + + spin_lock_bh(&psock->link_lock); + list_for_each_entry_safe(link, tmp, &psock->link, list) { + if (link->link_raw == link_raw) { + struct bpf_map *map = link->map; + struct bpf_stab *stab = container_of(map, struct bpf_stab, + map); + if (psock->parser.enabled && stab->progs.skb_parser) + strp_stop = true; + list_del(&link->list); + sk_psock_free_link(link); + } + } + spin_unlock_bh(&psock->link_lock); + if (strp_stop) { + write_lock_bh(&sk->sk_callback_lock); + sk_psock_stop_strp(sk, psock); + write_unlock_bh(&sk->sk_callback_lock); + } +} + +static void sock_map_unref(struct sock *sk, void *link_raw) +{ + struct sk_psock *psock = sk_psock(sk); + + if (likely(psock)) { + sock_map_del_link(sk, psock, link_raw); + sk_psock_put(sk, psock); + } +} + +static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs, + struct sock *sk) +{ + struct bpf_prog *msg_parser, *skb_parser, *skb_verdict; + bool skb_progs, sk_psock_is_new = false; + struct sk_psock *psock; + int ret; + + skb_verdict = READ_ONCE(progs->skb_verdict); + skb_parser = READ_ONCE(progs->skb_parser); + skb_progs = skb_parser && skb_verdict; + if (skb_progs) { + skb_verdict = bpf_prog_inc_not_zero(skb_verdict); + if (IS_ERR(skb_verdict)) + return PTR_ERR(skb_verdict); + skb_parser = bpf_prog_inc_not_zero(skb_parser); + if (IS_ERR(skb_parser)) { + bpf_prog_put(skb_verdict); + return PTR_ERR(skb_parser); + } + } + + msg_parser = READ_ONCE(progs->msg_parser); + if (msg_parser) { + msg_parser = bpf_prog_inc_not_zero(msg_parser); + if (IS_ERR(msg_parser)) { + ret = PTR_ERR(msg_parser); + goto out; + } + } + + psock = sk_psock_get(sk); + if (psock) { + if (!sk_has_psock(sk)) { + ret = -EBUSY; + goto out_progs; + } + if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) || + (skb_progs && READ_ONCE(psock->progs.skb_parser))) { + sk_psock_put(sk, psock); + ret = -EBUSY; + goto out_progs; + } + } else { + psock = sk_psock_init(sk, map->numa_node); + if (!psock) { 
+ ret = -ENOMEM; + goto out_progs; + } + sk_psock_is_new = true; + } + + if (msg_parser) + psock_set_prog(&psock->progs.msg_parser, msg_parser); + if (sk_psock_is_new) { + ret = tcp_bpf_init(sk); + if (ret < 0) + goto out_drop; + } else { + tcp_bpf_reinit(sk); + } + + write_lock_bh(&sk->sk_callback_lock); + if (skb_progs && !psock->parser.enabled) { + ret = sk_psock_init_strp(sk, psock); + if (ret) { + write_unlock_bh(&sk->sk_callback_lock); + goto out_drop; + } + psock_set_prog(&psock->progs.skb_verdict, skb_verdict); + psock_set_prog(&psock->progs.skb_parser, skb_parser); + sk_psock_start_strp(sk, psock); + } + write_unlock_bh(&sk->sk_callback_lock); + return 0; +out_drop: + sk_psock_put(sk, psock); +out_progs: + if (msg_parser) + bpf_prog_put(msg_parser); +out: + if (skb_progs) { + bpf_prog_put(skb_verdict); + bpf_prog_put(skb_parser); + } + return ret; +} + +static void sock_map_free(struct bpf_map *map) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + int i; + + synchronize_rcu(); + rcu_read_lock(); + raw_spin_lock_bh(&stab->lock); + for (i = 0; i < stab->map.max_entries; i++) { + struct sock **psk = &stab->sks[i]; + struct sock *sk; + + sk = xchg(psk, NULL); + if (sk) + sock_map_unref(sk, psk); + } + raw_spin_unlock_bh(&stab->lock); + rcu_read_unlock(); + + bpf_map_area_free(stab->sks); + kfree(stab); +} + +static void sock_map_release_progs(struct bpf_map *map) +{ + psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs); +} + +static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (unlikely(key >= map->max_entries)) + return NULL; + return READ_ONCE(stab->sks[key]); +} + +static void *sock_map_lookup(struct bpf_map *map, void *key) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, + struct sock **psk) +{ + struct sock *sk; + + raw_spin_lock_bh(&stab->lock); + sk = *psk; + if (!sk_test || sk_test == sk) + *psk = NULL; + raw_spin_unlock_bh(&stab->lock); + if (unlikely(!sk)) + return -EINVAL; + sock_map_unref(sk, psk); + return 0; +} + +static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk, + void *link_raw) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + + __sock_map_delete(stab, sk, link_raw); +} + +static int sock_map_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + u32 i = *(u32 *)key; + struct sock **psk; + + if (unlikely(i >= map->max_entries)) + return -EINVAL; + + psk = &stab->sks[i]; + return __sock_map_delete(stab, NULL, psk); +} + +static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + u32 i = key ? 
*(u32 *)key : U32_MAX; + u32 *key_next = next; + + if (i == stab->map.max_entries - 1) + return -ENOENT; + if (i >= stab->map.max_entries) + *key_next = 0; + else + *key_next = i + 1; + return 0; +} + +static int sock_map_update_common(struct bpf_map *map, u32 idx, + struct sock *sk, u64 flags) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + struct sk_psock_link *link; + struct sk_psock *psock; + struct sock *osk; + int ret; + + WARN_ON_ONCE(!rcu_read_lock_held()); + if (unlikely(flags > BPF_EXIST)) + return -EINVAL; + if (unlikely(idx >= map->max_entries)) + return -E2BIG; + + link = sk_psock_init_link(); + if (!link) + return -ENOMEM; + + ret = sock_map_link(map, &stab->progs, sk); + if (ret < 0) + goto out_free; + + psock = sk_psock(sk); + WARN_ON_ONCE(!psock); + + raw_spin_lock_bh(&stab->lock); + osk = stab->sks[idx]; + if (osk && flags == BPF_NOEXIST) { + ret = -EEXIST; + goto out_unlock; + } else if (!osk && flags == BPF_EXIST) { + ret = -ENOENT; + goto out_unlock; + } + + sock_map_add_link(psock, link, map, &stab->sks[idx]); + stab->sks[idx] = sk; + if (osk) + sock_map_unref(osk, &stab->sks[idx]); + raw_spin_unlock_bh(&stab->lock); + return 0; +out_unlock: + raw_spin_unlock_bh(&stab->lock); + if (psock) + sk_psock_put(sk, psock); +out_free: + sk_psock_free_link(link); + return ret; +} + +static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops) +{ + return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || + ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; +} + +static bool sock_map_sk_is_suitable(const struct sock *sk) +{ + return sk->sk_type == SOCK_STREAM && + sk->sk_protocol == IPPROTO_TCP; +} + +static int sock_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + u32 ufd = *(u32 *)value; + u32 idx = *(u32 *)key; + struct socket *sock; + struct sock *sk; + int ret; + + sock = sockfd_lookup(ufd, &ret); + if (!sock) + return ret; + sk = sock->sk; + if (!sk) { + ret = -EINVAL; + goto out; + } + if (!sock_map_sk_is_suitable(sk) || + sk->sk_state != TCP_ESTABLISHED) { + ret = -EOPNOTSUPP; + goto out; + } + + sock_map_sk_acquire(sk); + ret = sock_map_update_common(map, idx, sk, flags); + sock_map_sk_release(sk); +out: + fput(sock->file); + return ret; +} + +BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops, + struct bpf_map *, map, void *, key, u64, flags) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (likely(sock_map_sk_is_suitable(sops->sk) && + sock_map_op_okay(sops))) + return sock_map_update_common(map, *(u32 *)key, sops->sk, + flags); + return -EOPNOTSUPP; +} + +const struct bpf_func_proto bpf_sock_map_update_proto = { + .func = bpf_sock_map_update, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, + struct bpf_map *, map, u32, key, u64, flags) +{ + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + + if (unlikely(flags & ~(BPF_F_INGRESS))) + return SK_DROP; + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key); + if (!tcb->bpf.sk_redir) + return SK_DROP; + return SK_PASS; +} + +const struct bpf_func_proto bpf_sk_redirect_map_proto = { + .func = bpf_sk_redirect_map, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_msg_redirect_map, struct 
sk_msg *, msg, + struct bpf_map *, map, u32, key, u64, flags) +{ + if (unlikely(flags & ~(BPF_F_INGRESS))) + return SK_DROP; + msg->flags = flags; + msg->sk_redir = __sock_map_lookup_elem(map, key); + if (!msg->sk_redir) + return SK_DROP; + return SK_PASS; +} + +const struct bpf_func_proto bpf_msg_redirect_map_proto = { + .func = bpf_msg_redirect_map, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_ANYTHING, +}; + +const struct bpf_map_ops sock_map_ops = { + .map_alloc = sock_map_alloc, + .map_free = sock_map_free, + .map_get_next_key = sock_map_get_next_key, + .map_update_elem = sock_map_update_elem, + .map_delete_elem = sock_map_delete_elem, + .map_lookup_elem = sock_map_lookup, + .map_release_uref = sock_map_release_progs, + .map_check_btf = map_check_no_btf, +}; + +struct bpf_htab_elem { + struct rcu_head rcu; + u32 hash; + struct sock *sk; + struct hlist_node node; + u8 key[0]; +}; + +struct bpf_htab_bucket { + struct hlist_head head; + raw_spinlock_t lock; +}; + +struct bpf_htab { + struct bpf_map map; + struct bpf_htab_bucket *buckets; + u32 buckets_num; + u32 elem_size; + struct sk_psock_progs progs; + atomic_t count; +}; + +static inline u32 sock_hash_bucket_hash(const void *key, u32 len) +{ + return jhash(key, len, 0); +} + +static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab, + u32 hash) +{ + return &htab->buckets[hash & (htab->buckets_num - 1)]; +} + +static struct bpf_htab_elem * +sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, + u32 key_size) +{ + struct bpf_htab_elem *elem; + + hlist_for_each_entry_rcu(elem, head, node) { + if (elem->hash == hash && + !memcmp(&elem->key, key, key_size)) + return elem; + } + + return NULL; +} + +static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + u32 key_size = map->key_size, hash; + struct bpf_htab_bucket *bucket; + struct bpf_htab_elem *elem; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + hash = sock_hash_bucket_hash(key, key_size); + bucket = sock_hash_select_bucket(htab, hash); + elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); + + return elem ? elem->sk : NULL; +} + +static void sock_hash_free_elem(struct bpf_htab *htab, + struct bpf_htab_elem *elem) +{ + atomic_dec(&htab->count); + kfree_rcu(elem, rcu); +} + +static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, + void *link_raw) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_htab_elem *elem_probe, *elem = link_raw; + struct bpf_htab_bucket *bucket; + + WARN_ON_ONCE(!rcu_read_lock_held()); + bucket = sock_hash_select_bucket(htab, elem->hash); + + /* elem may be deleted in parallel from the map, but access here + * is okay since it's going away only after RCU grace period. + * However, we need to check whether it's still present. 
+ */ + raw_spin_lock_bh(&bucket->lock); + elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash, + elem->key, map->key_size); + if (elem_probe && elem_probe == elem) { + hlist_del_rcu(&elem->node); + sock_map_unref(elem->sk, elem); + sock_hash_free_elem(htab, elem); + } + raw_spin_unlock_bh(&bucket->lock); +} + +static int sock_hash_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + u32 hash, key_size = map->key_size; + struct bpf_htab_bucket *bucket; + struct bpf_htab_elem *elem; + int ret = -ENOENT; + + hash = sock_hash_bucket_hash(key, key_size); + bucket = sock_hash_select_bucket(htab, hash); + + raw_spin_lock_bh(&bucket->lock); + elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); + if (elem) { + hlist_del_rcu(&elem->node); + sock_map_unref(elem->sk, elem); + sock_hash_free_elem(htab, elem); + ret = 0; + } + raw_spin_unlock_bh(&bucket->lock); + return ret; +} + +static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab, + void *key, u32 key_size, + u32 hash, struct sock *sk, + struct bpf_htab_elem *old) +{ + struct bpf_htab_elem *new; + + if (atomic_inc_return(&htab->count) > htab->map.max_entries) { + if (!old) { + atomic_dec(&htab->count); + return ERR_PTR(-E2BIG); + } + } + + new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, + htab->map.numa_node); + if (!new) { + atomic_dec(&htab->count); + return ERR_PTR(-ENOMEM); + } + memcpy(new->key, key, key_size); + new->sk = sk; + new->hash = hash; + return new; +} + +static int sock_hash_update_common(struct bpf_map *map, void *key, + struct sock *sk, u64 flags) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + u32 key_size = map->key_size, hash; + struct bpf_htab_elem *elem, *elem_new; + struct bpf_htab_bucket *bucket; + struct sk_psock_link *link; + struct sk_psock *psock; + int ret; + + WARN_ON_ONCE(!rcu_read_lock_held()); + if (unlikely(flags > BPF_EXIST)) + return -EINVAL; + + link = sk_psock_init_link(); + if (!link) + return -ENOMEM; + + ret = sock_map_link(map, &htab->progs, sk); + if (ret < 0) + goto out_free; + + psock = sk_psock(sk); + WARN_ON_ONCE(!psock); + + hash = sock_hash_bucket_hash(key, key_size); + bucket = sock_hash_select_bucket(htab, hash); + + raw_spin_lock_bh(&bucket->lock); + elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); + if (elem && flags == BPF_NOEXIST) { + ret = -EEXIST; + goto out_unlock; + } else if (!elem && flags == BPF_EXIST) { + ret = -ENOENT; + goto out_unlock; + } + + elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem); + if (IS_ERR(elem_new)) { + ret = PTR_ERR(elem_new); + goto out_unlock; + } + + sock_map_add_link(psock, link, map, elem_new); + /* Add new element to the head of the list, so that + * concurrent search will find it before old elem. 
+ */ + hlist_add_head_rcu(&elem_new->node, &bucket->head); + if (elem) { + hlist_del_rcu(&elem->node); + sock_map_unref(elem->sk, elem); + sock_hash_free_elem(htab, elem); + } + raw_spin_unlock_bh(&bucket->lock); + return 0; +out_unlock: + raw_spin_unlock_bh(&bucket->lock); + sk_psock_put(sk, psock); +out_free: + sk_psock_free_link(link); + return ret; +} + +static int sock_hash_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + u32 ufd = *(u32 *)value; + struct socket *sock; + struct sock *sk; + int ret; + + sock = sockfd_lookup(ufd, &ret); + if (!sock) + return ret; + sk = sock->sk; + if (!sk) { + ret = -EINVAL; + goto out; + } + if (!sock_map_sk_is_suitable(sk) || + sk->sk_state != TCP_ESTABLISHED) { + ret = -EOPNOTSUPP; + goto out; + } + + sock_map_sk_acquire(sk); + ret = sock_hash_update_common(map, key, sk, flags); + sock_map_sk_release(sk); +out: + fput(sock->file); + return ret; +} + +static int sock_hash_get_next_key(struct bpf_map *map, void *key, + void *key_next) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_htab_elem *elem, *elem_next; + u32 hash, key_size = map->key_size; + struct hlist_head *head; + int i = 0; + + if (!key) + goto find_first_elem; + hash = sock_hash_bucket_hash(key, key_size); + head = &sock_hash_select_bucket(htab, hash)->head; + elem = sock_hash_lookup_elem_raw(head, hash, key, key_size); + if (!elem) + goto find_first_elem; + + elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)), + struct bpf_htab_elem, node); + if (elem_next) { + memcpy(key_next, elem_next->key, key_size); + return 0; + } + + i = hash & (htab->buckets_num - 1); + i++; +find_first_elem: + for (; i < htab->buckets_num; i++) { + head = &sock_hash_select_bucket(htab, i)->head; + elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), + struct bpf_htab_elem, node); + if (elem_next) { + memcpy(key_next, elem_next->key, key_size); + return 0; + } + } + + return -ENOENT; +} + +static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) +{ + struct bpf_htab *htab; + int i, err; + u64 cost; + + if (!capable(CAP_NET_ADMIN)) + return ERR_PTR(-EPERM); + if (attr->max_entries == 0 || + attr->key_size == 0 || + attr->value_size != 4 || + attr->map_flags & ~SOCK_CREATE_FLAG_MASK) + return ERR_PTR(-EINVAL); + if (attr->key_size > MAX_BPF_STACK) + return ERR_PTR(-E2BIG); + + htab = kzalloc(sizeof(*htab), GFP_USER); + if (!htab) + return ERR_PTR(-ENOMEM); + + bpf_map_init_from_attr(&htab->map, attr); + + htab->buckets_num = roundup_pow_of_two(htab->map.max_entries); + htab->elem_size = sizeof(struct bpf_htab_elem) + + round_up(htab->map.key_size, 8); + if (htab->buckets_num == 0 || + htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) { + err = -EINVAL; + goto free_htab; + } + + cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) + + (u64) htab->elem_size * htab->map.max_entries; + if (cost >= U32_MAX - PAGE_SIZE) { + err = -EINVAL; + goto free_htab; + } + + htab->buckets = bpf_map_area_alloc(htab->buckets_num * + sizeof(struct bpf_htab_bucket), + htab->map.numa_node); + if (!htab->buckets) { + err = -ENOMEM; + goto free_htab; + } + + for (i = 0; i < htab->buckets_num; i++) { + INIT_HLIST_HEAD(&htab->buckets[i].head); + raw_spin_lock_init(&htab->buckets[i].lock); + } + + return &htab->map; +free_htab: + kfree(htab); + return ERR_PTR(err); +} + +static void sock_hash_free(struct bpf_map *map) +{ + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct 
bpf_htab_bucket *bucket; + struct bpf_htab_elem *elem; + struct hlist_node *node; + int i; + + synchronize_rcu(); + rcu_read_lock(); + for (i = 0; i < htab->buckets_num; i++) { + bucket = sock_hash_select_bucket(htab, i); + raw_spin_lock_bh(&bucket->lock); + hlist_for_each_entry_safe(elem, node, &bucket->head, node) { + hlist_del_rcu(&elem->node); + sock_map_unref(elem->sk, elem); + } + raw_spin_unlock_bh(&bucket->lock); + } + rcu_read_unlock(); + + bpf_map_area_free(htab->buckets); + kfree(htab); +} + +static void sock_hash_release_progs(struct bpf_map *map) +{ + psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs); +} + +BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops, + struct bpf_map *, map, void *, key, u64, flags) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (likely(sock_map_sk_is_suitable(sops->sk) && + sock_map_op_okay(sops))) + return sock_hash_update_common(map, key, sops->sk, flags); + return -EOPNOTSUPP; +} + +const struct bpf_func_proto bpf_sock_hash_update_proto = { + .func = bpf_sock_hash_update, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, + struct bpf_map *, map, void *, key, u64, flags) +{ + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); + + if (unlikely(flags & ~(BPF_F_INGRESS))) + return SK_DROP; + tcb->bpf.flags = flags; + tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key); + if (!tcb->bpf.sk_redir) + return SK_DROP; + return SK_PASS; +} + +const struct bpf_func_proto bpf_sk_redirect_hash_proto = { + .func = bpf_sk_redirect_hash, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, +}; + +BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg, + struct bpf_map *, map, void *, key, u64, flags) +{ + if (unlikely(flags & ~(BPF_F_INGRESS))) + return SK_DROP; + msg->flags = flags; + msg->sk_redir = __sock_hash_lookup_elem(map, key); + if (!msg->sk_redir) + return SK_DROP; + return SK_PASS; +} + +const struct bpf_func_proto bpf_msg_redirect_hash_proto = { + .func = bpf_msg_redirect_hash, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, +}; + +const struct bpf_map_ops sock_hash_ops = { + .map_alloc = sock_hash_alloc, + .map_free = sock_hash_free, + .map_get_next_key = sock_hash_get_next_key, + .map_update_elem = sock_hash_update_elem, + .map_delete_elem = sock_hash_delete_elem, + .map_lookup_elem = sock_map_lookup, + .map_release_uref = sock_hash_release_progs, + .map_check_btf = map_check_no_btf, +}; + +static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) +{ + switch (map->map_type) { + case BPF_MAP_TYPE_SOCKMAP: + return &container_of(map, struct bpf_stab, map)->progs; + case BPF_MAP_TYPE_SOCKHASH: + return &container_of(map, struct bpf_htab, map)->progs; + default: + break; + } + + return NULL; +} + +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + u32 which) +{ + struct sk_psock_progs *progs = sock_map_progs(map); + + if (!progs) + return -EOPNOTSUPP; + + switch (which) { + case BPF_SK_MSG_VERDICT: + psock_set_prog(&progs->msg_parser, prog); + break; + case BPF_SK_SKB_STREAM_PARSER: + psock_set_prog(&progs->skb_parser, prog); + break; + case 
BPF_SK_SKB_STREAM_VERDICT: + psock_set_prog(&progs->skb_verdict, prog); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link) +{ + switch (link->map->map_type) { + case BPF_MAP_TYPE_SOCKMAP: + return sock_map_delete_from_link(link->map, sk, + link->link_raw); + case BPF_MAP_TYPE_SOCKHASH: + return sock_hash_delete_from_link(link->map, sk, + link->link_raw); + default: + break; + } +} diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 7446b98661d8..58629314eae9 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o +obj-$(CONFIG_NET_SOCK_MSG) += tcp_bpf.o obj-$(CONFIG_NETLABEL) += cipso_ipv4.o obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c new file mode 100644 index 000000000000..80debb0daf37 --- /dev/null +++ b/net/ipv4/tcp_bpf.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ + +#include +#include +#include +#include +#include + +#include + +static bool tcp_bpf_stream_read(const struct sock *sk) +{ + struct sk_psock *psock; + bool empty = true; + + rcu_read_lock(); + psock = sk_psock(sk); + if (likely(psock)) + empty = list_empty(&psock->ingress_msg); + rcu_read_unlock(); + return !empty; +} + +static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock, + int flags, long timeo, int *err) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int ret; + + add_wait_queue(sk_sleep(sk), &wait); + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + ret = sk_wait_event(sk, &timeo, + !list_empty(&psock->ingress_msg) || + !skb_queue_empty(&sk->sk_receive_queue), &wait); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + remove_wait_queue(sk_sleep(sk), &wait); + return ret; +} + +int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, + struct msghdr *msg, int len) +{ + struct iov_iter *iter = &msg->msg_iter; + int i, ret, copied = 0; + + while (copied != len) { + struct scatterlist *sge; + struct sk_msg *msg_rx; + + msg_rx = list_first_entry_or_null(&psock->ingress_msg, + struct sk_msg, list); + if (unlikely(!msg_rx)) + break; + + i = msg_rx->sg.start; + do { + struct page *page; + int copy; + + sge = sk_msg_elem(msg_rx, i); + copy = sge->length; + page = sg_page(sge); + if (copied + copy > len) + copy = len - copied; + ret = copy_page_to_iter(page, sge->offset, copy, iter); + if (ret != copy) { + msg_rx->sg.start = i; + return -EFAULT; + } + + copied += copy; + sge->offset += copy; + sge->length -= copy; + sk_mem_uncharge(sk, copy); + if (!sge->length) { + i++; + if (i == MAX_SKB_FRAGS) + i = 0; + if (!msg_rx->skb) + put_page(page); + } + + if (copied == len) + break; + } while (i != msg_rx->sg.end); + + msg_rx->sg.start = i; + if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) { + list_del(&msg_rx->list); + if (msg_rx->skb) + consume_skb(msg_rx->skb); + kfree(msg_rx); + } + } + + return copied; +} +EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg); + +int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int nonblock, int flags, int *addr_len) +{ + struct sk_psock *psock; + int copied, ret; + + if (unlikely(flags & MSG_ERRQUEUE)) + return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue)) + return tcp_recvmsg(sk, msg, len, 
nonblock, flags, addr_len); + + psock = sk_psock_get(sk); + if (unlikely(!psock)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + lock_sock(sk); +msg_bytes_ready: + copied = __tcp_bpf_recvmsg(sk, psock, msg, len); + if (!copied) { + int data, err = 0; + long timeo; + + timeo = sock_rcvtimeo(sk, nonblock); + data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err); + if (data) { + if (skb_queue_empty(&sk->sk_receive_queue)) + goto msg_bytes_ready; + release_sock(sk); + sk_psock_put(sk, psock); + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + } + if (err) { + ret = err; + goto out; + } + } + ret = copied; +out: + release_sock(sk); + sk_psock_put(sk, psock); + return ret; +} + +static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg, u32 apply_bytes, int flags) +{ + bool apply = apply_bytes; + struct scatterlist *sge; + u32 size, copied = 0; + struct sk_msg *tmp; + int i, ret = 0; + + tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL); + if (unlikely(!tmp)) + return -ENOMEM; + + lock_sock(sk); + tmp->sg.start = msg->sg.start; + i = msg->sg.start; + do { + sge = sk_msg_elem(msg, i); + size = (apply && apply_bytes < sge->length) ? + apply_bytes : sge->length; + if (!sk_wmem_schedule(sk, size)) { + if (!copied) + ret = -ENOMEM; + break; + } + + sk_mem_charge(sk, size); + sk_msg_xfer(tmp, msg, i, size); + copied += size; + if (sge->length) + get_page(sk_msg_page(tmp, i)); + sk_msg_iter_var_next(i); + tmp->sg.end = i; + if (apply) { + apply_bytes -= size; + if (!apply_bytes) + break; + } + } while (i != msg->sg.end); + + if (!ret) { + msg->sg.start = i; + msg->sg.size -= apply_bytes; + sk_psock_queue_msg(psock, tmp); + sk->sk_data_ready(sk); + } else { + sk_msg_free(sk, tmp); + kfree(tmp); + } + + release_sock(sk); + return ret; +} + +static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, + int flags, bool uncharge) +{ + bool apply = apply_bytes; + struct scatterlist *sge; + struct page *page; + int size, ret = 0; + u32 off; + + while (1) { + sge = sk_msg_elem(msg, msg->sg.start); + size = (apply && apply_bytes < sge->length) ? + apply_bytes : sge->length; + off = sge->offset; + page = sg_page(sge); + + tcp_rate_check_app_limited(sk); +retry: + ret = do_tcp_sendpages(sk, page, off, size, flags); + if (ret <= 0) + return ret; + if (apply) + apply_bytes -= ret; + msg->sg.size -= ret; + sge->offset += ret; + sge->length -= ret; + if (uncharge) + sk_mem_uncharge(sk, ret); + if (ret != size) { + size -= ret; + off += ret; + goto retry; + } + if (!sge->length) { + put_page(page); + sk_msg_iter_next(msg, start); + sg_init_table(sge, 1); + if (msg->sg.start == msg->sg.end) + break; + } + if (apply && !apply_bytes) + break; + } + + return 0; +} + +static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg, + u32 apply_bytes, int flags, bool uncharge) +{ + int ret; + + lock_sock(sk); + ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge); + release_sock(sk); + return ret; +} + +int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, + u32 bytes, int flags) +{ + bool ingress = sk_msg_to_ingress(msg); + struct sk_psock *psock = sk_psock_get(sk); + int ret; + + if (unlikely(!psock)) { + sk_msg_free(sk, msg); + return 0; + } + ret = ingress ? 
bpf_tcp_ingress(sk, psock, msg, bytes, flags) : + tcp_bpf_push_locked(sk, msg, bytes, flags, false); + sk_psock_put(sk, psock); + return ret; +} +EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); + +static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg, int *copied, int flags) +{ + bool cork = false, enospc = msg->sg.start == msg->sg.end; + struct sock *sk_redir; + u32 tosend; + int ret; + +more_data: + if (psock->eval == __SK_NONE) + psock->eval = sk_psock_msg_verdict(sk, psock, msg); + + if (msg->cork_bytes && + msg->cork_bytes > msg->sg.size && !enospc) { + psock->cork_bytes = msg->cork_bytes - msg->sg.size; + if (!psock->cork) { + psock->cork = kzalloc(sizeof(*psock->cork), + GFP_ATOMIC | __GFP_NOWARN); + if (!psock->cork) + return -ENOMEM; + } + memcpy(psock->cork, msg, sizeof(*msg)); + return 0; + } + + tosend = msg->sg.size; + if (psock->apply_bytes && psock->apply_bytes < tosend) + tosend = psock->apply_bytes; + + switch (psock->eval) { + case __SK_PASS: + ret = tcp_bpf_push(sk, msg, tosend, flags, true); + if (unlikely(ret)) { + *copied -= sk_msg_free(sk, msg); + break; + } + sk_msg_apply_bytes(psock, tosend); + break; + case __SK_REDIRECT: + sk_redir = psock->sk_redir; + sk_msg_apply_bytes(psock, tosend); + if (psock->cork) { + cork = true; + psock->cork = NULL; + } + sk_msg_return(sk, msg, tosend); + release_sock(sk); + ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); + lock_sock(sk); + if (unlikely(ret < 0)) { + int free = sk_msg_free_nocharge(sk, msg); + + if (!cork) + *copied -= free; + } + if (cork) { + sk_msg_free(sk, msg); + kfree(msg); + msg = NULL; + ret = 0; + } + break; + case __SK_DROP: + default: + sk_msg_free_partial(sk, msg, tosend); + sk_msg_apply_bytes(psock, tosend); + *copied -= tosend; + return -EACCES; + } + + if (likely(!ret)) { + if (!psock->apply_bytes) { + psock->eval = __SK_NONE; + if (psock->sk_redir) { + sock_put(psock->sk_redir); + psock->sk_redir = NULL; + } + } + if (msg && + msg->sg.data[msg->sg.start].page_link && + msg->sg.data[msg->sg.start].length) + goto more_data; + } + return ret; +} + +static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct sk_msg tmp, *msg_tx = NULL; + int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS; + int copied = 0, err = 0; + struct sk_psock *psock; + long timeo; + + psock = sk_psock_get(sk); + if (unlikely(!psock)) + return tcp_sendmsg(sk, msg, size); + + lock_sock(sk); + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); + while (msg_data_left(msg)) { + bool enospc = false; + u32 copy, osize; + + if (sk->sk_err) { + err = -sk->sk_err; + goto out_err; + } + + copy = msg_data_left(msg); + if (!sk_stream_memory_free(sk)) + goto wait_for_sndbuf; + if (psock->cork) { + msg_tx = psock->cork; + } else { + msg_tx = &tmp; + sk_msg_init(msg_tx); + } + + osize = msg_tx->sg.size; + err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1); + if (err) { + if (err != -ENOSPC) + goto wait_for_memory; + enospc = true; + copy = msg_tx->sg.size - osize; + } + + err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx, + copy); + if (err < 0) { + sk_msg_trim(sk, msg_tx, osize); + goto out_err; + } + + copied += copy; + if (psock->cork_bytes) { + if (size > psock->cork_bytes) + psock->cork_bytes = 0; + else + psock->cork_bytes -= size; + if (psock->cork_bytes && !enospc) + goto out_err; + /* All cork bytes are accounted, rerun the prog. 
*/ + psock->eval = __SK_NONE; + psock->cork_bytes = 0; + } + + err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags); + if (unlikely(err < 0)) + goto out_err; + continue; +wait_for_sndbuf: + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); +wait_for_memory: + err = sk_stream_wait_memory(sk, &timeo); + if (err) { + if (msg_tx && msg_tx != psock->cork) + sk_msg_free(sk, msg_tx); + goto out_err; + } + } +out_err: + if (err < 0) + err = sk_stream_error(sk, msg->msg_flags, err); + release_sock(sk); + sk_psock_put(sk, psock); + return copied ? copied : err; +} + +static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset, + size_t size, int flags) +{ + struct sk_msg tmp, *msg = NULL; + int err = 0, copied = 0; + struct sk_psock *psock; + bool enospc = false; + + psock = sk_psock_get(sk); + if (unlikely(!psock)) + return tcp_sendpage(sk, page, offset, size, flags); + + lock_sock(sk); + if (psock->cork) { + msg = psock->cork; + } else { + msg = &tmp; + sk_msg_init(msg); + } + + /* Catch case where ring is full and sendpage is stalled. */ + if (unlikely(sk_msg_full(msg))) + goto out_err; + + sk_msg_page_add(msg, page, size, offset); + sk_mem_charge(sk, size); + copied = size; + if (sk_msg_full(msg)) + enospc = true; + if (psock->cork_bytes) { + if (size > psock->cork_bytes) + psock->cork_bytes = 0; + else + psock->cork_bytes -= size; + if (psock->cork_bytes && !enospc) + goto out_err; + /* All cork bytes are accounted, rerun the prog. */ + psock->eval = __SK_NONE; + psock->cork_bytes = 0; + } + + err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags); +out_err: + release_sock(sk); + sk_psock_put(sk, psock); + return copied ? copied : err; +} + +static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock) +{ + struct sk_psock_link *link; + + sk_psock_cork_free(psock); + __sk_psock_purge_ingress_msg(psock); + while ((link = sk_psock_link_pop(psock))) { + sk_psock_unlink(sk, link); + sk_psock_free_link(link); + } +} + +static void tcp_bpf_unhash(struct sock *sk) +{ + void (*saved_unhash)(struct sock *sk); + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + if (sk->sk_prot->unhash) + sk->sk_prot->unhash(sk); + return; + } + + saved_unhash = psock->saved_unhash; + tcp_bpf_remove(sk, psock); + rcu_read_unlock(); + saved_unhash(sk); +} + +static void tcp_bpf_close(struct sock *sk, long timeout) +{ + void (*saved_close)(struct sock *sk, long timeout); + struct sk_psock *psock; + + lock_sock(sk); + rcu_read_lock(); + psock = sk_psock(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + release_sock(sk); + return sk->sk_prot->close(sk, timeout); + } + + saved_close = psock->saved_close; + tcp_bpf_remove(sk, psock); + rcu_read_unlock(); + release_sock(sk); + saved_close(sk, timeout); +} + +enum { + TCP_BPF_IPV4, + TCP_BPF_IPV6, + TCP_BPF_NUM_PROTS, +}; + +enum { + TCP_BPF_BASE, + TCP_BPF_TX, + TCP_BPF_NUM_CFGS, +}; + +static struct proto *tcpv6_prot_saved __read_mostly; +static DEFINE_SPINLOCK(tcpv6_prot_lock); +static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS]; + +static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS], + struct proto *base) +{ + prot[TCP_BPF_BASE] = *base; + prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash; + prot[TCP_BPF_BASE].close = tcp_bpf_close; + prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg; + prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read; + + prot[TCP_BPF_TX] = prot[TCP_BPF_BASE]; + prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg; + 
prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage; +} + +static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops) +{ + if (sk->sk_family == AF_INET6 && + unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) { + spin_lock_bh(&tcpv6_prot_lock); + if (likely(ops != tcpv6_prot_saved)) { + tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops); + smp_store_release(&tcpv6_prot_saved, ops); + } + spin_unlock_bh(&tcpv6_prot_lock); + } +} + +static int __init tcp_bpf_v4_build_proto(void) +{ + tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); + return 0; +} +core_initcall(tcp_bpf_v4_build_proto); + +static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock) +{ + int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; + int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; + + sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]); +} + +static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock) +{ + int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; + int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; + + /* Reinit occurs when program types change e.g. TCP_BPF_TX is removed + * or added requiring sk_prot hook updates. We keep original saved + * hooks in this case. + */ + sk->sk_prot = &tcp_bpf_prots[family][config]; +} + +static int tcp_bpf_assert_proto_ops(struct proto *ops) +{ + /* In order to avoid retpoline, we make assumptions when we call + * into ops if e.g. a psock is not present. Make sure they are + * indeed valid assumptions. + */ + return ops->recvmsg == tcp_recvmsg && + ops->sendmsg == tcp_sendmsg && + ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP; +} + +void tcp_bpf_reinit(struct sock *sk) +{ + struct sk_psock *psock; + + sock_owned_by_me(sk); + + rcu_read_lock(); + psock = sk_psock(sk); + tcp_bpf_reinit_sk_prot(sk, psock); + rcu_read_unlock(); +} + +int tcp_bpf_init(struct sock *sk) +{ + struct proto *ops = READ_ONCE(sk->sk_prot); + struct sk_psock *psock; + + sock_owned_by_me(sk); + + rcu_read_lock(); + psock = sk_psock(sk); + if (unlikely(!psock || psock->sk_proto || + tcp_bpf_assert_proto_ops(ops))) { + rcu_read_unlock(); + return -EINVAL; + } + tcp_bpf_check_v6_needs_rebuild(sk, ops); + tcp_bpf_update_sk_prot(sk, psock); + rcu_read_unlock(); + return 0; +} diff --git a/net/strparser/Kconfig b/net/strparser/Kconfig index 6cff3f6d0c3a..94da19a2a220 100644 --- a/net/strparser/Kconfig +++ b/net/strparser/Kconfig @@ -1,4 +1,2 @@ - config STREAM_PARSER - tristate - default n + def_bool n -- cgit v1.2.3 From 4766ab5677a2842834f9bc4a21587256a811531c Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Fri, 12 Oct 2018 17:42:27 -0400 Subject: locking/lockdep: Remove duplicated 'lock_class_ops' percpu array Remove the duplicated 'lock_class_ops' percpu array that is not used anywhere. 
Signed-off-by: Waiman Long Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Fixes: 8ca2b56cd7da ("locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y") Link: http://lkml.kernel.org/r/1539380547-16726-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index a0f83058d6aa..8a732c856624 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -436,7 +436,6 @@ unsigned int max_lockdep_depth; * Various lockdep statistics: */ DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); -DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops); #endif /* -- cgit v1.2.3 From 9845c49cc9bbb317a0bc9e9cf78d8e09d54c9af0 Mon Sep 17 00:00:00 2001 From: Song Muchun Date: Sun, 14 Oct 2018 19:26:12 +0800 Subject: sched/fair: Fix the min_vruntime update logic in dequeue_entity() The comment and the code around the update_min_vruntime() call in dequeue_entity() are not in agreement. From commit: b60205c7c558 ("sched/fair: Fix min_vruntime tracking") I think that we want to update min_vruntime when a task is sleeping/migrating. So, the check is inverted there - fix it. Signed-off-by: Song Muchun Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: b60205c7c558 ("sched/fair: Fix min_vruntime tracking") Link: http://lkml.kernel.org/r/20181014112612.2614-1-smuchun@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f88e00705b55..908c9cdae2f0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4001,7 +4001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * put back on, and if we advance min_vruntime, we'll be placed back * further than we started -- ie. we'll be penalized. */ - if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) + if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); } -- cgit v1.2.3 From 53bf57fab7321fb42b703056a4c80fc9d986d170 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 26 Sep 2018 13:01:18 +0200 Subject: locking/qspinlock: Re-order code Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL) such that we loose the indent. This also result in a more natural code flow IMO. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: andrea.parri@amarulasolutions.com Cc: longman@redhat.com Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 56 ++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index bfaeb05123ff..ec343276f975 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -330,39 +330,37 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * 0,0,1 -> 0,1,1 ; pending */ val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); - if (!(val & ~_Q_LOCKED_MASK)) { - /* - * We're pending, wait for the owner to go away. 
- * - * *,1,1 -> *,1,0 - * - * this wait loop must be a load-acquire such that we match the - * store-release that clears the locked bit and create lock - * sequentiality; this is because not all - * clear_pending_set_locked() implementations imply full - * barriers. - */ - if (val & _Q_LOCKED_MASK) { - atomic_cond_read_acquire(&lock->val, - !(VAL & _Q_LOCKED_MASK)); - } - - /* - * take ownership and clear the pending bit. - * - * *,1,0 -> *,0,1 - */ - clear_pending_set_locked(lock); - qstat_inc(qstat_lock_pending, true); - return; + /* + * If we observe any contention; undo and queue. + */ + if (unlikely(val & ~_Q_LOCKED_MASK)) { + if (!(val & _Q_PENDING_MASK)) + clear_pending(lock); + goto queue; } /* - * If pending was clear but there are waiters in the queue, then - * we need to undo our setting of pending before we queue ourselves. + * We're pending, wait for the owner to go away. + * + * 0,1,1 -> 0,1,0 + * + * this wait loop must be a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because not all + * clear_pending_set_locked() implementations imply full + * barriers. + */ + if (val & _Q_LOCKED_MASK) + atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK)); + + /* + * take ownership and clear the pending bit. + * + * 0,1,0 -> 0,0,1 */ - if (!(val & _Q_PENDING_MASK)) - clear_pending(lock); + clear_pending_set_locked(lock); + qstat_inc(qstat_lock_pending, true); + return; /* * End of pending bit optimistic spinning and beginning of MCS -- cgit v1.2.3 From 756b1df4c2c82a1cdffeafa9d2aa76c92e7fb405 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 26 Sep 2018 13:01:19 +0200 Subject: locking/qspinlock: Rework some comments While working my way through the code again; I felt the comments could use help. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: andrea.parri@amarulasolutions.com Cc: longman@redhat.com Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ec343276f975..47cb99787e4d 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -326,16 +326,23 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) /* * trylock || pending * - * 0,0,0 -> 0,0,1 ; trylock - * 0,0,1 -> 0,1,1 ; pending + * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock */ val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); + /* - * If we observe any contention; undo and queue. + * If we observe contention, there is a concurrent locker. + * + * Undo and queue; our setting of PENDING might have made the + * n,0,0 -> 0,0,0 transition fail and it will now be waiting + * on @next to become !NULL. */ if (unlikely(val & ~_Q_LOCKED_MASK)) { + + /* Undo PENDING if we set it. */ if (!(val & _Q_PENDING_MASK)) clear_pending(lock); + goto queue; } @@ -474,16 +481,25 @@ locked: */ /* - * In the PV case we might already have _Q_LOCKED_VAL set. + * In the PV case we might already have _Q_LOCKED_VAL set, because + * of lock stealing; therefore we must also allow: * - * The atomic_cond_read_acquire() call above has provided the - * necessary acquire semantics required for locking. 
+ * n,0,1 -> 0,0,1 + * + * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the + * above wait condition, therefore any concurrent setting of + * PENDING will make the uncontended transition fail. */ - if (((val & _Q_TAIL_MASK) == tail) && - atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) - goto release; /* No contention */ + if ((val & _Q_TAIL_MASK) == tail) { + if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + goto release; /* No contention */ + } - /* Either somebody is queued behind us or _Q_PENDING_VAL is set */ + /* + * Either somebody is queued behind us or _Q_PENDING_VAL got set + * which will then detect the remaining tail and queue behind us + * ensuring we'll see a @next. + */ set_locked(lock); /* -- cgit v1.2.3 From 7aa54be2976550f17c11a1c3e3630002dea39303 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 26 Sep 2018 13:01:20 +0200 Subject: locking/qspinlock, x86: Provide liveness guarantee On x86 we cannot do fetch_or() with a single instruction and thus end up using a cmpxchg loop, this reduces determinism. Replace the fetch_or() with a composite operation: tas-pending + load. Using two instructions of course opens a window we previously did not have. Consider the scenario: CPU0 CPU1 CPU2 1) lock trylock -> (0,0,1) 2) lock trylock /* fail */ 3) unlock -> (0,0,0) 4) lock trylock -> (0,0,1) 5) tas-pending -> (0,1,1) load-val <- (0,1,0) from 3 6) clear-pending-set-locked -> (0,0,1) FAIL: _2_ owners where 5) is our new composite operation. When we consider each part of the qspinlock state as a separate variable (as we can when _Q_PENDING_BITS == 8) then the above is entirely possible, because tas-pending will only RmW the pending byte, so the later load is able to observe prior tail and lock state (but not earlier than its own trylock, which operates on the whole word, due to coherence). To avoid this we need 2 things: - the load must come after the tas-pending (obviously, otherwise it can trivially observe prior state). - the tas-pending must be a full word RmW instruction, it cannot be an XCHGB for example, such that we cannot observe other state prior to setting pending. On x86 we can realize this by using "LOCK BTS m32, r32" for tas-pending followed by a regular load. Note that observing later state is not a problem: - if we fail to observe a later unlock, we'll simply spin-wait for that store to become visible. - if we observe a later xchg_tail(), there is no difference from that xchg_tail() having taken place before the tas-pending. 
Suggested-by: Will Deacon Reported-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: andrea.parri@amarulasolutions.com Cc: longman@redhat.com Fixes: 59fb586b4a07 ("locking/qspinlock: Remove unbounded cmpxchg() loop from locking slowpath") Link: https://lkml.kernel.org/r/20181003130957.183726335@infradead.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/qspinlock.h | 15 +++++++++++++++ kernel/locking/qspinlock.c | 16 +++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 3e70bed8a978..87623c6b13db 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -6,9 +6,24 @@ #include #include #include +#include #define _Q_PENDING_LOOPS (1 << 9) +#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) +{ + u32 val = 0; + + if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, + "I", _Q_PENDING_OFFSET)) + val |= _Q_PENDING_VAL; + + val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; + + return val; +} + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 47cb99787e4d..341ca666bc60 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -231,6 +231,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) } #endif /* _Q_PENDING_BITS == 8 */ +/** + * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending + * @lock : Pointer to queued spinlock structure + * Return: The previous lock value + * + * *,*,* -> *,1,* + */ +#ifndef queued_fetch_set_pending_acquire +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) +{ + return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); +} +#endif + /** * set_locked - Set the lock bit and own the lock * @lock: Pointer to queued spinlock structure @@ -328,7 +342,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock */ - val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); + val = queued_fetch_set_pending_acquire(lock); /* * If we observe contention, there is a concurrent locker. -- cgit v1.2.3 From 1222109a53637f96c581224198b86856d503f892 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 16 Oct 2018 09:45:06 -0400 Subject: locking/qspinlock_stat: Count instances of nested lock slowpaths Queued spinlock supports up to 4 levels of lock slowpath nesting - user context, soft IRQ, hard IRQ and NMI. However, we are not sure how often the nesting happens. So add 3 more per-CPU stat counters to track the number of instances where nesting index goes to 1, 2 and 3 respectively. On a dual-socket 64-core 128-thread Zen server, the following were the new stat counter values under different circumstances: State slowpath index1 index2 index3 ----- -------- ------ ------ ------- After bootup 1,012,150 82 0 0 After parallel build + perf-top 125,195,009 82 0 0 So the chance of having more than 2 levels of nesting is extremely low. [ mingo: Minor changelog edits. ] Signed-off-by: Waiman Long Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1539697507-28084-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 5 +++++ kernel/locking/qspinlock_stat.h | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 341ca666bc60..ce6af1ee2cac 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -396,6 +396,11 @@ pv_queue: node += idx; + /* + * Keep counts of non-zero index values: + */ + qstat_inc(qstat_lock_idx1 + idx - 1, idx); + /* * Ensure that we increment the head node->count before initialising * the actual node. If the compiler is kind enough to reorder these diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index 6bd78c0740fc..42d3d8dc8f49 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -55,6 +55,9 @@ enum qlock_stats { qstat_pv_wait_node, qstat_lock_pending, qstat_lock_slowpath, + qstat_lock_idx1, + qstat_lock_idx2, + qstat_lock_idx3, qstat_num, /* Total number of statistical counters */ qstat_reset_cnts = qstat_num, }; @@ -82,6 +85,9 @@ static const char * const qstat_names[qstat_num + 1] = { [qstat_pv_wait_node] = "pv_wait_node", [qstat_lock_pending] = "lock_pending", [qstat_lock_slowpath] = "lock_slowpath", + [qstat_lock_idx1] = "lock_index1", + [qstat_lock_idx2] = "lock_index2", + [qstat_lock_idx3] = "lock_index3", [qstat_reset_cnts] = "reset_counters", }; -- cgit v1.2.3 From 0fa809ca7f81c47bea6706bc689e941eb25d7e89 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 16 Oct 2018 09:45:07 -0400 Subject: locking/pvqspinlock: Extend node size when pvqspinlock is configured The qspinlock code supports up to 4 levels of slowpath nesting using four per-CPU mcs_spinlock structures. For 64-bit architectures, they fit nicely in one 64-byte cacheline. For para-virtualized (PV) qspinlocks it needs to store more information in the per-CPU node structure than there is space for. It uses a trick to use a second cacheline to hold the extra information that it needs. So PV qspinlock needs to access two extra cachelines for its information whereas the native qspinlock code only needs one extra cacheline. Freshly added counter profiling of the qspinlock code, however, revealed that it was very rare to use more than two levels of slowpath nesting. So it doesn't make sense to penalize PV qspinlock code in order to have four mcs_spinlock structures in the same cacheline to optimize for a case in the native qspinlock code that rarely happens. Extend the per-CPU node structure to have two more long words when PV qspinlock locks are configured to hold the extra data that it needs. As a result, the PV qspinlock code will enjoy the same benefit of using just one extra cacheline like the native counterpart, for most cases. [ mingo: Minor changelog edits. ] Signed-off-by: Waiman Long Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1539697507-28084-2-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 34 ++++++++++++++++++++++++++-------- kernel/locking/qspinlock_paravirt.h | 4 +--- 2 files changed, 27 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ce6af1ee2cac..8a8c3c208c5e 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -74,12 +74,24 @@ */ #include "mcs_spinlock.h" +#define MAX_NODES 4 +/* + * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in + * size and four of them will fit nicely in one 64-byte cacheline. For + * pvqspinlock, however, we need more space for extra data. To accommodate + * that, we insert two more long words to pad it up to 32 bytes. IOW, only + * two of them can fit in a cacheline in this case. That is OK as it is rare + * to have more than 2 levels of slowpath nesting in actual use. We don't + * want to penalize pvqspinlocks to optimize for a rare case in native + * qspinlocks. + */ +struct qnode { + struct mcs_spinlock mcs; #ifdef CONFIG_PARAVIRT_SPINLOCKS -#define MAX_NODES 8 -#else -#define MAX_NODES 4 + long reserved[2]; #endif +}; /* * The pending bit spinning loop count. @@ -101,7 +113,7 @@ * * PV doubles the storage and uses the second cacheline for PV state. */ -static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]); +static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]); /* * We must be able to distinguish between no-tail and the tail at 0:0, @@ -126,7 +138,13 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail) int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; - return per_cpu_ptr(&mcs_nodes[idx], cpu); + return per_cpu_ptr(&qnodes[idx].mcs, cpu); +} + +static inline __pure +struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx) +{ + return &((struct qnode *)base + idx)->mcs; } #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) @@ -390,11 +408,11 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) queue: qstat_inc(qstat_lock_slowpath, true); pv_queue: - node = this_cpu_ptr(&mcs_nodes[0]); + node = this_cpu_ptr(&qnodes[0].mcs); idx = node->count++; tail = encode_tail(smp_processor_id(), idx); - node += idx; + node = grab_mcs_node(node, idx); /* * Keep counts of non-zero index values: @@ -534,7 +552,7 @@ release: /* * release the node */ - __this_cpu_dec(mcs_nodes[0].count); + __this_cpu_dec(qnodes[0].mcs.count); } EXPORT_SYMBOL(queued_spin_lock_slowpath); diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 5a0cf5f9008c..0130e488ebfe 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -49,8 +49,6 @@ enum vcpu_state { struct pv_node { struct mcs_spinlock mcs; - struct mcs_spinlock __res[3]; - int cpu; u8 state; }; @@ -281,7 +279,7 @@ static void pv_init_node(struct mcs_spinlock *node) { struct pv_node *pn = (struct pv_node *)node; - BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock)); + BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode)); pn->cpu = smp_processor_id(); pn->state = vcpu_running; -- cgit v1.2.3 From 9c0be3f6b5d776dfe3ed249862c244a4486414dc Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 13 Oct 2018 15:10:50 -0400 Subject: tracepoint: Fix tracepoint array 
element size mismatch commit 46e0c9be206f ("kernel: tracepoints: add support for relative references") changes the layout of the __tracepoint_ptrs section on architectures supporting relative references. However, it does so without turning struct tracepoint * const into const int elsewhere in the tracepoint code, which has the following side-effect: Setting mod->num_tracepoints is done by module.c: mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", sizeof(*mod->tracepoints_ptrs), &mod->num_tracepoints); Basically, since sizeof(*mod->tracepoints_ptrs) is a pointer size (rather than sizeof(int)), num_tracepoints is erroneously set to half the size it should be on 64-bit arch. So a module with an odd number of tracepoints misses the last tracepoint due to the effect of integer division. So in the module going notifier: for_each_tracepoint_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints, tp_module_going_check_quiescent, NULL); the expression (mod->tracepoints_ptrs + mod->num_tracepoints) actually evaluates to something within the bounds of the array, but misses the last tracepoint if the number of tracepoints is odd on 64-bit arch. Fix this by introducing a new typedef: tracepoint_ptr_t, which is either "const int" on architectures that have PREL32 relocations, or "struct tracepoint * const" on architectures that do not have this feature. Also provide a new tracepoint_ptr_deref() static inline to encapsulate dereferencing this type rather than duplicate code and ugly ifdefs within the for_each_tracepoint_range() implementation. This issue appears in 4.19-rc kernels, and should ideally be fixed before the end of the rc cycle. Acked-by: Ard Biesheuvel Acked-by: Jessica Yu Link: http://lkml.kernel.org/r/20181013191050.22389-1-mathieu.desnoyers@efficios.com Link: http://lkml.kernel.org/r/20180704083651.24360-7-ard.biesheuvel@linaro.org Cc: Michael Ellerman Cc: Ingo Molnar Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: James Morris Cc: James Morris Cc: Josh Poimboeuf Cc: Kees Cook Cc: Nicolas Pitre Cc: Paul Mackerras Cc: Petr Mladek Cc: Russell King Cc: "Serge E. 
Hallyn" Cc: Sergey Senozhatsky Cc: Thomas Garnier Cc: Thomas Gleixner Cc: Will Deacon Cc: Andrew Morton Cc: Linus Torvalds Cc: Greg Kroah-Hartman Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (VMware) --- include/linux/module.h | 3 ++- include/linux/tracepoint-defs.h | 6 ++++++ include/linux/tracepoint.h | 36 +++++++++++++++++++++++------------- kernel/tracepoint.c | 24 ++++++++---------------- 4 files changed, 39 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/include/linux/module.h b/include/linux/module.h index f807f15bebbe..e19ae08c7fb8 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -430,7 +431,7 @@ struct module { #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; - struct tracepoint * const *tracepoints_ptrs; + tracepoint_ptr_t *tracepoints_ptrs; #endif #ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 22c5a46e9693..49ba9cde7e4b 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -35,6 +35,12 @@ struct tracepoint { struct tracepoint_func __rcu *funcs; }; +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +typedef const int tracepoint_ptr_t; +#else +typedef struct tracepoint * const tracepoint_ptr_t; +#endif + struct bpf_raw_event_map { struct tracepoint *tp; void *bpf_func; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 041f7e56a289..538ba1a58f5b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -99,6 +99,29 @@ extern void syscall_unregfunc(void); #define TRACE_DEFINE_ENUM(x) #define TRACE_DEFINE_SIZEOF(x) +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) +{ + return offset_to_ptr(p); +} + +#define __TRACEPOINT_ENTRY(name) \ + asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ + " .balign 4 \n" \ + " .long __tracepoint_" #name " - . \n" \ + " .previous \n") +#else +static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) +{ + return *p; +} + +#define __TRACEPOINT_ENTRY(name) \ + static tracepoint_ptr_t __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ + &__tracepoint_##name +#endif + #endif /* _LINUX_TRACEPOINT_H */ /* @@ -253,19 +276,6 @@ extern void syscall_unregfunc(void); return static_key_false(&__tracepoint_##name.key); \ } -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define __TRACEPOINT_ENTRY(name) \ - asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ - " .balign 4 \n" \ - " .long __tracepoint_" #name " - . 
\n" \ - " .previous \n") -#else -#define __TRACEPOINT_ENTRY(name) \ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name -#endif - /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that will be used for iteration diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index bf2c06ef9afc..a3be42304485 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -28,8 +28,8 @@ #include #include -extern struct tracepoint * const __start___tracepoints_ptrs[]; -extern struct tracepoint * const __stop___tracepoints_ptrs[]; +extern tracepoint_ptr_t __start___tracepoints_ptrs[]; +extern tracepoint_ptr_t __stop___tracepoints_ptrs[]; DEFINE_SRCU(tracepoint_srcu); EXPORT_SYMBOL_GPL(tracepoint_srcu); @@ -371,25 +371,17 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); -static void for_each_tracepoint_range(struct tracepoint * const *begin, - struct tracepoint * const *end, +static void for_each_tracepoint_range( + tracepoint_ptr_t *begin, tracepoint_ptr_t *end, void (*fct)(struct tracepoint *tp, void *priv), void *priv) { + tracepoint_ptr_t *iter; + if (!begin) return; - - if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) { - const int *iter; - - for (iter = (const int *)begin; iter < (const int *)end; iter++) - fct(offset_to_ptr(iter), priv); - } else { - struct tracepoint * const *iter; - - for (iter = begin; iter < end; iter++) - fct(*iter, priv); - } + for (iter = begin; iter < end; iter++) + fct(tracepoint_ptr_deref(iter), priv); } #ifdef CONFIG_MODULES -- cgit v1.2.3 From 12ad0cb2123aed30241a14792ef5bef9efcccbcd Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 15 Oct 2018 23:31:42 -0400 Subject: tracing: Use trace_clock_local() for looping in preemptirq_delay_test.c The preemptirq_delay_test module is used for the ftrace selftest code that tests the latency tracers. The problem is that it uses ktime for the delay loop, and then checks the tracer to see if the delay loop is caught, but the tracer uses trace_clock_local() which uses various different other clocks to measure the latency. As ktime uses the clock cycles, and the code then converts that to nanoseconds, it causes rounding errors, and the preemptirq latency tests are failing due to being off by 1 (it expects to see a delay of 500000 us, but the delay is only 499999 us). This is happening due to a rounding error in the ktime (which is totally legit). The purpose of the test is to see if it can catch the delay, not to test the accuracy between trace_clock_local() and ktime_get(). Best to use apples to apples, and have the delay loop use the same clock as the latency tracer does. 
Cc: stable@vger.kernel.org Fixes: f96e8577da102 ("lib: Add module for testing preemptoff/irqsoff latency tracers") Acked-by: Joel Fernandes (Google) Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/preemptirq_delay_test.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c index f704390db9fc..d8765c952fab 100644 --- a/kernel/trace/preemptirq_delay_test.c +++ b/kernel/trace/preemptirq_delay_test.c @@ -5,12 +5,12 @@ * Copyright (C) 2018 Joel Fernandes (Google) */ +#include #include #include #include #include #include -#include #include #include #include @@ -25,13 +25,13 @@ MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default ir static void busy_wait(ulong time) { - ktime_t start, end; - start = ktime_get(); + u64 start, end; + start = trace_clock_local(); do { - end = ktime_get(); + end = trace_clock_local(); if (kthread_should_stop()) break; - } while (ktime_to_ns(ktime_sub(end, start)) < (time * 1000)); + } while ((end - start) < (time * 1000)); } static int preemptirq_delay_run(void *data) -- cgit v1.2.3 From e45506ac0af9b56b221863e9649fe122d8bb42ff Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Thu, 18 Oct 2018 10:21:33 -0400 Subject: softirq: Fix typo in __do_softirq() comments s/s/as [ mingo: Also add a missing 'the', add proper punctuation and clarify what 'swap' means here. ] Signed-off-by: Yangtao Li Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: alexander.levin@verizon.com Cc: frederic@kernel.org Cc: joel@joelfernandes.org Cc: paulmck@linux.vnet.ibm.com Cc: rostedt@goodmis.org Link: http://lkml.kernel.org/r/20181018142133.12341-1-tiny.windzz@gmail.com Signed-off-by: Ingo Molnar --- kernel/softirq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/softirq.c b/kernel/softirq.c index 6f584861d329..9526895fe4ac 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -257,9 +257,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) int softirq_bit; /* - * Mask out PF_MEMALLOC s current task context is borrowed for the - * softirq. A softirq handled such as network RX might set PF_MEMALLOC - * again if the socket is related to swap + * Mask out PF_MEMALLOC as the current task context is borrowed for the + * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC + * again if the socket is related to swapping. */ current->flags &= ~PF_MEMALLOC; -- cgit v1.2.3 From 9506a7425b094d2f1d9c877ed5a78f416669269b Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 18 Oct 2018 21:45:17 -0400 Subject: locking/lockdep: Fix debug_locks off performance problem It was found that when debug_locks was turned off because of a problem found by the lockdep code, the system performance could drop quite significantly when the lock_stat code was also configured into the kernel. For instance, parallel kernel build time on a 4-socket x86-64 server nearly doubled. Further analysis into the cause of the slowdown traced back to the frequent call to debug_locks_off() from the __lock_acquired() function probably due to some inconsistent lockdep states with debug_locks off. The debug_locks_off() function did an unconditional atomic xchg to write a 0 value into debug_locks which had already been set to 0. This led to severe cacheline contention in the cacheline that held debug_locks. 
As debug_locks is being referenced in quite a few different places in the kernel, this greatly slows down the system performance. To prevent that thrashing of the debug_locks cacheline, lock_acquired() and lock_contended() now check the state of debug_locks before proceeding. The debug_locks_off() function is also modified to check debug_locks before calling __debug_locks_off(). Signed-off-by: Waiman Long Cc: Andrew Morton Cc: Linus Torvalds Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Link: http://lkml.kernel.org/r/1539913518-15598-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 4 ++-- lib/debug_locks.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index be76f476c63f..1efada2dd9dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4066,7 +4066,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; - if (unlikely(!lock_stat)) + if (unlikely(!lock_stat || !debug_locks)) return; if (unlikely(current->lockdep_recursion)) @@ -4086,7 +4086,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; - if (unlikely(!lock_stat)) + if (unlikely(!lock_stat || !debug_locks)) return; if (unlikely(current->lockdep_recursion)) diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 96c4c633d95e..124fdf238b3d 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent); */ int debug_locks_off(void) { - if (__debug_locks_off()) { + if (debug_locks && __debug_locks_off()) { if (!debug_locks_silent) { console_verbose(); return 1; -- cgit v1.2.3 From 21bb9d64c5adc0a87c3736bc9a2d386023b88a5b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 12 Apr 2018 10:46:01 +0200 Subject: swiotlb: remove a pointless comment This comment describes an aspect of the map_sg interface that isn't even exploited by swiotlb. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 4f8a6dbf0b60..9062b14bc7f4 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -925,12 +925,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, * appropriate dma address and length. They are obtained via * sg_dma_{address,length}(SG). * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * * Device ownership issues as mentioned above for swiotlb_map_page are the * same here. 
*/ -- cgit v1.2.3 From b65125c6acf38388d3342b37c18c3b6cc97eeb75 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 12 Apr 2018 14:49:23 +0200 Subject: swiotlb: mark is_swiotlb_buffer static Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- include/linux/swiotlb.h | 1 - kernel/dma/swiotlb.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 965be92c33b5..7ef541ce8f34 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -121,7 +121,6 @@ static inline unsigned int swiotlb_max_segment(void) { return 0; } #endif extern void swiotlb_print_info(void); -extern int is_swiotlb_buffer(phys_addr_t paddr); extern void swiotlb_set_max_segment(unsigned int); extern const struct dma_map_ops swiotlb_dma_ops; diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 9062b14bc7f4..26d3af52956f 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -429,7 +429,7 @@ void __init swiotlb_exit(void) max_segment = 0; } -int is_swiotlb_buffer(phys_addr_t paddr) +static int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= io_tlb_start && paddr < io_tlb_end; } -- cgit v1.2.3 From 8088546832aa2c0d8f99dd56edf6384f8a9b63b3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 12 Apr 2018 10:38:08 +0200 Subject: swiotlb: do not panic on mapping failures All properly written drivers now have error handling in the dma_map_single / dma_map_page callers. As swiotlb_tbl_map_single already prints a useful warning when running out of swiotlb pool space we can also remove swiotlb_full entirely as it serves no purpose now. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- kernel/dma/swiotlb.c | 33 +-------------------------------- 1 file changed, 1 insertion(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 26d3af52956f..69bf305ee5f8 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -761,34 +761,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size, return true; } -static void -swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, - int do_panic) -{ - if (swiotlb_force == SWIOTLB_NO_FORCE) - return; - - /* - * Ran out of IOMMU space for this operation. This is very bad. - * Unfortunately the drivers cannot handle this operation properly. - * unless they check for dma_mapping_error (most don't) - * When the mapping is small enough return a static buffer to limit - * the damage, or panic when the transfer is too big. - */ - dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n", - size); - - if (size <= io_tlb_overflow || !do_panic) - return; - - if (dir == DMA_BIDIRECTIONAL) - panic("DMA: Random memory could be DMA accessed\n"); - if (dir == DMA_FROM_DEVICE) - panic("DMA: Random memory could be DMA written\n"); - if (dir == DMA_TO_DEVICE) - panic("DMA: Random memory could be DMA read\n"); -} - /* * Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. @@ -817,10 +789,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* Oh well, have to allocate and map a bounce buffer. 
*/ map = map_single(dev, phys, size, dir, attrs); - if (map == SWIOTLB_MAP_ERROR) { - swiotlb_full(dev, size, dir, 1); + if (map == SWIOTLB_MAP_ERROR) return __phys_to_dma(dev, io_tlb_overflow_buffer); - } dev_addr = __phys_to_dma(dev, map); @@ -948,7 +918,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, if (map == SWIOTLB_MAP_ERROR) { /* Don't panic here, we expect map_sg users to do proper error handling. */ - swiotlb_full(hwdev, sg->length, dir, 0); attrs |= DMA_ATTR_SKIP_CPU_SYNC; swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, attrs); -- cgit v1.2.3 From dff8d6c1ed584de65aac40494d3e7468c50980c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 16 Aug 2018 15:30:39 +0300 Subject: swiotlb: remove the overflow buffer Like all other dma mapping drivers just return an error code instead of an actual memory buffer. The reason for the overflow buffer was that at the time swiotlb was invented there was no way to check for dma mapping errors, but this has long been fixed. Signed-off-by: Christoph Hellwig Acked-by: Catalin Marinas Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- arch/arm64/mm/dma-mapping.c | 2 +- arch/powerpc/kernel/dma-swiotlb.c | 4 +-- include/linux/dma-direct.h | 2 ++ include/linux/swiotlb.h | 3 -- kernel/dma/direct.c | 2 -- kernel/dma/swiotlb.c | 59 ++------------------------------------- 6 files changed, 8 insertions(+), 64 deletions(-) (limited to 'kernel') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 072c51fb07d7..8d91b927e09e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask) static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr) { if (swiotlb) - return swiotlb_dma_mapping_error(hwdev, addr); + return dma_direct_mapping_error(hwdev, addr); return 0; } diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 88f3963ca30f..5fc335f4d9cd 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -11,7 +11,7 @@ * */ -#include +#include #include #include #include @@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = { .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = swiotlb_dma_mapping_error, + .mapping_error = dma_direct_mapping_error, .get_required_mask = swiotlb_powerpc_get_required, }; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index fbca184ff5a0..bd73e7a91410 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -5,6 +5,8 @@ #include #include +#define DIRECT_MAPPING_ERROR 0 + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include #else diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 7ef541ce8f34..f847c1b265c4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -106,9 +106,6 @@ extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); -extern int -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); - extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 87a6bc2a96c0..f14c376937e5 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -14,8 +14,6 @@ #include #include -#define DIRECT_MAPPING_ERROR 0 - /* * Most 
architectures use ZONE_DMA for the first 16 Megabytes, but * some use it for entirely different regions: diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 69bf305ee5f8..11dbcd80b4a6 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end; */ static unsigned long io_tlb_nslabs; -/* - * When the IOMMU overflows we return a fallback buffer. This sets the size. - */ -static unsigned long io_tlb_overflow = 32*1024; - -static phys_addr_t io_tlb_overflow_buffer; - /* * This is a free list describing the number of free entries available from * each index @@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str) return 0; } early_param("swiotlb", setup_io_tlb_npages); -/* make io_tlb_overflow tunable too? */ unsigned long swiotlb_nr_tbl(void) { @@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void) bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); memset(vaddr, 0, bytes); - - vaddr = phys_to_virt(io_tlb_overflow_buffer); - bytes = PAGE_ALIGN(io_tlb_overflow); - set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); - memset(vaddr, 0, bytes); } int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) { - void *v_overflow_buffer; unsigned long i, bytes; bytes = nslabs << IO_TLB_SHIFT; @@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) io_tlb_start = __pa(tlb); io_tlb_end = io_tlb_start + bytes; - /* - * Get the overflow emergency buffer - */ - v_overflow_buffer = memblock_virt_alloc_low_nopanic( - PAGE_ALIGN(io_tlb_overflow), - PAGE_SIZE); - if (!v_overflow_buffer) - return -ENOMEM; - - io_tlb_overflow_buffer = __pa(v_overflow_buffer); - /* * Allocate and initialize the free list array. This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE @@ -330,7 +305,6 @@ int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) { unsigned long i, bytes; - unsigned char *v_overflow_buffer; bytes = nslabs << IO_TLB_SHIFT; @@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); memset(tlb, 0, bytes); - /* - * Get the overflow emergency buffer - */ - v_overflow_buffer = (void *)__get_free_pages(GFP_DMA, - get_order(io_tlb_overflow)); - if (!v_overflow_buffer) - goto cleanup2; - - set_memory_decrypted((unsigned long)v_overflow_buffer, - io_tlb_overflow >> PAGE_SHIFT); - memset(v_overflow_buffer, 0, io_tlb_overflow); - io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); - /* * Allocate and initialize the free list array. 
This array is used * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE @@ -390,10 +351,6 @@ cleanup4: sizeof(int))); io_tlb_list = NULL; cleanup3: - free_pages((unsigned long)v_overflow_buffer, - get_order(io_tlb_overflow)); - io_tlb_overflow_buffer = 0; -cleanup2: io_tlb_end = 0; io_tlb_start = 0; io_tlb_nslabs = 0; @@ -407,8 +364,6 @@ void __init swiotlb_exit(void) return; if (late_alloc) { - free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer), - get_order(io_tlb_overflow)); free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * @@ -416,8 +371,6 @@ void __init swiotlb_exit(void) free_pages((unsigned long)phys_to_virt(io_tlb_start), get_order(io_tlb_nslabs << IO_TLB_SHIFT)); } else { - memblock_free_late(io_tlb_overflow_buffer, - PAGE_ALIGN(io_tlb_overflow)); memblock_free_late(__pa(io_tlb_orig_addr), PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); memblock_free_late(__pa(io_tlb_list), @@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* Oh well, have to allocate and map a bounce buffer. */ map = map_single(dev, phys, size, dir, attrs); if (map == SWIOTLB_MAP_ERROR) - return __phys_to_dma(dev, io_tlb_overflow_buffer); + return DIRECT_MAPPING_ERROR; dev_addr = __phys_to_dma(dev, map); @@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, attrs |= DMA_ATTR_SKIP_CPU_SYNC; swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); - return __phys_to_dma(dev, io_tlb_overflow_buffer); + return DIRECT_MAPPING_ERROR; } /* @@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } -int -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) -{ - return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer)); -} - /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits @@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr, } const struct dma_map_ops swiotlb_dma_ops = { - .mapping_error = swiotlb_dma_mapping_error, + .mapping_error = dma_direct_mapping_error, .alloc = swiotlb_alloc, .free = swiotlb_free, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, -- cgit v1.2.3 From 27744e0077f4c8b40aaa3126256708c21e56655d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 12 Apr 2018 09:56:56 +0200 Subject: swiotlb: merge swiotlb_unmap_page and unmap_single Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 11dbcd80b4a6..15335f3a1bf3 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -765,9 +765,9 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. 
*/ -static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) +void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) { phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); @@ -790,13 +790,6 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, dma_mark_clean(phys_to_virt(paddr), size); } -void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - unmap_single(hwdev, dev_addr, size, dir, attrs); -} - /* * Make physical memory consistent for a single streaming mode DMA translation * after a transfer. @@ -900,7 +893,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) - unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, + swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); } -- cgit v1.2.3 From 4803b44e68fc08e76f00dec90074d199a11ad6f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Aug 2018 15:56:05 +0200 Subject: swiotlb: use swiotlb_map_page in swiotlb_map_sg_attrs No need to duplicate the code - map_sg is equivalent to map_page for each page in the scatterlist. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 15335f3a1bf3..15755d7a5242 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -845,37 +845,27 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, * same here. */ int -swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, +swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *sg; int i; - BUG_ON(dir == DMA_NONE); - for_each_sg(sgl, sg, nelems, i) { - phys_addr_t paddr = sg_phys(sg); - dma_addr_t dev_addr = phys_to_dma(hwdev, paddr); - - if (swiotlb_force == SWIOTLB_FORCE || - !dma_capable(hwdev, dev_addr, sg->length)) { - phys_addr_t map = map_single(hwdev, sg_phys(sg), - sg->length, dir, attrs); - if (map == SWIOTLB_MAP_ERROR) { - /* Don't panic here, we expect map_sg users - to do proper error handling. */ - attrs |= DMA_ATTR_SKIP_CPU_SYNC; - swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, - attrs); - sg_dma_len(sgl) = 0; - return 0; - } - sg->dma_address = __phys_to_dma(hwdev, map); - } else - sg->dma_address = dev_addr; + sg->dma_address = swiotlb_map_page(dev, sg_page(sg), sg->offset, + sg->length, dir, attrs); + if (sg->dma_address == DIRECT_MAPPING_ERROR) + goto out_error; sg_dma_len(sg) = sg->length; } + return nelems; + +out_error: + swiotlb_unmap_sg_attrs(dev, sgl, i, dir, + attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sgl) = 0; + return 0; } /* -- cgit v1.2.3 From c4dae366925f929749b2a26efa53b561904a9a4f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Aug 2018 16:21:10 +0200 Subject: swiotlb: refactor swiotlb_map_page Remove the somewhat useless map_single function, and replace it with a swiotlb_bounce_page handler that handles everything related to actually bouncing a page. 
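For reference, the driver-side contract this series relies on (a generic illustrative sketch, not code from these patches; dev, buf and len are placeholders): a mapping that cannot be bounced now simply fails, and callers are expected to check it with dma_mapping_error() rather than receive a pointer into an overflow buffer.

/* Generic driver-style sketch; error handling instead of a fallback buffer. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* no overflow buffer to fall back to */

	*out = addr;
	return 0;
}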
Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy Reviewed-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 67 +++++++++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 15755d7a5242..57507b18caa4 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -543,26 +543,6 @@ found: return tlb_addr; } -/* - * Allocates bounce buffer and returns its physical address. - */ -static phys_addr_t -map_single(struct device *hwdev, phys_addr_t phys, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - dma_addr_t start_dma_addr; - - if (swiotlb_force == SWIOTLB_NO_FORCE) { - dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n", - &phys); - return SWIOTLB_MAP_ERROR; - } - - start_dma_addr = __phys_to_dma(hwdev, io_tlb_start); - return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, - dir, attrs); -} - /* * tlb_addr is the physical address of the bounce buffer to unmap. */ @@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size, return true; } +static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + dma_addr_t dma_addr; + + if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) { + dev_warn_ratelimited(dev, + "Cannot do DMA to address %pa\n", phys); + return DIRECT_MAPPING_ERROR; + } + + /* Oh well, have to allocate and map a bounce buffer. */ + *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start), + *phys, size, dir, attrs); + if (*phys == SWIOTLB_MAP_ERROR) + return DIRECT_MAPPING_ERROR; + + /* Ensure that the address returned is DMA'ble */ + dma_addr = __phys_to_dma(dev, *phys); + if (unlikely(!dma_capable(dev, dma_addr, size))) { + swiotlb_tbl_unmap_single(dev, *phys, size, dir, + attrs | DMA_ATTR_SKIP_CPU_SYNC); + return DIRECT_MAPPING_ERROR; + } + + return dma_addr; +} + /* * Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. @@ -726,7 +734,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - phys_addr_t map, phys = page_to_phys(page) + offset; + phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dev_addr = phys_to_dma(dev, phys); BUG_ON(dir == DMA_NONE); @@ -739,22 +747,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, return dev_addr; trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); - - /* Oh well, have to allocate and map a bounce buffer. */ - map = map_single(dev, phys, size, dir, attrs); - if (map == SWIOTLB_MAP_ERROR) - return DIRECT_MAPPING_ERROR; - - dev_addr = __phys_to_dma(dev, map); - - /* Ensure that the address returned is DMA'ble */ - if (dma_capable(dev, dev_addr, size)) - return dev_addr; - - attrs |= DMA_ATTR_SKIP_CPU_SYNC; - swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); - - return DIRECT_MAPPING_ERROR; + return swiotlb_bounce_page(dev, &phys, size, dir, attrs); } /* -- cgit v1.2.3 From fafadcd16595c1df82df399f62421718ec9bf70a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 30 Sep 2018 16:13:33 -0700 Subject: swiotlb: don't dip into swiotlb pool for coherent allocations All architectures that support swiotlb also have a zone that backs up these less than full addressing allocations (usually ZONE_DMA32). 
Because of that it is rather pointless to fall back to the global swiotlb buffer if the normal dma direct allocation failed - the only thing this will do is to eat up bounce buffers that would be more useful to serve streaming mappings. Signed-off-by: Christoph Hellwig Acked-by: Catalin Marinas Acked-by: Konrad Rzeszutek Wilk --- arch/arm64/mm/dma-mapping.c | 6 +-- include/linux/swiotlb.h | 5 --- kernel/dma/swiotlb.c | 105 +------------------------------------------- 3 files changed, 5 insertions(+), 111 deletions(-) (limited to 'kernel') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 8d91b927e09e..eee6cfcfde9e 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size, return addr; } - ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs); + ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs); if (!ptr) goto no_mem; @@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size, return coherent_ptr; no_map: - swiotlb_free(dev, size, ptr, *dma_handle, attrs); + dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs); no_mem: return NULL; } @@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size, return; vunmap(vaddr); } - swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs); + dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs); } static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index f847c1b265c4..a387b59640a4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, /* Accessory functions. */ -void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, - gfp_t flags, unsigned long attrs); -void swiotlb_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs); - extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 57507b18caa4..1a01b0ac0a5e 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -622,78 +622,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, } } -static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, - size_t size) -{ - u64 mask = DMA_BIT_MASK(32); - - if (dev && dev->coherent_dma_mask) - mask = dev->coherent_dma_mask; - return addr + size - 1 <= mask; -} - -static void * -swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned long attrs) -{ - phys_addr_t phys_addr; - - if (swiotlb_force == SWIOTLB_NO_FORCE) - goto out_warn; - - phys_addr = swiotlb_tbl_map_single(dev, - __phys_to_dma(dev, io_tlb_start), - 0, size, DMA_FROM_DEVICE, attrs); - if (phys_addr == SWIOTLB_MAP_ERROR) - goto out_warn; - - *dma_handle = __phys_to_dma(dev, phys_addr); - if (!dma_coherent_ok(dev, *dma_handle, size)) - goto out_unmap; - - memset(phys_to_virt(phys_addr), 0, size); - return phys_to_virt(phys_addr); - -out_unmap: - dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", - (unsigned long long)dev->coherent_dma_mask, - (unsigned long long)*dma_handle); - - /* - * DMA_TO_DEVICE to avoid memcpy in unmap_single. - * DMA_ATTR_SKIP_CPU_SYNC is optional. 
- */ - swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); -out_warn: - if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { - dev_warn(dev, - "swiotlb: coherent allocation failed, size=%zu\n", - size); - dump_stack(); - } - return NULL; -} - -static bool swiotlb_free_buffer(struct device *dev, size_t size, - dma_addr_t dma_addr) -{ - phys_addr_t phys_addr = dma_to_phys(dev, dma_addr); - - WARN_ON_ONCE(irqs_disabled()); - - if (!is_swiotlb_buffer(phys_addr)) - return false; - - /* - * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single. - * DMA_ATTR_SKIP_CPU_SYNC is optional. - */ - swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - return true; -} - static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -926,39 +854,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask) return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; } -void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - void *vaddr; - - /* temporary workaround: */ - if (gfp & __GFP_NOWARN) - attrs |= DMA_ATTR_NO_WARN; - - /* - * Don't print a warning when the first allocation attempt fails. - * swiotlb_alloc_coherent() will print a warning when the DMA memory - * allocation ultimately failed. - */ - gfp |= __GFP_NOWARN; - - vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); - if (!vaddr) - vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs); - return vaddr; -} - -void swiotlb_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs) -{ - if (!swiotlb_free_buffer(dev, size, dma_addr)) - dma_direct_free(dev, size, vaddr, dma_addr, attrs); -} - const struct dma_map_ops swiotlb_dma_ops = { .mapping_error = dma_direct_mapping_error, - .alloc = swiotlb_alloc, - .free = swiotlb_free, + .alloc = dma_direct_alloc, + .free = dma_direct_free, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, -- cgit v1.2.3 From a4a4330db46a17289cf2ca5f9fb153d536267b97 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 19 Oct 2018 08:51:53 +0200 Subject: swiotlb: add support for non-coherent DMA Handle architectures that are not cache coherent directly in the main swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right places from the various dma_map/unmap/sync methods when the device is non-coherent. Because swiotlb now uses dma_direct_alloc for the coherent allocation that side is already taken care of by the dma-direct code calling into arch_dma_{alloc,free} for devices that are non-coherent. Signed-off-by: Christoph Hellwig Acked-by: Konrad Rzeszutek Wilk --- kernel/dma/swiotlb.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 1a01b0ac0a5e..ebecaf255ea2 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * we can safely return the device addr and not worry about bounce * buffering it. 
*/ - if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE) - return dev_addr; + if (!dma_capable(dev, dev_addr, size) || + swiotlb_force == SWIOTLB_FORCE) { + trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); + dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs); + } + + if (!dev_is_dma_coherent(dev) && + (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + arch_sync_dma_for_device(dev, phys, size, dir); - trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); - return swiotlb_bounce_page(dev, &phys, size, dir, attrs); + return dev_addr; } /* @@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); + if (!dev_is_dma_coherent(hwdev) && + (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) + arch_sync_dma_for_cpu(hwdev, paddr, size, dir); + if (is_swiotlb_buffer(paddr)) { swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); return; @@ -730,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(paddr)) { + if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU) + arch_sync_dma_for_cpu(hwdev, paddr, size, dir); + + if (is_swiotlb_buffer(paddr)) swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); - return; - } - if (dir != DMA_FROM_DEVICE) - return; + if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE) + arch_sync_dma_for_device(hwdev, paddr, size, dir); - dma_mark_clean(phys_to_virt(paddr), size); + if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE) + dma_mark_clean(phys_to_virt(paddr), size); } void -- cgit v1.2.3 From 746a923b863a1065ef77324e1e43f19b1a3eab5c Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 18 Oct 2018 15:15:05 +0200 Subject: genirq: Fix race on spurious interrupt detection Commit 1e77d0a1ed74 ("genirq: Sanitize spurious interrupt detection of threaded irqs") made detection of spurious interrupts work for threaded handlers by: a) incrementing a counter every time the thread returns IRQ_HANDLED, and b) checking whether that counter has increased every time the thread is woken. However for oneshot interrupts, the commit unmasks the interrupt before incrementing the counter. If another interrupt occurs right after unmasking but before the counter is incremented, that interrupt is incorrectly considered spurious: time | irq_thread() | irq_thread_fn() | action->thread_fn() | irq_finalize_oneshot() | unmask_threaded_irq() /* interrupt is unmasked */ | | /* interrupt fires, incorrectly deemed spurious */ | | atomic_inc(&desc->threads_handled); /* counter is incremented */ v This is observed with a hi3110 CAN controller receiving data at high volume (from a separate machine sending with "cangen -g 0 -i -x"): The controller signals a huge number of interrupts (hundreds of millions per day) and every second there are about a dozen which are deemed spurious. In theory with high CPU load and the presence of higher priority tasks, the number of incorrectly detected spurious interrupts might increase beyond the 99,900 threshold and cause disablement of the interrupt. In practice it just increments the spurious interrupt count. But that can cause people to waste time investigating it over and over. Fix it by moving the accounting before the invocation of irq_finalize_oneshot(). 
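For context, a simplified sketch of the check that consumes this counter (paraphrased from note_interrupt() in kernel/irq/spurious.c, details omitted): the thread only gets credit when threads_handled has advanced since the previous interrupt, which is why bumping it after the unmask can lose the race described above.

/* Simplified, illustrative paraphrase - not the literal kernel code. */
static bool thread_made_progress(struct irq_desc *desc)
{
	int handled = atomic_read(&desc->threads_handled);

	if (handled != desc->threads_handled_last) {
		desc->threads_handled_last = handled;
		return true;		/* progress seen: not spurious */
	}
	desc->irqs_unhandled++;		/* counts towards the 99,900 threshold */
	return false;
}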
[ tglx: Folded change log update ] Fixes: 1e77d0a1ed74 ("genirq: Sanitize spurious interrupt detection of threaded irqs") Signed-off-by: Lukas Wunner Signed-off-by: Thomas Gleixner Cc: Mathias Duckeck Cc: Akshay Bhat Cc: Casey Fitzpatrick Cc: stable@vger.kernel.org # v3.16+ Link: https://lkml.kernel.org/r/1dfd8bbd16163940648045495e3e9698e63b50ad.1539867047.git.lukas@wunner.de --- kernel/irq/manage.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index fb86146037a7..9dbdccab3b6a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -927,6 +927,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); + if (ret == IRQ_HANDLED) + atomic_inc(&desc->threads_handled); + irq_finalize_oneshot(desc, action); local_bh_enable(); return ret; @@ -943,6 +946,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc, irqreturn_t ret; ret = action->thread_fn(action->irq, action->dev_id); + if (ret == IRQ_HANDLED) + atomic_inc(&desc->threads_handled); + irq_finalize_oneshot(desc, action); return ret; } @@ -1020,8 +1026,6 @@ static int irq_thread(void *data) irq_thread_check_affinity(desc, action); action_ret = handler_fn(desc, action); - if (action_ret == IRQ_HANDLED) - atomic_inc(&desc->threads_handled); if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); -- cgit v1.2.3 From 144991602e6a14d667b295f1b099e609ce857772 Mon Sep 17 00:00:00 2001 From: Mauricio Vasquez B Date: Thu, 18 Oct 2018 15:16:09 +0200 Subject: bpf: rename stack trace map operations In the following patches queue and stack maps (FIFO and LIFO datastructures) will be implemented. In order to avoid confusion and a possible name clash rename stack_map_ops to stack_trace_map_ops Signed-off-by: Mauricio Vasquez B Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf_types.h | 2 +- kernel/bpf/stackmap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index fa48343a5ea1..7bad4e1947ed 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -51,7 +51,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) #ifdef CONFIG_PERF_EVENTS -BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index b2ade10f7ec3..90daf285de03 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -600,7 +600,7 @@ static void stack_map_free(struct bpf_map *map) put_callchain_buffers(); } -const struct bpf_map_ops stack_map_ops = { +const struct bpf_map_ops stack_trace_map_ops = { .map_alloc = stack_map_alloc, .map_free = stack_map_free, .map_get_next_key = stack_map_get_next_key, -- cgit v1.2.3 From c9d29f4658a5a6d2c2ba2afeb20ff763fc6286f9 Mon Sep 17 00:00:00 2001 From: Mauricio Vasquez B Date: Thu, 18 Oct 2018 15:16:14 +0200 Subject: bpf/syscall: allow key to be null in map functions This commit adds the required logic to allow key being NULL in case the key_size of the map is 0. 
A new __bpf_copy_key function helper only copies the key from userpsace when key_size != 0, otherwise it enforces that key must be null. Signed-off-by: Mauricio Vasquez B Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/syscall.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index f4ecd6ed2252..78d9dd95e25f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -651,6 +651,17 @@ int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) return -ENOTSUPP; } +static void *__bpf_copy_key(void __user *ukey, u64 key_size) +{ + if (key_size) + return memdup_user(ukey, key_size); + + if (ukey) + return ERR_PTR(-EINVAL); + + return NULL; +} + /* last field in 'union bpf_attr' used by this command */ #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value @@ -678,7 +689,7 @@ static int map_lookup_elem(union bpf_attr *attr) goto err_put; } - key = memdup_user(ukey, map->key_size); + key = __bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; @@ -785,7 +796,7 @@ static int map_update_elem(union bpf_attr *attr) goto err_put; } - key = memdup_user(ukey, map->key_size); + key = __bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; @@ -888,7 +899,7 @@ static int map_delete_elem(union bpf_attr *attr) goto err_put; } - key = memdup_user(ukey, map->key_size); + key = __bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; @@ -941,7 +952,7 @@ static int map_get_next_key(union bpf_attr *attr) } if (ukey) { - key = memdup_user(ukey, map->key_size); + key = __bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; -- cgit v1.2.3 From 2ea864c58f19bf70a0e2415f9f1c53814e07f1b4 Mon Sep 17 00:00:00 2001 From: Mauricio Vasquez B Date: Thu, 18 Oct 2018 15:16:20 +0200 Subject: bpf/verifier: add ARG_PTR_TO_UNINIT_MAP_VALUE ARG_PTR_TO_UNINIT_MAP_VALUE argument is a pointer to a memory zone used to save the value of a map. Basically the same as ARG_PTR_TO_UNINIT_MEM, but the size has not be passed as an extra argument. This will be used in the following patch that implements some new helpers that receive a pointer to be filled with a map value. 
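For reference, the following patch declares the new pop helper's output buffer with this type; the verifier then checks map->value_size bytes and, via raw_mode, allows the memory to be uninitialized, just as ARG_PTR_TO_UNINIT_MEM does when the size is passed explicitly:

	const struct bpf_func_proto bpf_map_pop_elem_proto = {
		.func		= bpf_map_pop_elem,
		.gpl_only	= false,
		.pkt_access	= true,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_CONST_MAP_PTR,
		.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
	};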
Signed-off-by: Mauricio Vasquez B Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + kernel/bpf/verifier.c | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e60fff48288b..0f8b863e0229 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -138,6 +138,7 @@ enum bpf_arg_type { ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3f93a548a642..d84c91ac3b70 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2117,7 +2117,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, } if (arg_type == ARG_PTR_TO_MAP_KEY || - arg_type == ARG_PTR_TO_MAP_VALUE) { + arg_type == ARG_PTR_TO_MAP_VALUE || + arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { expected_type = PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) @@ -2187,7 +2188,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); - } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { + } else if (arg_type == ARG_PTR_TO_MAP_VALUE || + arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ @@ -2196,9 +2198,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } + meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, - NULL); + meta); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); -- cgit v1.2.3 From f1a2e44a3aeccb3ff18d3ccc0b0203e70b95bd92 Mon Sep 17 00:00:00 2001 From: Mauricio Vasquez B Date: Thu, 18 Oct 2018 15:16:25 +0200 Subject: bpf: add queue and stack maps Queue/stack maps implement a FIFO/LIFO data storage for ebpf programs. These maps support peek, pop and push operations that are exposed to eBPF programs through the new bpf_map[peek/pop/push] helpers. Those operations are exposed to userspace applications through the already existing syscalls in the following way: BPF_MAP_LOOKUP_ELEM -> peek BPF_MAP_LOOKUP_AND_DELETE_ELEM -> pop BPF_MAP_UPDATE_ELEM -> push Queue/stack maps are implemented using a buffer, tail and head indexes, hence BPF_F_NO_PREALLOC is not supported. As opposite to other maps, queue and stack do not use RCU for protecting maps values, the bpf_map[peek/pop] have a ARG_PTR_TO_UNINIT_MAP_VALUE argument that is a pointer to a memory zone where to save the value of a map. Basically the same as ARG_PTR_TO_UNINIT_MEM, but the size has not be passed as an extra argument. Our main motivation for implementing queue/stack maps was to keep track of a pool of elements, like network ports in a SNAT, however we forsee other use cases, like for exampling saving last N kernel events in a map and then analysing from userspace. 
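As a rough sketch of that last use case, logging the most recent N kernel events, the fragment below pushes timestamps into a queue map from a BPF program. The build conventions, section name and probed symbol are assumptions for illustration only and not part of this series; it presumes the usual bpf_helpers.h declarations for the new helpers:

	#include <linux/bpf.h>
	#include <linux/ptrace.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") events = {
		.type		= BPF_MAP_TYPE_QUEUE,
		.key_size	= 0,			/* queue/stack maps are keyless */
		.value_size	= sizeof(__u64),
		.max_entries	= 64,			/* keep only the last 64 records */
	};

	SEC("kprobe/do_sys_open")
	int log_open(struct pt_regs *ctx)
	{
		__u64 ts = bpf_ktime_get_ns();

		/* BPF_EXIST: if the queue is full, evict the oldest record to make room */
		bpf_map_push_elem(&events, &ts, BPF_EXIST);
		return 0;
	}

	char _license[] SEC("license") = "GPL";

User space inspects the same map through the existing syscall commands listed above, passing a NULL key since key_size is 0.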
Signed-off-by: Mauricio Vasquez B Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 6 + include/linux/bpf_types.h | 2 + include/uapi/linux/bpf.h | 29 ++++- kernel/bpf/Makefile | 2 +- kernel/bpf/core.c | 3 + kernel/bpf/helpers.c | 43 +++++++ kernel/bpf/queue_stack_maps.c | 288 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 6 + kernel/bpf/verifier.c | 19 ++- net/core/filter.c | 6 + 10 files changed, 401 insertions(+), 3 deletions(-) create mode 100644 kernel/bpf/queue_stack_maps.c (limited to 'kernel') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0f8b863e0229..33014ae73103 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -39,6 +39,9 @@ struct bpf_map_ops { void *(*map_lookup_elem)(struct bpf_map *map, void *key); int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); + int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); + int (*map_pop_elem)(struct bpf_map *map, void *value); + int (*map_peek_elem)(struct bpf_map *map, void *value); /* funcs called by prog_array and perf_event_array map */ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, @@ -811,6 +814,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_push_elem_proto; +extern const struct bpf_func_proto bpf_map_pop_elem_proto; +extern const struct bpf_func_proto bpf_map_peek_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 7bad4e1947ed..44d9ab4809bd 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -69,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) #endif #endif +BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5e46f6732781..70082cb626b4 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -128,6 +128,8 @@ enum bpf_map_type { BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + BPF_MAP_TYPE_QUEUE, + BPF_MAP_TYPE_STACK, }; enum bpf_prog_type { @@ -462,6 +464,28 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * + * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * Description + * Push an element *value* in *map*. *flags* is one of: + * + * **BPF_EXIST** + * If the queue/stack is full, the oldest element is removed to + * make room for this. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * Description + * Pop an element from *map*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * Description + * Get an element from *map* without removing it. + * Return + * 0 on success, or a negative error in case of failure. 
+ * * int bpf_probe_read(void *dst, u32 size, const void *src) * Description * For tracing programs, safely attempt to read *size* bytes from @@ -2303,7 +2327,10 @@ union bpf_attr { FN(skb_ancestor_cgroup_id), \ FN(sk_lookup_tcp), \ FN(sk_lookup_udp), \ - FN(sk_release), + FN(sk_release), \ + FN(map_push_elem), \ + FN(map_pop_elem), \ + FN(map_peek_elem), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index ff8262626b8f..4c2fa3ac56f6 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -3,7 +3,7 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o -obj-$(CONFIG_BPF_SYSCALL) += local_storage.o +obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o obj-$(CONFIG_BPF_SYSCALL) += btf.o ifeq ($(CONFIG_NET),y) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index defcf4df6d91..7c7eeea8cffc 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1783,6 +1783,9 @@ BPF_CALL_0(bpf_user_rnd_u32) const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; const struct bpf_func_proto bpf_map_update_elem_proto __weak; const struct bpf_func_proto bpf_map_delete_elem_proto __weak; +const struct bpf_func_proto bpf_map_push_elem_proto __weak; +const struct bpf_func_proto bpf_map_pop_elem_proto __weak; +const struct bpf_func_proto bpf_map_peek_elem_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 6502115e8f55..ab0d5e3f9892 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -76,6 +76,49 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = { .arg2_type = ARG_PTR_TO_MAP_KEY, }; +BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) +{ + return map->ops->map_push_elem(map, value, flags); +} + +const struct bpf_func_proto bpf_map_push_elem_proto = { + .func = bpf_map_push_elem, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_VALUE, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) +{ + return map->ops->map_pop_elem(map, value); +} + +const struct bpf_func_proto bpf_map_pop_elem_proto = { + .func = bpf_map_pop_elem, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, +}; + +BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) +{ + return map->ops->map_peek_elem(map, value); +} + +const struct bpf_func_proto bpf_map_peek_elem_proto = { + .func = bpf_map_pop_elem, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, +}; + const struct bpf_func_proto bpf_get_prandom_u32_proto = { .func = bpf_user_rnd_u32, .gpl_only = false, diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c new file mode 100644 index 000000000000..12a93fb37449 --- /dev/null +++ b/kernel/bpf/queue_stack_maps.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * queue_stack_maps.c: BPF queue and stack maps + * + * Copyright (c) 2018 Politecnico di 
Torino + */ +#include +#include +#include +#include "percpu_freelist.h" + +#define QUEUE_STACK_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + + +struct bpf_queue_stack { + struct bpf_map map; + raw_spinlock_t lock; + u32 head, tail; + u32 size; /* max_entries + 1 */ + + char elements[0] __aligned(8); +}; + +static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map) +{ + return container_of(map, struct bpf_queue_stack, map); +} + +static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs) +{ + return qs->head == qs->tail; +} + +static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) +{ + u32 head = qs->head + 1; + + if (unlikely(head >= qs->size)) + head = 0; + + return head == qs->tail; +} + +/* Called from syscall */ +static int queue_stack_map_alloc_check(union bpf_attr *attr) +{ + /* check sanity of attributes */ + if (attr->max_entries == 0 || attr->key_size != 0 || + attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK) + return -EINVAL; + + if (attr->value_size > KMALLOC_MAX_SIZE) + /* if value_size is bigger, the user space won't be able to + * access the elements. + */ + return -E2BIG; + + return 0; +} + +static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) +{ + int ret, numa_node = bpf_map_attr_numa_node(attr); + struct bpf_queue_stack *qs; + u32 size, value_size; + u64 queue_size, cost; + + size = attr->max_entries + 1; + value_size = attr->value_size; + + queue_size = sizeof(*qs) + (u64) value_size * size; + + cost = queue_size; + if (cost >= U32_MAX - PAGE_SIZE) + return ERR_PTR(-E2BIG); + + cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + + ret = bpf_map_precharge_memlock(cost); + if (ret < 0) + return ERR_PTR(ret); + + qs = bpf_map_area_alloc(queue_size, numa_node); + if (!qs) + return ERR_PTR(-ENOMEM); + + memset(qs, 0, sizeof(*qs)); + + bpf_map_init_from_attr(&qs->map, attr); + + qs->map.pages = cost; + qs->size = size; + + raw_spin_lock_init(&qs->lock); + + return &qs->map; +} + +/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ +static void queue_stack_map_free(struct bpf_map *map) +{ + struct bpf_queue_stack *qs = bpf_queue_stack(map); + + /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, + * so the programs (can be more than one that used this map) were + * disconnected from events. 
Wait for outstanding critical sections in + * these programs to complete + */ + synchronize_rcu(); + + bpf_map_area_free(qs); +} + +static int __queue_map_get(struct bpf_map *map, void *value, bool delete) +{ + struct bpf_queue_stack *qs = bpf_queue_stack(map); + unsigned long flags; + int err = 0; + void *ptr; + + raw_spin_lock_irqsave(&qs->lock, flags); + + if (queue_stack_map_is_empty(qs)) { + err = -ENOENT; + goto out; + } + + ptr = &qs->elements[qs->tail * qs->map.value_size]; + memcpy(value, ptr, qs->map.value_size); + + if (delete) { + if (unlikely(++qs->tail >= qs->size)) + qs->tail = 0; + } + +out: + raw_spin_unlock_irqrestore(&qs->lock, flags); + return err; +} + + +static int __stack_map_get(struct bpf_map *map, void *value, bool delete) +{ + struct bpf_queue_stack *qs = bpf_queue_stack(map); + unsigned long flags; + int err = 0; + void *ptr; + u32 index; + + raw_spin_lock_irqsave(&qs->lock, flags); + + if (queue_stack_map_is_empty(qs)) { + err = -ENOENT; + goto out; + } + + index = qs->head - 1; + if (unlikely(index >= qs->size)) + index = qs->size - 1; + + ptr = &qs->elements[index * qs->map.value_size]; + memcpy(value, ptr, qs->map.value_size); + + if (delete) + qs->head = index; + +out: + raw_spin_unlock_irqrestore(&qs->lock, flags); + return err; +} + +/* Called from syscall or from eBPF program */ +static int queue_map_peek_elem(struct bpf_map *map, void *value) +{ + return __queue_map_get(map, value, false); +} + +/* Called from syscall or from eBPF program */ +static int stack_map_peek_elem(struct bpf_map *map, void *value) +{ + return __stack_map_get(map, value, false); +} + +/* Called from syscall or from eBPF program */ +static int queue_map_pop_elem(struct bpf_map *map, void *value) +{ + return __queue_map_get(map, value, true); +} + +/* Called from syscall or from eBPF program */ +static int stack_map_pop_elem(struct bpf_map *map, void *value) +{ + return __stack_map_get(map, value, true); +} + +/* Called from syscall or from eBPF program */ +static int queue_stack_map_push_elem(struct bpf_map *map, void *value, + u64 flags) +{ + struct bpf_queue_stack *qs = bpf_queue_stack(map); + unsigned long irq_flags; + int err = 0; + void *dst; + + /* BPF_EXIST is used to force making room for a new element in case the + * map is full + */ + bool replace = (flags & BPF_EXIST); + + /* Check supported flags for queue and stack maps */ + if (flags & BPF_NOEXIST || flags > BPF_EXIST) + return -EINVAL; + + raw_spin_lock_irqsave(&qs->lock, irq_flags); + + if (queue_stack_map_is_full(qs)) { + if (!replace) { + err = -E2BIG; + goto out; + } + /* advance tail pointer to overwrite oldest element */ + if (unlikely(++qs->tail >= qs->size)) + qs->tail = 0; + } + + dst = &qs->elements[qs->head * qs->map.value_size]; + memcpy(dst, value, qs->map.value_size); + + if (unlikely(++qs->head >= qs->size)) + qs->head = 0; + +out: + raw_spin_unlock_irqrestore(&qs->lock, irq_flags); + return err; +} + +/* Called from syscall or from eBPF program */ +static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key) +{ + return NULL; +} + +/* Called from syscall or from eBPF program */ +static int queue_stack_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + return -EINVAL; +} + +/* Called from syscall or from eBPF program */ +static int queue_stack_map_delete_elem(struct bpf_map *map, void *key) +{ + return -EINVAL; +} + +/* Called from syscall */ +static int queue_stack_map_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + return -EINVAL; +} + 
+const struct bpf_map_ops queue_map_ops = { + .map_alloc_check = queue_stack_map_alloc_check, + .map_alloc = queue_stack_map_alloc, + .map_free = queue_stack_map_free, + .map_lookup_elem = queue_stack_map_lookup_elem, + .map_update_elem = queue_stack_map_update_elem, + .map_delete_elem = queue_stack_map_delete_elem, + .map_push_elem = queue_stack_map_push_elem, + .map_pop_elem = queue_map_pop_elem, + .map_peek_elem = queue_map_peek_elem, + .map_get_next_key = queue_stack_map_get_next_key, +}; + +const struct bpf_map_ops stack_map_ops = { + .map_alloc_check = queue_stack_map_alloc_check, + .map_alloc = queue_stack_map_alloc, + .map_free = queue_stack_map_free, + .map_lookup_elem = queue_stack_map_lookup_elem, + .map_update_elem = queue_stack_map_update_elem, + .map_delete_elem = queue_stack_map_delete_elem, + .map_push_elem = queue_stack_map_push_elem, + .map_pop_elem = stack_map_pop_elem, + .map_peek_elem = stack_map_peek_elem, + .map_get_next_key = queue_stack_map_get_next_key, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 78d9dd95e25f..1617407f9ee5 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -727,6 +727,9 @@ static int map_lookup_elem(union bpf_attr *attr) err = bpf_fd_htab_map_lookup_elem(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { err = bpf_fd_reuseport_array_lookup_elem(map, key, value); + } else if (map->map_type == BPF_MAP_TYPE_QUEUE || + map->map_type == BPF_MAP_TYPE_STACK) { + err = map->ops->map_peek_elem(map, value); } else { rcu_read_lock(); ptr = map->ops->map_lookup_elem(map, key); @@ -857,6 +860,9 @@ static int map_update_elem(union bpf_attr *attr) /* rcu_read_lock() is not needed */ err = bpf_fd_reuseport_array_update_elem(map, key, value, attr->flags); + } else if (map->map_type == BPF_MAP_TYPE_QUEUE || + map->map_type == BPF_MAP_TYPE_STACK) { + err = map->ops->map_push_elem(map, value, attr->flags); } else { rcu_read_lock(); err = map->ops->map_update_elem(map, key, value, attr->flags); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d84c91ac3b70..7d6d9cf9ebd5 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2324,6 +2324,13 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + if (func_id != BPF_FUNC_map_peek_elem && + func_id != BPF_FUNC_map_pop_elem && + func_id != BPF_FUNC_map_push_elem) + goto error; + break; default: break; } @@ -2380,6 +2387,13 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) goto error; break; + case BPF_FUNC_map_peek_elem: + case BPF_FUNC_map_pop_elem: + case BPF_FUNC_map_push_elem: + if (map->map_type != BPF_MAP_TYPE_QUEUE && + map->map_type != BPF_MAP_TYPE_STACK) + goto error; + break; default: break; } @@ -2675,7 +2689,10 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && - func_id != BPF_FUNC_map_delete_elem) + func_id != BPF_FUNC_map_delete_elem && + func_id != BPF_FUNC_map_push_elem && + func_id != BPF_FUNC_map_pop_elem && + func_id != BPF_FUNC_map_peek_elem) return 0; if (meta->map_ptr == NULL) { diff --git a/net/core/filter.c b/net/core/filter.c index 1a3ac6c46873..ea48ec789b5c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4876,6 +4876,12 @@ 
bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; + case BPF_FUNC_map_push_elem: + return &bpf_map_push_elem_proto; + case BPF_FUNC_map_pop_elem: + return &bpf_map_pop_elem_proto; + case BPF_FUNC_map_peek_elem: + return &bpf_map_peek_elem_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: -- cgit v1.2.3 From bd513cd08f10cbe28856f99ae951e86e86803861 Mon Sep 17 00:00:00 2001 From: Mauricio Vasquez B Date: Thu, 18 Oct 2018 15:16:30 +0200 Subject: bpf: add MAP_LOOKUP_AND_DELETE_ELEM syscall The previous patch implemented a bpf queue/stack maps that provided the peek/pop/push functions. There is not a direct relationship between those functions and the current maps syscalls, hence a new MAP_LOOKUP_AND_DELETE_ELEM syscall is added, this is mapped to the pop operation in the queue/stack maps and it is still to implement in other kind of maps. Signed-off-by: Mauricio Vasquez B Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) (limited to 'kernel') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 70082cb626b4..a2fb333290dc 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -103,6 +103,7 @@ enum bpf_cmd { BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, + BPF_MAP_LOOKUP_AND_DELETE_ELEM, }; enum bpf_map_type { diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1617407f9ee5..49ae64a26562 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -999,6 +999,69 @@ err_put: return err; } +#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value + +static int map_lookup_and_delete_elem(union bpf_attr *attr) +{ + void __user *ukey = u64_to_user_ptr(attr->key); + void __user *uvalue = u64_to_user_ptr(attr->value); + int ufd = attr->map_fd; + struct bpf_map *map; + void *key, *value, *ptr; + u32 value_size; + struct fd f; + int err; + + if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) + return -EINVAL; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + if (!(f.file->f_mode & FMODE_CAN_WRITE)) { + err = -EPERM; + goto err_put; + } + + key = __bpf_copy_key(ukey, map->key_size); + if (IS_ERR(key)) { + err = PTR_ERR(key); + goto err_put; + } + + value_size = map->value_size; + + err = -ENOMEM; + value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); + if (!value) + goto free_key; + + if (map->map_type == BPF_MAP_TYPE_QUEUE || + map->map_type == BPF_MAP_TYPE_STACK) { + err = map->ops->map_pop_elem(map, value); + } else { + err = -ENOTSUPP; + } + + if (err) + goto free_value; + + if (copy_to_user(uvalue, value, value_size) != 0) + goto free_value; + + err = 0; + +free_value: + kfree(value); +free_key: + kfree(key); +err_put: + fdput(f); + return err; +} + static const struct bpf_prog_ops * const bpf_prog_types[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _prog_ops, @@ -2472,6 +2535,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_TASK_FD_QUERY: err = bpf_task_fd_query(&attr, uattr); break; + case BPF_MAP_LOOKUP_AND_DELETE_ELEM: + err = map_lookup_and_delete_elem(&attr); + break; default: err = -EINVAL; break; -- cgit v1.2.3 From b39b5f411dcfce28ff954e5d6acb2c11be3cb0ec Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 19 Oct 2018 09:57:57 -0700 Subject: 
bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB BPF programs of BPF_PROG_TYPE_CGROUP_SKB need to access headers in the skb. This patch enables direct access of skb for these programs. Two helper functions bpf_compute_and_save_data_end() and bpf_restore_data_end() are introduced. There are used in __cgroup_bpf_run_filter_skb(), to compute proper data_end for the BPF program, and restore original data afterwards. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 21 +++++++++++++++++++++ kernel/bpf/cgroup.c | 6 ++++++ net/core/filter.c | 36 +++++++++++++++++++++++++++++++++++- 3 files changed, 62 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/linux/filter.h b/include/linux/filter.h index 5771874bc01e..91b4c934f02e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -548,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb) cb->data_end = skb->data + skb_headlen(skb); } +/* Similar to bpf_compute_data_pointers(), except that save orginal + * data in cb->data and cb->meta_data for restore. + */ +static inline void bpf_compute_and_save_data_end( + struct sk_buff *skb, void **saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + *saved_data_end = cb->data_end; + cb->data_end = skb->data + skb_headlen(skb); +} + +/* Restore data saved by bpf_compute_data_pointers(). */ +static inline void bpf_restore_data_end( + struct sk_buff *skb, void *saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + cb->data_end = saved_data_end; +} + static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 00f6ed2e4f9a..9425c2fb872f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -553,6 +553,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, { unsigned int offset = skb->data - skb_network_header(skb); struct sock *save_sk; + void *saved_data_end; struct cgroup *cgrp; int ret; @@ -566,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, save_sk = skb->sk; skb->sk = sk; __skb_push(skb, offset); + + /* compute pointers for the bpf prog */ + bpf_compute_and_save_data_end(skb, &saved_data_end); + ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, bpf_prog_run_save_cb); + bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); skb->sk = save_sk; return ret == 1 ? 
0 : -EPERM; diff --git a/net/core/filter.c b/net/core/filter.c index ea48ec789b5c..5fd5139e8638 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5352,6 +5352,40 @@ static bool sk_filter_is_valid_access(int off, int size, return bpf_skb_is_valid_access(off, size, type, prog, info); } +static bool cg_skb_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + switch (off) { + case bpf_ctx_range(struct __sk_buff, tc_classid): + case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, flow_keys): + return false; + } + if (type == BPF_WRITE) { + switch (off) { + case bpf_ctx_range(struct __sk_buff, mark): + case bpf_ctx_range(struct __sk_buff, priority): + case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): + break; + default: + return false; + } + } + + switch (off) { + case bpf_ctx_range(struct __sk_buff, data): + info->reg_type = PTR_TO_PACKET; + break; + case bpf_ctx_range(struct __sk_buff, data_end): + info->reg_type = PTR_TO_PACKET_END; + break; + } + + return bpf_skb_is_valid_access(off, size, type, prog, info); +} + static bool lwt_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, @@ -7044,7 +7078,7 @@ const struct bpf_prog_ops xdp_prog_ops = { const struct bpf_verifier_ops cg_skb_verifier_ops = { .get_func_proto = cg_skb_func_proto, - .is_valid_access = sk_filter_is_valid_access, + .is_valid_access = cg_skb_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, }; -- cgit v1.2.3 From 540fefc08f75aedb517acbf525d393b8efddabd9 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 19 Oct 2018 13:52:38 -0700 Subject: bpf: remove unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix the following warning ../kernel/bpf/syscall.c: In function ‘map_lookup_and_delete_elem’: ../kernel/bpf/syscall.c:1010:22: warning: unused variable ‘ptr’ [-Wunused-variable] void *key, *value, *ptr; ^~~ Fixes: bd513cd08f10 ("bpf: add MAP_LOOKUP_AND_DELETE_ELEM syscall") Signed-off-by: Alexei Starovoitov --- kernel/bpf/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 49ae64a26562..ccb93277aae2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1007,7 +1007,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) void __user *uvalue = u64_to_user_ptr(attr->value); int ufd = attr->map_fd; struct bpf_map *map; - void *key, *value, *ptr; + void *key, *value; u32 value_size; struct fd f; int err; -- cgit v1.2.3 From 282447ba6b00c64678ffdf964f44e5c8b1c68377 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 18 Oct 2018 22:12:05 +0900 Subject: tracing: Fix synthetic event to accept unsigned modifier Fix synthetic event to accept unsigned modifier for its field type correctly. Currently, synthetic_events interface returns error for "unsigned" modifiers as below; # echo "myevent unsigned long var" >> synthetic_events sh: write error: Invalid argument This is because argv_split() breaks "unsigned long" into "unsigned" and "long", but parse_synth_field() doesn't expected it. 
With this fix, synthetic_events can handle the "unsigned long" correctly like as below; # echo "myevent unsigned long var" >> synthetic_events # cat synthetic_events myevent unsigned long var Link: http://lkml.kernel.org/r/153986832571.18251.8448135724590496531.stgit@devbox Cc: Shuah Khan Cc: Tom Zanussi Cc: stable@vger.kernel.org Fixes: commit 4b147936fa50 ("tracing: Add support for 'synthetic' events") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 85f6b01431c7..6ff83941065a 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -738,16 +738,30 @@ static void free_synth_field(struct synth_field *field) kfree(field); } -static struct synth_field *parse_synth_field(char *field_type, - char *field_name) +static struct synth_field *parse_synth_field(int argc, char **argv, + int *consumed) { struct synth_field *field; + const char *prefix = NULL; + char *field_type = argv[0], *field_name; int len, ret = 0; char *array; if (field_type[0] == ';') field_type++; + if (!strcmp(field_type, "unsigned")) { + if (argc < 3) + return ERR_PTR(-EINVAL); + prefix = "unsigned "; + field_type = argv[1]; + field_name = argv[2]; + *consumed = 3; + } else { + field_name = argv[1]; + *consumed = 2; + } + len = strlen(field_name); if (field_name[len - 1] == ';') field_name[len - 1] = '\0'; @@ -760,11 +774,15 @@ static struct synth_field *parse_synth_field(char *field_type, array = strchr(field_name, '['); if (array) len += strlen(array); + if (prefix) + len += strlen(prefix); field->type = kzalloc(len, GFP_KERNEL); if (!field->type) { ret = -ENOMEM; goto free; } + if (prefix) + strcat(field->type, prefix); strcat(field->type, field_type); if (array) { strcat(field->type, array); @@ -1009,7 +1027,7 @@ static int create_synth_event(int argc, char **argv) struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; struct synth_event *event = NULL; bool delete_event = false; - int i, n_fields = 0, ret = 0; + int i, consumed = 0, n_fields = 0, ret = 0; char *name; mutex_lock(&synth_event_mutex); @@ -1061,13 +1079,13 @@ static int create_synth_event(int argc, char **argv) goto err; } - field = parse_synth_field(argv[i], argv[i + 1]); + field = parse_synth_field(argc - i, &argv[i], &consumed); if (IS_ERR(field)) { ret = PTR_ERR(field); goto err; } - fields[n_fields] = field; - i++; n_fields++; + fields[n_fields++] = field; + i += consumed - 1; } if (i < argc) { -- cgit v1.2.3 From a360d9e4016c1fcf41553b37ad496870dc5723d0 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 18 Oct 2018 22:12:34 +0900 Subject: tracing: Fix synthetic event to allow semicolon at end Fix synthetic event to allow independent semicolon at end. The synthetic_events interface accepts a semicolon after the last word if there is no space. # echo "myevent u64 var;" >> synthetic_events But if there is a space, it returns an error. # echo "myevent u64 var ;" > synthetic_events sh: write error: Invalid argument This behavior is difficult for users to understand. Let's allow the last independent semicolon too. 
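Combined with the previous patch, a definition like the one below (event and field names made up purely for illustration) is accepted whether or not the final semicolon is attached to the last word:

  # echo 'wakeup_lat u64 lat; unsigned long delta ;' >> synthetic_events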
Link: http://lkml.kernel.org/r/153986835420.18251.2191216690677025744.stgit@devbox Cc: Shuah Khan Cc: Tom Zanussi Cc: stable@vger.kernel.org Fixes: commit 4b147936fa50 ("tracing: Add support for 'synthetic' events") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 6ff83941065a..d239004aaf29 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1088,7 +1088,7 @@ static int create_synth_event(int argc, char **argv) i += consumed - 1; } - if (i < argc) { + if (i < argc && strcmp(argv[i], ";") != 0) { ret = -EINVAL; goto err; } -- cgit v1.2.3 From 2a159c6f82381a458bc56e7e202b6bee57a2ccb7 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 21 Oct 2018 02:09:24 +0200 Subject: bpf, verifier: fix register type dump in xadd and st Using reg_type_str[insn->dst_reg] is incorrect since insn->dst_reg contains the register number but not the actual register type. Add a small reg_state() helper and use it to get to the type. Also fix up the test_verifier test cases that have an incorrect errstr. Fixes: 9d2be44a7f33 ("bpf: Reuse canonical string formatter for ctx errs") Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 19 +++++++++++++------ tools/testing/selftests/bpf/test_verifier.c | 10 +++++----- 2 files changed, 18 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7d6d9cf9ebd5..64e0981a4074 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1528,14 +1528,19 @@ static bool __is_pointer_value(bool allow_ptr_leaks, return reg->type != SCALAR_VALUE; } +static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) +{ + return cur_regs(env) + regno; +} + static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { - return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); + return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { - const struct bpf_reg_state *reg = cur_regs(env) + regno; + const struct bpf_reg_state *reg = reg_state(env, regno); return reg->type == PTR_TO_CTX || reg->type == PTR_TO_SOCKET; @@ -1543,7 +1548,7 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { - const struct bpf_reg_state *reg = cur_regs(env) + regno; + const struct bpf_reg_state *reg = reg_state(env, regno); return type_is_pkt_pointer(reg->type); } @@ -1958,7 +1963,8 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", - insn->dst_reg, reg_type_str[insn->dst_reg]); + insn->dst_reg, + reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } @@ -1983,7 +1989,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { - struct bpf_reg_state *reg = cur_regs(env) + regno; + struct bpf_reg_state *reg = reg_state(env, regno); struct bpf_func_state *state = func(env, reg); int off, i, slot, spi; @@ -5264,7 +5270,8 @@ static int do_check(struct bpf_verifier_env *env) if 
(is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d %s is not allowed\n", - insn->dst_reg, reg_type_str[insn->dst_reg]); + insn->dst_reg, + reg_type_str[reg_state(env, insn->dst_reg)->type]); return -EACCES; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index f1ae8d09770f..769d68a48f30 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -3430,7 +3430,7 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_ST stores into R1 inv is not allowed", + .errstr = "BPF_ST stores into R1 ctx is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -3442,7 +3442,7 @@ static struct bpf_test tests[] = { BPF_REG_0, offsetof(struct __sk_buff, mark), 0), BPF_EXIT_INSN(), }, - .errstr = "BPF_XADD stores into R1 inv is not allowed", + .errstr = "BPF_XADD stores into R1 ctx is not allowed", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -5670,7 +5670,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R2 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 inv is not allowed", + .errstr = "BPF_XADD stores into R1 ctx is not allowed", }, { "leak pointer into ctx 2", @@ -5685,7 +5685,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R10 leaks addr into mem", .result_unpriv = REJECT, .result = REJECT, - .errstr = "BPF_XADD stores into R1 inv is not allowed", + .errstr = "BPF_XADD stores into R1 ctx is not allowed", }, { "leak pointer into ctx 3", @@ -12634,7 +12634,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "BPF_XADD stores into R2 ctx", + .errstr = "BPF_XADD stores into R2 pkt is not allowed", .prog_type = BPF_PROG_TYPE_XDP, }, { -- cgit v1.2.3 From 4b5defdec398491c5b301a6255cdf468eedfb228 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 21 Oct 2018 02:09:25 +0200 Subject: bpf, verifier: reject xadd on flow key memory We should not enable xadd operation for flow key memory if not needed there anyway. There is no such issue as described in the commit f37a8cb84cce ("bpf: reject stores into ctx via st and xadd") since there's no context rewriter for flow keys today, but it also shouldn't become part of the user facing behavior to allow for it. After patch: 0: (79) r7 = *(u64 *)(r1 +144) 1: (b7) r3 = 4096 2: (db) lock *(u64 *)(r7 +0) += r3 BPF_XADD stores into R7 flow_keys is not allowed Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 64e0981a4074..0450ffcc3de4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1553,6 +1553,14 @@ static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) return type_is_pkt_pointer(reg->type); } +static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) +{ + const struct bpf_reg_state *reg = reg_state(env, regno); + + /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. 
*/ + return reg->type == PTR_TO_FLOW_KEYS; +} + static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) @@ -1961,7 +1969,8 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins } if (is_ctx_reg(env, insn->dst_reg) || - is_pkt_reg(env, insn->dst_reg)) { + is_pkt_reg(env, insn->dst_reg) || + is_flow_key_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str[reg_state(env, insn->dst_reg)->type]); -- cgit v1.2.3 From ad38911dcdb6978a415db65b3e00e0f3fcd9edfc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 21 Oct 2018 02:09:26 +0200 Subject: bpf, verifier: remove unneeded flow key in check_helper_mem_access They PTR_TO_FLOW_KEYS is not used today to be passed into a helper as memory, so it can be removed from check_helper_mem_access(). Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0450ffcc3de4..4f727c9eb45c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2077,8 +2077,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); - case PTR_TO_FLOW_KEYS: - return check_flow_keys_access(env, reg->off, access_size); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); -- cgit v1.2.3 From 84430d4232c36cb858564374f839d233fd6f623a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sun, 21 Oct 2018 02:09:27 +0200 Subject: bpf, verifier: avoid retpoline for map push/pop/peek operation Extend prior work from 09772d92cd5a ("bpf: avoid retpoline for lookup/update/delete calls on maps") to also apply to the recently added map helpers that perform push/pop/peek operations so that the indirect call can be avoided. 
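Concretely, when the verifier knows which map a call operates on, fixup_bpf_calls() now patches e.g. a bpf_map_push_elem() call site into a direct call to that map's implementation, so the JITed code no longer goes through a retpoline-protected indirect branch. Condensed from the hunk below:

	case BPF_FUNC_map_push_elem:
		insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
			    __bpf_call_base;
		continue;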
Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4f727c9eb45c..98fa0be35370 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6178,7 +6178,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || - insn->imm == BPF_FUNC_map_delete_elem)) { + insn->imm == BPF_FUNC_map_delete_elem || + insn->imm == BPF_FUNC_map_push_elem || + insn->imm == BPF_FUNC_map_pop_elem || + insn->imm == BPF_FUNC_map_peek_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; @@ -6211,6 +6214,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_push_elem, + (int (*)(struct bpf_map *map, void *value, + u64 flags))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_pop_elem, + (int (*)(struct bpf_map *map, void *value))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_peek_elem, + (int (*)(struct bpf_map *map, void *value))NULL)); + switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - @@ -6224,6 +6235,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; + case BPF_FUNC_map_push_elem: + insn->imm = BPF_CAST_CALL(ops->map_push_elem) - + __bpf_call_base; + continue; + case BPF_FUNC_map_pop_elem: + insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - + __bpf_call_base; + continue; + case BPF_FUNC_map_peek_elem: + insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - + __bpf_call_base; + continue; } goto patch_call_imm; -- cgit v1.2.3 From bcfa4b72111c9a4d483024cb1f877803b354aa11 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 15 Aug 2018 14:22:16 -0400 Subject: memremap: Convert to XArray Use the new xa_store_range function instead of the radix tree. Signed-off-by: Matthew Wilcox --- kernel/memremap.c | 76 ++++++++++++------------------------------------------- 1 file changed, 16 insertions(+), 60 deletions(-) (limited to 'kernel') diff --git a/kernel/memremap.c b/kernel/memremap.c index 5b8600d39931..e842fab9f184 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -1,47 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright(c) 2015 Intel Corporation. All rights reserved. */ -#include #include -#include -#include #include #include -#include #include +#include +#include #include #include +#include #include +#include -static DEFINE_MUTEX(pgmap_lock); -static RADIX_TREE(pgmap_radix, GFP_KERNEL); +static DEFINE_XARRAY(pgmap_array); #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1) #define SECTION_SIZE (1UL << PA_SECTION_SHIFT) -static unsigned long order_at(struct resource *res, unsigned long pgoff) -{ - unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff; - unsigned long nr_pages, mask; - - nr_pages = PHYS_PFN(resource_size(res)); - if (nr_pages == pgoff) - return ULONG_MAX; - - /* - * What is the largest aligned power-of-2 range available from - * this resource pgoff to the end of the resource range, - * considering the alignment of the current pgoff? 
- */ - mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff); - if (!mask) - return ULONG_MAX; - - return find_first_bit(&mask, BITS_PER_LONG); -} - -#define foreach_order_pgoff(res, order, pgoff) \ - for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \ - pgoff += 1UL << order, order = order_at((res), pgoff)) - #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, unsigned long addr, @@ -70,18 +44,10 @@ vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, EXPORT_SYMBOL(device_private_entry_fault); #endif /* CONFIG_DEVICE_PRIVATE */ -static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff) +static void pgmap_array_delete(struct resource *res) { - unsigned long pgoff, order; - - mutex_lock(&pgmap_lock); - foreach_order_pgoff(res, order, pgoff) { - if (pgoff >= end_pgoff) - break; - radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff); - } - mutex_unlock(&pgmap_lock); - + xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), + NULL, GFP_KERNEL); synchronize_rcu(); } @@ -142,7 +108,7 @@ static void devm_memremap_pages_release(void *data) mem_hotplug_done(); untrack_pfn(NULL, PHYS_PFN(align_start), align_size); - pgmap_radix_release(res, -1); + pgmap_array_delete(res); dev_WARN_ONCE(dev, pgmap->altmap.alloc, "%s: failed to free all reserved pages\n", __func__); } @@ -175,7 +141,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) struct vmem_altmap *altmap = pgmap->altmap_valid ? &pgmap->altmap : NULL; struct resource *res = &pgmap->res; - unsigned long pfn, pgoff, order; + unsigned long pfn; pgprot_t pgprot = PAGE_KERNEL; int error, nid, is_ram; struct dev_pagemap *conflict_pgmap; @@ -216,20 +182,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) pgmap->dev = dev; - mutex_lock(&pgmap_lock); - error = 0; - - foreach_order_pgoff(res, order, pgoff) { - error = __radix_tree_insert(&pgmap_radix, - PHYS_PFN(res->start) + pgoff, order, pgmap); - if (error) { - dev_err(dev, "%s: failed: %d\n", __func__, error); - break; - } - } - mutex_unlock(&pgmap_lock); + error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), + PHYS_PFN(res->end), pgmap, GFP_KERNEL)); if (error) - goto err_radix; + goto err_array; nid = dev_to_node(dev); if (nid < 0) @@ -279,8 +235,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) err_kasan: untrack_pfn(NULL, PHYS_PFN(align_start), align_size); err_pfn_remap: - err_radix: - pgmap_radix_release(res, pgoff); + pgmap_array_delete(res); + err_array: return ERR_PTR(error); } EXPORT_SYMBOL(devm_memremap_pages); @@ -320,7 +276,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn, /* fall back to slow path lookup */ rcu_read_lock(); - pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys)); + pgmap = xa_load(&pgmap_array, PHYS_PFN(phys)); if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) pgmap = NULL; rcu_read_unlock(); -- cgit v1.2.3 From 977e4be5eb714c48a67afc26a6c477f24130a1f2 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 20 Oct 2018 09:26:49 +0200 Subject: x86/stackprotector: Remove the call to boot_init_stack_canary() from cpu_startup_entry() The following commit: d7880812b359 ("idle: Add the stack canary init to cpu_startup_entry()") ... added an x86 specific boot_init_stack_canary() call to the generic cpu_startup_entry() as a temporary hack, with the intention to remove the #ifdef CONFIG_X86 later. 
More than 5 years later let's finally realize that plan! :-) While implementing stack protector support for PowerPC, we found that calling boot_init_stack_canary() is also needed for PowerPC which uses per task (TLS) stack canary like the X86. However, calling boot_init_stack_canary() would break architectures using a global stack canary (ARM, SH, MIPS and XTENSA). Instead of modifying the #ifdef CONFIG_X86 to an even messier: #if defined(CONFIG_X86) || defined(CONFIG_PPC) PowerPC implemented the call to boot_init_stack_canary() in the function calling cpu_startup_entry(). Let's try the same cleanup on the x86 side as well. On x86 we have two functions calling cpu_startup_entry(): - start_secondary() - cpu_bringup_and_idle() start_secondary() already calls boot_init_stack_canary(), so it's good, and this patch adds the call to boot_init_stack_canary() in cpu_bringup_and_idle(). I.e. now x86 catches up to the rest of the world and the ugly init sequence in init/main.c can be removed from cpu_startup_entry(). As a final benefit we can also remove the dependency from . [ mingo: Improved the changelog a bit, added language explaining x86 borkage and sched.h change. ] Signed-off-by: Christophe Leroy Reviewed-by: Juergen Gross Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linuxppc-dev@lists.ozlabs.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20181020072649.5B59310483E@pc16082vm.idsi0.si.c-s.fr Signed-off-by: Ingo Molnar --- arch/x86/xen/smp_pv.c | 2 ++ kernel/sched/idle.c | 15 --------------- kernel/sched/sched.h | 1 - 3 files changed, 2 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index e3b18ad49889..145506f9fdbe 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -88,6 +89,7 @@ static void cpu_bringup(void) asmlinkage __visible void cpu_bringup_and_idle(void) { cpu_bringup(); + boot_init_stack_canary(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 16f84142f2f4..f5516bae0c1b 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -347,21 +347,6 @@ EXPORT_SYMBOL_GPL(play_idle); void cpu_startup_entry(enum cpuhp_state state) { - /* - * This #ifdef needs to die, but it's too late in the cycle to - * make this generic (ARM and SH have never invoked the canary - * init for the non boot CPUs!). Will be fixed in 3.11 - */ -#ifdef CONFIG_X86 - /* - * If we're the non-boot CPU, nothing set the stack canary up - * for us. The boot CPU already has it initialized but no harm - * in doing it again. This is a good place for updating it, as - * we wont ever return from this function (so the invalid - * canaries already on the stack wont ever trigger). 
- */ - boot_init_stack_canary(); -#endif arch_cpu_idle_prepare(); cpuhp_online_idle(state); while (1) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4a2e8cae63c4..5b00a816a4b3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -56,7 +56,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 876dcf2f3aaa0f68d437b368b93a4c4b81521191 Mon Sep 17 00:00:00 2001 From: Olivier Brunel Date: Sat, 20 Oct 2018 19:39:56 +0200 Subject: umh: Add command line to user mode helpers User mode helpers were spawned without a command line, and because an empty command line is used by many tools to identify processes as kernel threads, this could cause some issues. Notably during killing spree on shutdown, since such helper would then be skipped (i.e. not killed) which would result in the process remaining alive, and thus preventing unmouting of the rootfs (as experienced with the bpfilter umh). Fixes: 449325b52b7a ("umh: introduce fork_usermode_blob() helper") Signed-off-by: Olivier Brunel Signed-off-by: David S. Miller --- include/linux/umh.h | 1 + kernel/umh.c | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/umh.h b/include/linux/umh.h index 5c812acbb80a..235f51b62c71 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -44,6 +44,7 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); struct umh_info { + const char *cmdline; struct file *pipe_to_umh; struct file *pipe_from_umh; pid_t pid; diff --git a/kernel/umh.c b/kernel/umh.c index c449858946af..0baa672e023c 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -405,11 +405,19 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file, void (*cleanup)(struct subprocess_info *info), void *data) { struct subprocess_info *sub_info; + struct umh_info *info = data; + const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper"; sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL); if (!sub_info) return NULL; + sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL); + if (!sub_info->argv) { + kfree(sub_info); + return NULL; + } + INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); sub_info->path = "none"; sub_info->file = file; @@ -458,10 +466,11 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) return 0; } -static void umh_save_pid(struct subprocess_info *info) +static void umh_clean_and_save_pid(struct subprocess_info *info) { struct umh_info *umh_info = info->data; + argv_free(info->argv); umh_info->pid = info->pid; } @@ -471,6 +480,9 @@ static void umh_save_pid(struct subprocess_info *info) * @len: length of the blob * @info: information about usermode process (shouldn't be NULL) * + * If info->cmdline is set it will be used as command line for the + * user process, else "usermodehelper" is used. + * * Returns either negative error or zero which indicates success * in executing a blob of bytes as a usermode process. 
In such * case 'struct umh_info *info' is populated with two pipes @@ -500,7 +512,7 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info) err = -ENOMEM; sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup, - umh_save_pid, info); + umh_clean_and_save_pid, info); if (!sub_info) goto out; -- cgit v1.2.3 From d7b31359ecef8d32540266f39d99892f61d17c4b Mon Sep 17 00:00:00 2001 From: Lénaïc Huard Date: Thu, 22 Mar 2018 23:53:05 +0100 Subject: kvm_config: add CONFIG_VIRTIO_MENU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that make kvmconfig enables all the virtio drivers even if it is preceded by a make allnoconfig. Signed-off-by: Lénaïc Huard Signed-off-by: Michael S. Tsirkin --- kernel/configs/kvm_guest.config | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config index 108fecc20fc1..208481d91090 100644 --- a/kernel/configs/kvm_guest.config +++ b/kernel/configs/kvm_guest.config @@ -20,6 +20,7 @@ CONFIG_PARAVIRT=y CONFIG_KVM_GUEST=y CONFIG_S390_GUEST=y CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_CONSOLE=y -- cgit v1.2.3 From 145d952a29320dea883246bcb24ba1da7ac4bb7f Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 4 Oct 2018 14:04:02 +0200 Subject: sched: Factor out nr_iowait and nr_iowait_cpu The function nr_iowait_cpu() can be used directly by nr_iowait() instead of duplicating code. Call nr_iowait_cpu() from nr_iowait() Signed-off-by: Daniel Lezcano Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rafael J. Wysocki --- kernel/sched/core.c | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fe0223121883..9245c56b8f5f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2875,6 +2875,25 @@ unsigned long long nr_context_switches(void) return sum; } +/* + * Consumers of these two interfaces, like for example the cpuidle menu + * governor, are using nonsensical data. Preferring shallow idle state selection + * for a CPU that has IO-wait which might not even end up running the task when + * it does become runnable. + */ + +unsigned long nr_iowait_cpu(int cpu) +{ + return atomic_read(&cpu_rq(cpu)->nr_iowait); +} + +void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) +{ + struct rq *rq = this_rq(); + *nr_waiters = atomic_read(&rq->nr_iowait); + *load = rq->load.weight; +} + /* * IO-wait accounting, and how its mostly bollocks (on SMP). * @@ -2910,31 +2929,11 @@ unsigned long nr_iowait(void) unsigned long i, sum = 0; for_each_possible_cpu(i) - sum += atomic_read(&cpu_rq(i)->nr_iowait); + sum += nr_iowait_cpu(i); return sum; } -/* - * Consumers of these two interfaces, like for example the cpuidle menu - * governor, are using nonsensical data. Preferring shallow idle state selection - * for a CPU that has IO-wait which might not even end up running the task when - * it does become runnable. 
- */ - -unsigned long nr_iowait_cpu(int cpu) -{ - struct rq *this = cpu_rq(cpu); - return atomic_read(&this->nr_iowait); -} - -void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) -{ - struct rq *rq = this_rq(); - *nr_waiters = atomic_read(&rq->nr_iowait); - *load = rq->load.weight; -} - #ifdef CONFIG_SMP /* -- cgit v1.2.3 From a7fe5190c03f8137ef08db84a58dd4daf2c4785d Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Thu, 4 Oct 2018 14:04:03 +0200 Subject: cpuidle: menu: Remove get_loadavg() from the performance multiplier The function get_loadavg() returns almost always zero. To be more precise, statistically speaking for a total of 1023379 times passing in the function, the load is equal to zero 1020728 times, greater than 100, 610 times, the remaining is between 0 and 5. In 2011, the get_loadavg() was removed from the Android tree because of the above [1]. At this time, the load was: unsigned long this_cpu_load(void) { struct rq *this = this_rq(); return this->cpu_load[0]; } In 2014, the code was changed by commit 372ba8cb46b2 (cpuidle: menu: Lookup CPU runqueues less) and the load is: void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) { struct rq *rq = this_rq(); *nr_waiters = atomic_read(&rq->nr_iowait); *load = rq->load.weight; } with the same result. Both measurements show using the load in this code path does no matter anymore. Removing it. [1] https://android.googlesource.com/kernel/common/+/4dedd9f124703207895777ac6e91dacde0f7cc17 Signed-off-by: Daniel Lezcano Acked-by: Mel Gorman Acked-by: Peter Zijlstra (Intel) Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/governors/menu.c | 25 ++++++------------------- include/linux/sched/stat.h | 1 - kernel/sched/core.c | 7 ------- 3 files changed, 6 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 575a68f31761..76df4f947f07 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -134,11 +134,6 @@ struct menu_device { #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) -static inline int get_loadavg(unsigned long load) -{ - return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10; -} - static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters) { int bucket = 0; @@ -172,18 +167,10 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters * to be, the higher this multiplier, and thus the higher * the barrier to go to an expensive C state. */ -static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load) +static inline int performance_multiplier(unsigned long nr_iowaiters) { - int mult = 1; - - /* for higher loadavg, we are more reluctant */ - - mult += 2 * get_loadavg(load); - - /* for IO wait tasks (per cpu!) we add 5x each */ - mult += 10 * nr_iowaiters; - - return mult; + /* for IO wait tasks (per cpu!) 
we add 10x each */ + return 1 + 10 * nr_iowaiters; } static DEFINE_PER_CPU(struct menu_device, menu_devices); @@ -301,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, int idx; unsigned int interactivity_req; unsigned int predicted_us; - unsigned long nr_iowaiters, cpu_load; + unsigned long nr_iowaiters; ktime_t delta_next; if (data->needs_update) { @@ -312,7 +299,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, /* determine the expected residency time, round up */ data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next)); - get_iowait_load(&nr_iowaiters, &cpu_load); + nr_iowaiters = nr_iowait_cpu(dev->cpu); data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); if (unlikely(drv->state_count <= 1 || latency_req == 0) || @@ -356,7 +343,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * Use the performance multiplier and the user-configurable * latency_req to determine the maximum exit latency. */ - interactivity_req = predicted_us / performance_multiplier(nr_iowaiters, cpu_load); + interactivity_req = predicted_us / performance_multiplier(nr_iowaiters); if (latency_req > interactivity_req) latency_req = interactivity_req; } diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index 04f1321d14c4..f30954cc059d 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -20,7 +20,6 @@ extern unsigned long nr_running(void); extern bool single_task_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); -extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); static inline int sched_info_on(void) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9245c56b8f5f..c8d5c279be14 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2887,13 +2887,6 @@ unsigned long nr_iowait_cpu(int cpu) return atomic_read(&cpu_rq(cpu)->nr_iowait); } -void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) -{ - struct rq *rq = this_rq(); - *nr_waiters = atomic_read(&rq->nr_iowait); - *load = rq->load.weight; -} - /* * IO-wait accounting, and how its mostly bollocks (on SMP). * -- cgit v1.2.3 From a1c6ca3c6de763459a6e93b644ec6518c890ba1c Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 17 Oct 2018 13:23:55 +0200 Subject: kernel: hung_task.c: disable on suspend It is possible to observe hung_task complaints when system goes to suspend-to-idle state: # echo freeze > /sys/power/state PM: Syncing filesystems ... done. Freezing user space processes ... (elapsed 0.001 seconds) done. OOM killer disabled. Freezing remaining freezable tasks ... (elapsed 0.002 seconds) done. sd 0:0:0:0: [sda] Synchronizing SCSI cache INFO: task bash:1569 blocked for more than 120 seconds. Not tainted 4.19.0-rc3_+ #687 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. bash D 0 1569 604 0x00000000 Call Trace: ? __schedule+0x1fe/0x7e0 schedule+0x28/0x80 suspend_devices_and_enter+0x4ac/0x750 pm_suspend+0x2c0/0x310 Register a PM notifier to disable the detector on suspend and re-enable back on wakeup. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Rafael J. 
Wysocki --- kernel/hung_task.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index b9132d1269ef..cb8e3e8ac7b9 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -242,6 +243,28 @@ void reset_hung_task_detector(void) } EXPORT_SYMBOL_GPL(reset_hung_task_detector); +static bool hung_detector_suspended; + +static int hungtask_pm_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + hung_detector_suspended = true; + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + hung_detector_suspended = false; + break; + default: + break; + } + return NOTIFY_OK; +} + /* * kthread which checks for tasks stuck in D state */ @@ -261,7 +284,8 @@ static int watchdog(void *dummy) interval = min_t(unsigned long, interval, timeout); t = hung_timeout_jiffies(hung_last_checked, interval); if (t <= 0) { - if (!atomic_xchg(&reset_hung_task, 0)) + if (!atomic_xchg(&reset_hung_task, 0) && + !hung_detector_suspended) check_hung_uninterruptible_tasks(timeout); hung_last_checked = jiffies; continue; @@ -275,6 +299,10 @@ static int watchdog(void *dummy) static int __init hung_task_init(void) { atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + + /* Disable hung task detector on suspend */ + pm_notifier(hungtask_pm_notify, 0); + watchdog_task = kthread_run(watchdog, NULL, "khungtaskd"); return 0; -- cgit v1.2.3 From f592f804831f1cf9d1f9966f58c80f150e6829b5 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 24 Oct 2018 20:15:17 +0900 Subject: bpf: devmap: fix wrong interface selection in notifier_call The dev_map_notification() removes interface in devmap if unregistering interface's ifindex is same. But only checking ifindex is not enough because other netns can have same ifindex. so that wrong interface selection could occurred. Hence netdev pointer comparison code is added. v2: compare netdev pointer instead of using net_eq() (Daniel Borkmann) v1: Initial patch Fixes: 2ddf71e23cc2 ("net: add notifier hooks for devmap bpf map") Signed-off-by: Taehee Yoo Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/devmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 141710b82a6c..191b79948424 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -512,8 +512,7 @@ static int dev_map_notification(struct notifier_block *notifier, struct bpf_dtab_netdev *dev, *odev; dev = READ_ONCE(dtab->netdev_map[i]); - if (!dev || - dev->dev->ifindex != netdev->ifindex) + if (!dev || netdev != dev->dev) continue; odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); if (dev == odev) -- cgit v1.2.3 From 4a6998aff82a20a1aece86a186d8e5263f8b2315 Mon Sep 17 00:00:00 2001 From: Martin Lau Date: Wed, 24 Oct 2018 20:42:25 +0000 Subject: bpf, btf: fix a missing check bug in btf_parse Wenwen Wang reported: In btf_parse(), the header of the user-space btf data 'btf_data' is firstly parsed and verified through btf_parse_hdr(). In btf_parse_hdr(), the header is copied from user-space 'btf_data' to kernel-space 'btf->hdr' and then verified. 
If no error happens during the verification process, the whole data of 'btf_data', including the header, is then copied to 'data' in btf_parse(). It is obvious that the header is copied twice here. More importantly, no check is enforced after the second copy to make sure the headers obtained in these two copies are same. Given that 'btf_data' resides in the user space, a malicious user can race to modify the header between these two copies. By doing so, the user can inject inconsistent data, which can cause undefined behavior of the kernel and introduce potential security risk. This issue is similar to the one fixed in commit 8af03d1ae2e1 ("bpf: btf: Fix a missing check bug"). To fix it, this patch copies the user 'btf_data' *before* parsing / verifying the BTF header. Fixes: 69b693f0aefa ("bpf: btf: Introduce BPF Type Format (BTF)") Signed-off-by: Martin KaFai Lau Co-developed-by: Wenwen Wang Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/btf.c | 58 ++++++++++++++++++++++++-------------------------------- 1 file changed, 25 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 378cef70341c..ee4c82667d65 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2067,56 +2067,47 @@ static int btf_check_sec_info(struct btf_verifier_env *env, return 0; } -static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data, - u32 btf_data_size) +static int btf_parse_hdr(struct btf_verifier_env *env) { + u32 hdr_len, hdr_copy, btf_data_size; const struct btf_header *hdr; - u32 hdr_len, hdr_copy; - /* - * Minimal part of the "struct btf_header" that - * contains the hdr_len. - */ - struct btf_min_header { - u16 magic; - u8 version; - u8 flags; - u32 hdr_len; - } __user *min_hdr; struct btf *btf; int err; btf = env->btf; - min_hdr = btf_data; + btf_data_size = btf->data_size; - if (btf_data_size < sizeof(*min_hdr)) { + if (btf_data_size < + offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) { btf_verifier_log(env, "hdr_len not found"); return -EINVAL; } - if (get_user(hdr_len, &min_hdr->hdr_len)) - return -EFAULT; - + hdr = btf->data; + hdr_len = hdr->hdr_len; if (btf_data_size < hdr_len) { btf_verifier_log(env, "btf_header not found"); return -EINVAL; } - err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len); - if (err) { - if (err == -E2BIG) - btf_verifier_log(env, "Unsupported btf_header"); - return err; + /* Ensure the unsupported header fields are zero */ + if (hdr_len > sizeof(btf->hdr)) { + u8 *expected_zero = btf->data + sizeof(btf->hdr); + u8 *end = btf->data + hdr_len; + + for (; expected_zero < end; expected_zero++) { + if (*expected_zero) { + btf_verifier_log(env, "Unsupported btf_header"); + return -E2BIG; + } + } } hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); - if (copy_from_user(&btf->hdr, btf_data, hdr_copy)) - return -EFAULT; + memcpy(&btf->hdr, btf->data, hdr_copy); hdr = &btf->hdr; - if (hdr->hdr_len != hdr_len) - return -EINVAL; - btf_verifier_log_hdr(env, btf_data_size); if (hdr->magic != BTF_MAGIC) { @@ -2186,10 +2177,6 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, } env->btf = btf; - err = btf_parse_hdr(env, btf_data, btf_data_size); - if (err) - goto errout; - data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); if (!data) { err = -ENOMEM; @@ -2198,13 +2185,18 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, btf->data = data; btf->data_size = btf_data_size; - btf->nohdr_data = btf->data + btf->hdr.hdr_len; 
if (copy_from_user(data, btf_data, btf_data_size)) { err = -EFAULT; goto errout; } + err = btf_parse_hdr(env); + if (err) + goto errout; + + btf->nohdr_data = btf->data + btf->hdr.hdr_len; + err = btf_parse_str_sec(env); if (err) goto errout; -- cgit v1.2.3 From 5d66fa7d9e9e9399ddfdc530f352dd6f7c724485 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 24 Oct 2018 22:05:45 +0200 Subject: bpf: fix direct packet access for flow dissector progs Commit d58e468b1112 ("flow_dissector: implements flow dissector BPF hook") added direct packet access for skbs in may_access_direct_pkt_data() function where this enables read and write access to the skb->data. This is buggy because without a prologue generator such as bpf_unclone_prologue() we would allow for writing into cloned skbs. Original intention might have been to only allow read access where this is not needed (similar as the flow_dissector_func_proto() indicates which enables only bpf_skb_load_bytes() as well), therefore this patch fixes it to restrict to read-only. Fixes: d58e468b1112 ("flow_dissector: implements flow dissector BPF hook") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Petar Penkov Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 98fa0be35370..b0cc8f2ff95f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1387,21 +1387,23 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, enum bpf_access_type t) { switch (env->prog->type) { + /* Program types only with direct read access go here! */ case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: - /* dst_input() and dst_output() can't write for now */ + case BPF_PROG_TYPE_FLOW_DISSECTOR: if (t == BPF_WRITE) return false; /* fallthrough */ + + /* Program types with direct read + write access go here! */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: - case BPF_PROG_TYPE_FLOW_DISSECTOR: if (meta) return meta->pkt_access; -- cgit v1.2.3 From d5563d367c2ce48ea3d675c77f7109f37311943d Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 24 Oct 2018 22:05:46 +0200 Subject: bpf: fix cg_skb types to hint access type in may_access_direct_pkt_data Commit b39b5f411dcf ("bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB") added direct packet access for skbs in cg_skb program types, however allowed access type was not added to the may_access_direct_pkt_data() helper. Therefore the latter always returns false. This is not directly an issue, it just means writes are unconditionally disabled (which is correct) but also reads. Latter is relevant in this function when BPF helpers may read direct packet data which is unconditionally disabled then. Fix it by properly adding BPF_PROG_TYPE_CGROUP_SKB to may_access_direct_pkt_data(). 
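For context, a minimal cgroup_skb program doing a (read-only) direct packet access looks roughly like this; it is only an illustrative sketch, not part of the patch, and the header and section naming follow the usual BPF selftests conventions:

	#include <linux/bpf.h>
	#include <linux/in.h>
	#include <linux/ip.h>
	#include "bpf_helpers.h"

	SEC("cgroup/skb")
	int cg_skb_read(struct __sk_buff *skb)
	{
		void *data = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;
		struct iphdr *iph = data;	/* data starts at the network header */

		/* the verifier requires an explicit bounds check before any access */
		if ((void *)(iph + 1) > data_end)
			return 1;				/* 1 == allow */

		return iph->protocol == IPPROTO_UDP ? 0 : 1;	/* e.g. drop UDP */
	}

	char _license[] SEC("license") = "GPL";

With the hunk below, helpers that only read packet data become usable from such programs as well, while writes stay rejected.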
Fixes: b39b5f411dcf ("bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b0cc8f2ff95f..5fc9a658af0e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1393,6 +1393,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_CGROUP_SKB: if (t == BPF_WRITE) return false; /* fallthrough */ -- cgit v1.2.3 From 80b0d86a176cab6201719b8dfd806902b0c6e046 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 24 Oct 2018 22:05:47 +0200 Subject: bpf: fix direct packet write into pop/peek helpers Commit f1a2e44a3aec ("bpf: add queue and stack maps") probably just copy-pasted .pkt_access for bpf_map_{pop,peek}_elem() helpers, but this is buggy in this context since it would allow writes into cloned skbs which is invalid. Therefore, disable .pkt_access for the two. Fixes: f1a2e44a3aec ("bpf: add queue and stack maps") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Mauricio Vasquez B Acked-by: Mauricio Vasquez B Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index ab0d5e3f9892..a74972b07e74 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -99,7 +99,6 @@ BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) const struct bpf_func_proto bpf_map_pop_elem_proto = { .func = bpf_map_pop_elem, .gpl_only = false, - .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, @@ -113,7 +112,6 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) const struct bpf_func_proto bpf_map_peek_elem_proto = { .func = bpf_map_pop_elem, .gpl_only = false, - .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, -- cgit v1.2.3 From d3f66e4116aff8dd0d5bd4067295b9ddb5e2c29c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 24 Oct 2018 22:05:48 +0200 Subject: bpf: fix leaking uninitialized memory on pop/peek helpers Commit f1a2e44a3aec ("bpf: add queue and stack maps") added helpers with ARG_PTR_TO_UNINIT_MAP_VALUE. Meaning, the helper is supposed to fill the map value buffer with data instead of reading from it like in other helpers such as map update. However, given the buffer is allowed to be uninitialized (since we fill it in the helper anyway), it also means that the helper is obliged to wipe the memory in case of an error in order to not allow for leaking uninitialized memory. Given pop/peek is both handled inside __{stack,queue}_map_get(), lets wipe it there on error case, that is, empty stack/queue. 
Fixes: f1a2e44a3aec ("bpf: add queue and stack maps") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Mauricio Vasquez B Acked-by: Mauricio Vasquez B Signed-off-by: Alexei Starovoitov --- kernel/bpf/queue_stack_maps.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 12a93fb37449..8bbd72d3a121 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -122,6 +122,7 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete) raw_spin_lock_irqsave(&qs->lock, flags); if (queue_stack_map_is_empty(qs)) { + memset(value, 0, qs->map.value_size); err = -ENOENT; goto out; } @@ -151,6 +152,7 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete) raw_spin_lock_irqsave(&qs->lock, flags); if (queue_stack_map_is_empty(qs)) { + memset(value, 0, qs->map.value_size); err = -ENOENT; goto out; } -- cgit v1.2.3 From b09928b976280d64060d7bee146d7df5c5a29bef Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 24 Oct 2018 22:05:49 +0200 Subject: bpf: make direct packet write unclone more robust Given this seems to be quite fragile and can easily slip through the cracks, lets make direct packet write more robust by requiring that future program types which allow for such write must provide a prologue callback. In case of XDP and sk_msg it's noop, thus add a generic noop handler there. The latter starts out with NULL data/data_end unconditionally when sg pages are shared. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 6 +++++- net/core/filter.c | 11 +++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5fc9a658af0e..171a2c88e77d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5709,7 +5709,11 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) bool is_narrower_load; u32 target_size; - if (ops->gen_prologue) { + if (ops->gen_prologue || env->seen_direct_write) { + if (!ops->gen_prologue) { + verbose(env, "bpf verifier is misconfigured\n"); + return -EINVAL; + } cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { diff --git a/net/core/filter.c b/net/core/filter.c index 3fdddfa9a0fd..cd648d09a8e5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5644,6 +5644,15 @@ static bool sock_filter_is_valid_access(int off, int size, prog->expected_attach_type); } +static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + /* Neither direct read nor direct write requires any preliminary + * action. 
+ */ + return 0; +} + static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog, int drop_verdict) { @@ -7210,6 +7219,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = { .get_func_proto = xdp_func_proto, .is_valid_access = xdp_is_valid_access, .convert_ctx_access = xdp_convert_ctx_access, + .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops xdp_prog_ops = { @@ -7308,6 +7318,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = { .get_func_proto = sk_msg_func_proto, .is_valid_access = sk_msg_is_valid_access, .convert_ctx_access = sk_msg_convert_ctx_access, + .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops sk_msg_prog_ops = { -- cgit v1.2.3 From ede95a63b5e84ddeea6b0c473b36ab8bfd8c6ce3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 23 Oct 2018 01:11:04 +0200 Subject: bpf: add bpf_jit_limit knob to restrict unpriv allocations Rick reported that the BPF JIT could potentially fill the entire module space with BPF programs from unprivileged users which would prevent later attempts to load normal kernel modules or privileged BPF programs, for example. If JIT was enabled but unsuccessful to generate the image, then before commit 290af86629b2 ("bpf: introduce BPF_JIT_ALWAYS_ON config") we would always fall back to the BPF interpreter. Nowadays in the case where the CONFIG_BPF_JIT_ALWAYS_ON could be set, then the load will abort with a failure since the BPF interpreter was compiled out. Add a global limit and enforce it for unprivileged users such that in case of BPF interpreter compiled out we fail once the limit has been reached or we fall back to BPF interpreter earlier w/o using module mem if latter was compiled in. In a next step, fair share among unprivileged users can be resolved in particular for the case where we would fail hard once limit is reached. Fixes: 290af86629b2 ("bpf: introduce BPF_JIT_ALWAYS_ON config") Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64") Co-Developed-by: Rick Edgecombe Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Eric Dumazet Cc: Jann Horn Cc: Kees Cook Cc: LKML Signed-off-by: Alexei Starovoitov --- Documentation/sysctl/net.txt | 8 ++++++++ include/linux/filter.h | 1 + kernel/bpf/core.c | 49 +++++++++++++++++++++++++++++++++++++++++--- net/core/sysctl_net_core.c | 10 +++++++-- 4 files changed, 63 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 9ecde517728c..2793d4eac55f 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -92,6 +92,14 @@ Values : 0 - disable JIT kallsyms export (default value) 1 - enable JIT kallsyms export for privileged users only +bpf_jit_limit +------------- + +This enforces a global limit for memory allocations to the BPF JIT +compiler in order to reject unprivileged JIT requests once it has +been surpassed. bpf_jit_limit contains the value of the global limit +in bytes. 
+ dev_weight -------------- diff --git a/include/linux/filter.h b/include/linux/filter.h index 91b4c934f02e..de629b706d1d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; +extern int bpf_jit_limit; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7c7eeea8cffc..6377225b2082 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -365,10 +365,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) } #ifdef CONFIG_BPF_JIT +# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000) + /* All BPF JIT sysctl knobs here. */ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); int bpf_jit_harden __read_mostly; int bpf_jit_kallsyms __read_mostly; +int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT; static __always_inline void bpf_get_prog_addr_region(const struct bpf_prog *prog, @@ -577,27 +580,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, return ret; } +static atomic_long_t bpf_jit_current; + +#if defined(MODULES_VADDR) +static int __init bpf_jit_charge_init(void) +{ + /* Only used as heuristic here to derive limit. */ + bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2, + PAGE_SIZE), INT_MAX); + return 0; +} +pure_initcall(bpf_jit_charge_init); +#endif + +static int bpf_jit_charge_modmem(u32 pages) +{ + if (atomic_long_add_return(pages, &bpf_jit_current) > + (bpf_jit_limit >> PAGE_SHIFT)) { + if (!capable(CAP_SYS_ADMIN)) { + atomic_long_sub(pages, &bpf_jit_current); + return -EPERM; + } + } + + return 0; +} + +static void bpf_jit_uncharge_modmem(u32 pages) +{ + atomic_long_sub(pages, &bpf_jit_current); +} + struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, bpf_jit_fill_hole_t bpf_fill_ill_insns) { struct bpf_binary_header *hdr; - unsigned int size, hole, start; + u32 size, hole, start, pages; /* Most of BPF filters are really small, but if some of them * fill a page, allow at least 128 extra bytes to insert a * random section of illegal instructions. */ size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); + pages = size / PAGE_SIZE; + + if (bpf_jit_charge_modmem(pages)) + return NULL; hdr = module_alloc(size); - if (hdr == NULL) + if (!hdr) { + bpf_jit_uncharge_modmem(pages); return NULL; + } /* Fill space with illegal/arch-dep instructions. 
*/ bpf_fill_ill_insns(hdr, size); - hdr->pages = size / PAGE_SIZE; + hdr->pages = pages; hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), PAGE_SIZE - sizeof(*hdr)); start = (get_random_int() % hole) & ~(alignment - 1); @@ -610,7 +650,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, void bpf_jit_binary_free(struct bpf_binary_header *hdr) { + u32 pages = hdr->pages; + module_memfree(hdr); + bpf_jit_uncharge_modmem(pages); } /* This symbol is only overridden by archs that have different diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index b1a2c5e38530..37b4667128a3 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, return ret; } -# ifdef CONFIG_HAVE_EBPF_JIT static int proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, @@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } -# endif #endif static struct ctl_table net_core_table[] = { @@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = { .extra2 = &one, }, # endif + { + .procname = "bpf_jit_limit", + .data = &bpf_jit_limit, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax_bpf_restricted, + .extra1 = &one, + }, #endif { .procname = "netdev_tstamp_prequeue", -- cgit v1.2.3 From 9b6f7e163cd0f468d1b9696b785659d3c27c8667 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 26 Oct 2018 15:03:19 -0700 Subject: mm: rework memcg kernel stack accounting If CONFIG_VMAP_STACK is set, kernel stacks are allocated using __vmalloc_node_range() with __GFP_ACCOUNT. So kernel stack pages are charged against corresponding memory cgroups on allocation and uncharged on releasing them. The problem is that we do cache kernel stacks in small per-cpu caches and do reuse them for new tasks, which can belong to different memory cgroups. Each stack page still holds a reference to the original cgroup, so the cgroup can't be released until the vmap area is released. To make this happen we need more than two subsequent exits without forks in between on the current cpu, which makes it very unlikely to happen. As a result, I saw a significant number of dying cgroups (in theory, up to 2 * number_of_cpu + number_of_tasks), which can't be released even by significant memory pressure. As a cgroup structure can take a significant amount of memory (first of all, per-cpu data like memcg statistics), it leads to a noticeable waste of memory. 
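Schematically, the stack pages are now charged and uncharged by hand as a cached stack is handed to a task and released again, instead of relying on __GFP_ACCOUNT at allocation time (condensed from the kernel/fork.c changes below):

	/* allocation: skip the automatic memcg charge */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL, 0, node,
				     __builtin_return_address(0));

	/* dup_task_struct(): charge the pages to the new task's memcg */
	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		if (memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0))
			goto free_stack;	/* sketch: the real code propagates the error */
		mod_memcg_page_state(vm->pages[i], MEMCG_KERNEL_STACK_KB,
				     PAGE_SIZE / 1024);
	}

	/* free_thread_stack(): undo the charge before recycling the stack */
	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		mod_memcg_page_state(vm->pages[i], MEMCG_KERNEL_STACK_KB,
				     -(int)(PAGE_SIZE / 1024));
		memcg_kmem_uncharge(vm->pages[i], 0);
	}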
Link: http://lkml.kernel.org/r/20180827162621.30187-1-guro@fb.com Fixes: ac496bf48d97 ("fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y") Signed-off-by: Roman Gushchin Reviewed-by: Shakeel Butt Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Andy Lutomirski Cc: Konstantin Khlebnikov Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 13 ++++++++++- kernel/fork.c | 55 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 61 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 652f602167df..4399cc3f00e4 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); void memcg_kmem_put_cache(struct kmem_cache *cachep); int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, struct mem_cgroup *memcg); + +#ifdef CONFIG_MEMCG_KMEM int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); void memcg_kmem_uncharge(struct page *page, int order); -#ifdef CONFIG_MEMCG_KMEM extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; @@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id); extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); #else + +static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +{ + return 0; +} + +static inline void memcg_kmem_uncharge(struct page *page, int order) +{ +} + #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) diff --git a/kernel/fork.c b/kernel/fork.c index f0b58479534f..3c719fec46c5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -223,9 +223,14 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) return s->addr; } + /* + * Allocated stacks are cached and later reused by new threads, + * so memcg accounting is performed manually on assigning/releasing + * stacks to tasks. Drop __GFP_ACCOUNT. + */ stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START, VMALLOC_END, - THREADINFO_GFP, + THREADINFO_GFP & ~__GFP_ACCOUNT, PAGE_KERNEL, 0, node, __builtin_return_address(0)); @@ -248,9 +253,19 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) static inline void free_thread_stack(struct task_struct *tsk) { #ifdef CONFIG_VMAP_STACK - if (task_stack_vm_area(tsk)) { + struct vm_struct *vm = task_stack_vm_area(tsk); + + if (vm) { int i; + for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { + mod_memcg_page_state(vm->pages[i], + MEMCG_KERNEL_STACK_KB, + -(int)(PAGE_SIZE / 1024)); + + memcg_kmem_uncharge(vm->pages[i], 0); + } + for (i = 0; i < NR_CACHED_STACKS; i++) { if (this_cpu_cmpxchg(cached_stacks[i], NULL, tsk->stack_vm_area) != NULL) @@ -351,10 +366,6 @@ static void account_kernel_stack(struct task_struct *tsk, int account) NR_KERNEL_STACK_KB, PAGE_SIZE / 1024 * account); } - - /* All stack pages belong to the same memcg. 
*/ - mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB, - account * (THREAD_SIZE / 1024)); } else { /* * All stack pages are in the same zone and belong to the @@ -370,6 +381,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account) } } +static int memcg_charge_kernel_stack(struct task_struct *tsk) +{ +#ifdef CONFIG_VMAP_STACK + struct vm_struct *vm = task_stack_vm_area(tsk); + int ret; + + if (vm) { + int i; + + for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { + /* + * If memcg_kmem_charge() fails, page->mem_cgroup + * pointer is NULL, and both memcg_kmem_uncharge() + * and mod_memcg_page_state() in free_thread_stack() + * will ignore this page. So it's safe. + */ + ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0); + if (ret) + return ret; + + mod_memcg_page_state(vm->pages[i], + MEMCG_KERNEL_STACK_KB, + PAGE_SIZE / 1024); + } + } +#endif + return 0; +} + static void release_task_stack(struct task_struct *tsk) { if (WARN_ON(tsk->state != TASK_DEAD)) @@ -807,6 +847,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) if (!stack) goto free_tsk; + if (memcg_charge_kernel_stack(tsk)) + goto free_stack; + stack_vm_area = task_stack_vm_area(tsk); err = arch_dup_task_struct(tsk, orig); -- cgit v1.2.3 From b1d29ba82cf2bc784f4c963ddd6a2cf29e229b33 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:08 -0700 Subject: delayacct: track delays from thrashing cache pages Delay accounting already measures the time a task spends in direct reclaim and waiting for swapin, but in low memory situations tasks spend can spend a significant amount of their time waiting on thrashing page cache. This isn't tracked right now. To know the full impact of memory contention on an individual task, measure the delay when waiting for a recently evicted active cache page to read back into memory. 
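The window being measured is the sleep in wait_on_page_bit_common() on a locked page that is not yet uptodate but was part of the workingset before eviction, roughly (condensed from the mm/filemap.c hunk below):

	if (bit_nr == PG_locked && !PageSwapBacked(page) &&
	    !PageUptodate(page) && PageWorkingset(page)) {
		delayacct_thrashing_start();
		thrashing = true;
	}

	/* ... wait for the page to be read back in ... */

	if (thrashing)
		delayacct_thrashing_end();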
Also update tools/accounting/getdelays.c: [hannes@computer accounting]$ sudo ./getdelays -d -p 1 print delayacct stats ON PID 1 CPU count real total virtual total delay total delay average 50318 745000000 847346785 400533713 0.008ms IO count delay total delay average 435 122601218 0ms SWAP count delay total delay average 0 0 0ms RECLAIM count delay total delay average 0 0 0ms THRASHING count delay total delay average 19 12621439 0ms Link: http://lkml.kernel.org/r/20180828172258.3185-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Daniel Drake Tested-by: Suren Baghdasaryan Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/delayacct.h | 23 +++++++++++++++++++++++ include/uapi/linux/taskstats.h | 6 +++++- kernel/delayacct.c | 15 +++++++++++++++ mm/filemap.c | 11 +++++++++++ tools/accounting/getdelays.c | 8 +++++++- 5 files changed, 61 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 31c865d1842e..577d1b25fccd 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -57,7 +57,12 @@ struct task_delay_info { u64 freepages_start; u64 freepages_delay; /* wait for memory reclaim */ + + u64 thrashing_start; + u64 thrashing_delay; /* wait for thrashing page */ + u32 freepages_count; /* total count of memory reclaim */ + u32 thrashing_count; /* total count of thrash waits */ }; #endif @@ -76,6 +81,8 @@ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); +extern void __delayacct_thrashing_start(void); +extern void __delayacct_thrashing_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { @@ -156,6 +163,18 @@ static inline void delayacct_freepages_end(void) __delayacct_freepages_end(); } +static inline void delayacct_thrashing_start(void) +{ + if (current->delays) + __delayacct_thrashing_start(); +} + +static inline void delayacct_thrashing_end(void) +{ + if (current->delays) + __delayacct_thrashing_end(); +} + #else static inline void delayacct_set_flag(int flag) {} @@ -182,6 +201,10 @@ static inline void delayacct_freepages_start(void) {} static inline void delayacct_freepages_end(void) {} +static inline void delayacct_thrashing_start(void) +{} +static inline void delayacct_thrashing_end(void) +{} #endif /* CONFIG_TASK_DELAY_ACCT */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index b7aa7bb2349f..5e8ca16a9079 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@ */ -#define TASKSTATS_VERSION 8 +#define TASKSTATS_VERSION 9 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -164,6 +164,10 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; + + /* Delay waiting for thrashing page */ + __u64 thrashing_count; + __u64 thrashing_delay_total; }; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index ca8ac2824f0b..2a12b988c717 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -135,9 +135,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->swapin_delay_total = (tmp < 
d->swapin_delay_total) ? 0 : tmp; tmp = d->freepages_delay_total + tsk->delays->freepages_delay; d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; + tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay; + d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp; d->blkio_count += tsk->delays->blkio_count; d->swapin_count += tsk->delays->swapin_count; d->freepages_count += tsk->delays->freepages_count; + d->thrashing_count += tsk->delays->thrashing_count; raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); return 0; @@ -169,3 +172,15 @@ void __delayacct_freepages_end(void) ¤t->delays->freepages_count); } +void __delayacct_thrashing_start(void) +{ + current->delays->thrashing_start = ktime_get_ns(); +} + +void __delayacct_thrashing_end(void) +{ + delayacct_end(¤t->delays->lock, + ¤t->delays->thrashing_start, + ¤t->delays->thrashing_delay, + ¤t->delays->thrashing_count); +} diff --git a/mm/filemap.c b/mm/filemap.c index 7997adce5a29..01a841f17bf4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "internal.h" #define CREATE_TRACE_POINTS @@ -1073,8 +1074,15 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, { struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; + bool thrashing = false; int ret = 0; + if (bit_nr == PG_locked && !PageSwapBacked(page) && + !PageUptodate(page) && PageWorkingset(page)) { + delayacct_thrashing_start(); + thrashing = true; + } + init_wait(wait); wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0; wait->func = wake_page_function; @@ -1113,6 +1121,9 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, finish_wait(q, wait); + if (thrashing) + delayacct_thrashing_end(); + /* * A signal could leave PageWaiters set. Clearing it here if * !waitqueue_active would be possible (by open-coding finish_wait), diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index 9f420d98b5fb..8cb504d30384 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -203,6 +203,8 @@ static void print_delayacct(struct taskstats *t) "SWAP %15s%15s%15s\n" " %15llu%15llu%15llums\n" "RECLAIM %12s%15s%15s\n" + " %15llu%15llu%15llums\n" + "THRASHING%12s%15s%15s\n" " %15llu%15llu%15llums\n", "count", "real total", "virtual total", "delay total", "delay average", @@ -222,7 +224,11 @@ static void print_delayacct(struct taskstats *t) "count", "delay total", "delay average", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, - average_ms(t->freepages_delay_total, t->freepages_count)); + average_ms(t->freepages_delay_total, t->freepages_count), + "count", "delay total", "delay average", + (unsigned long long)t->thrashing_count, + (unsigned long long)t->thrashing_delay_total, + average_ms(t->thrashing_delay_total, t->thrashing_count)); } static void task_context_switch_counts(struct taskstats *t) -- cgit v1.2.3 From 8508cf3ffad4defa202b303e5b6379efc4cd9054 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:11 -0700 Subject: sched: loadavg: consolidate LOAD_INT, LOAD_FRAC, CALC_LOAD There are several definitions of those functions/macros in places that mess with fixed-point load averages. Provide an official version. 
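As a usage sketch, the consolidated helpers keep the average in FIXED_1 (11-bit) fixed point; a caller samples and prints a one-minute average along these lines (illustrative only, mirroring what the spufs and /proc/loadavg code touched below already does):

	#include <linux/sched/loadavg.h>

	static unsigned long avg;	/* FIXED_1 fixed-point */

	static void sample(unsigned long active_tasks)
	{
		/* avg = avg * e + active * (1 - e), with e = EXP_1 = 1/exp(5s/1min) */
		avg = calc_load(avg, EXP_1, active_tasks * FIXED_1);

		pr_info("load: %lu.%02lu\n", LOAD_INT(avg), LOAD_FRAC(avg));
	}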
[akpm@linux-foundation.org: fix missed conversion in block/blk-iolatency.c] Link: http://lkml.kernel.org/r/20180828172258.3185-5-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Suren Baghdasaryan Tested-by: Daniel Drake Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/platforms/cell/cpufreq_spudemand.c | 2 +- arch/powerpc/platforms/cell/spufs/sched.c | 9 +++------ arch/s390/appldata/appldata_os.c | 4 ---- block/blk-iolatency.c | 8 +++++--- drivers/cpuidle/governors/menu.c | 4 ---- fs/proc/loadavg.c | 3 --- include/linux/sched/loadavg.h | 21 +++++++++++++++++---- kernel/debug/kdb/kdb_main.c | 7 +------ kernel/sched/loadavg.c | 15 --------------- 9 files changed, 27 insertions(+), 46 deletions(-) (limited to 'kernel') diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 882944c36ef5..5d8e8b6bb1cc 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -49,7 +49,7 @@ static int calc_freq(struct spu_gov_info_struct *info) cpu = info->policy->cpu; busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); - CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1); + info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", cpu, busy_spus, info->busy_spus); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index c9ef3c532169..9fcccb4490b9 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -987,9 +987,9 @@ static void spu_calc_load(void) unsigned long active_tasks; /* fixed-point */ active_tasks = count_active_contexts() * FIXED_1; - CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks); - CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks); - CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks); + spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks); + spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks); + spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks); } static void spusched_wake(struct timer_list *unused) @@ -1071,9 +1071,6 @@ void spuctx_switch_state(struct spu_context *ctx, } } -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - static int show_spu_loadavg(struct seq_file *s, void *private) { int a, b, c; diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 433a994b1a89..54f375627532 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -25,10 +25,6 @@ #include "appldata.h" - -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - /* * OS data * diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 35c48d7b8f78..28f80d227528 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -153,7 +153,7 @@ struct iolatency_grp { #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC /* * These are the constants used to fake the fixed-point moving average - * calculation just like load average. The call to CALC_LOAD folds + * calculation just like load average. The call to calc_load() folds * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. 
The sampling * window size is bucketed to try to approximately calculate average * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows @@ -248,7 +248,7 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat, return; /* - * CALC_LOAD takes in a number stored in fixed point representation. + * calc_load() takes in a number stored in fixed point representation. * Because we are using this for IO time in ns, the values stored * are significantly larger than the FIXED_1 denominator (2048). * Therefore, rounding errors in the calculation are negligible and @@ -257,7 +257,9 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat, exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1, div64_u64(iolat->cur_win_nsec, BLKIOLATENCY_EXP_BUCKET_SIZE)); - CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean); + iolat->lat_avg = calc_load(iolat->lat_avg, + iolatency_exp_factors[exp_idx], + stat->rqs.mean); } static inline bool iolatency_may_queue(struct iolatency_grp *iolat, diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 575a68f31761..71979605246e 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -130,10 +130,6 @@ struct menu_device { int interval_ptr; }; - -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - static inline int get_loadavg(unsigned long load) { return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10; diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index d06694757201..8468baee951d 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -10,9 +10,6 @@ #include #include -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 80bc84ba5d2a..cc9cc62bb1f8 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -22,10 +22,23 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); #define EXP_5 2014 /* 1/exp(5sec/5min) */ #define EXP_15 2037 /* 1/exp(5sec/15min) */ -#define CALC_LOAD(load,exp,n) \ - load *= exp; \ - load += n*(FIXED_1-exp); \ - load >>= FSHIFT; +/* + * a1 = a0 * e + a * (1 - e) + */ +static inline unsigned long +calc_load(unsigned long load, unsigned long exp, unsigned long active) +{ + unsigned long newload; + + newload = load * exp + active * (FIXED_1 - exp); + if (active >= load) + newload += FIXED_1-1; + + return newload / FIXED_1; +} + +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) extern void calc_global_load(unsigned long ticks); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 2ddfce8f1e8f..bb4fe4e1a601 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2556,16 +2556,11 @@ static int kdb_summary(int argc, const char **argv) } kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60); - /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */ - -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n", LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); -#undef LOAD_INT -#undef LOAD_FRAC + /* Display in kilobytes */ #define 
K(x) ((x) << (PAGE_SHIFT - 10)) kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a171c1258109..54fbdfb2d86c 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -91,21 +91,6 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } -/* - * a1 = a0 * e + a * (1 - e) - */ -static unsigned long -calc_load(unsigned long load, unsigned long exp, unsigned long active) -{ - unsigned long newload; - - newload = load * exp + active * (FIXED_1 - exp); - if (active >= load) - newload += FIXED_1-1; - - return newload / FIXED_1; -} - #ifdef CONFIG_NO_HZ_COMMON /* * Handle NO_HZ for the global load-average. -- cgit v1.2.3 From 5c54f5b9edb1aa2eabbb1091c458f1b6776a1896 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:16 -0700 Subject: sched: loadavg: make calc_load_n() public It's going to be used in a later patch. Keep the churn separate. Link: http://lkml.kernel.org/r/20180828172258.3185-6-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Suren Baghdasaryan Tested-by: Daniel Drake Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched/loadavg.h | 3 + kernel/sched/loadavg.c | 138 +++++++++++++++++++++--------------------- 2 files changed, 72 insertions(+), 69 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index cc9cc62bb1f8..4859bea47a7b 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -37,6 +37,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) return newload / FIXED_1; } +extern unsigned long calc_load_n(unsigned long load, unsigned long exp, + unsigned long active, unsigned int n); + #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 54fbdfb2d86c..28a516575c18 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -91,6 +91,75 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } +/** + * fixed_power_int - compute: x^n, in O(log n) time + * + * @x: base of the power + * @frac_bits: fractional bits of @x + * @n: power to raise @x to. + * + * By exploiting the relation between the definition of the natural power + * function: x^n := x*x*...*x (x multiplied by itself for n times), and + * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, + * (where: n_i \elem {0, 1}, the binary vector representing n), + * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is + * of course trivially computable in O(log_2 n), the length of our binary + * vector. 
+ */ +static unsigned long +fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) +{ + unsigned long result = 1UL << frac_bits; + + if (n) { + for (;;) { + if (n & 1) { + result *= x; + result += 1UL << (frac_bits - 1); + result >>= frac_bits; + } + n >>= 1; + if (!n) + break; + x *= x; + x += 1UL << (frac_bits - 1); + x >>= frac_bits; + } + } + + return result; +} + +/* + * a1 = a0 * e + a * (1 - e) + * + * a2 = a1 * e + a * (1 - e) + * = (a0 * e + a * (1 - e)) * e + a * (1 - e) + * = a0 * e^2 + a * (1 - e) * (1 + e) + * + * a3 = a2 * e + a * (1 - e) + * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) + * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) + * + * ... + * + * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] + * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) + * = a0 * e^n + a * (1 - e^n) + * + * [1] application of the geometric series: + * + * n 1 - x^(n+1) + * S_n := \Sum x^i = ------------- + * i=0 1 - x + */ +unsigned long +calc_load_n(unsigned long load, unsigned long exp, + unsigned long active, unsigned int n) +{ + return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); +} + #ifdef CONFIG_NO_HZ_COMMON /* * Handle NO_HZ for the global load-average. @@ -210,75 +279,6 @@ static long calc_load_nohz_fold(void) return delta; } -/** - * fixed_power_int - compute: x^n, in O(log n) time - * - * @x: base of the power - * @frac_bits: fractional bits of @x - * @n: power to raise @x to. - * - * By exploiting the relation between the definition of the natural power - * function: x^n := x*x*...*x (x multiplied by itself for n times), and - * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, - * (where: n_i \elem {0, 1}, the binary vector representing n), - * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is - * of course trivially computable in O(log_2 n), the length of our binary - * vector. - */ -static unsigned long -fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) -{ - unsigned long result = 1UL << frac_bits; - - if (n) { - for (;;) { - if (n & 1) { - result *= x; - result += 1UL << (frac_bits - 1); - result >>= frac_bits; - } - n >>= 1; - if (!n) - break; - x *= x; - x += 1UL << (frac_bits - 1); - x >>= frac_bits; - } - } - - return result; -} - -/* - * a1 = a0 * e + a * (1 - e) - * - * a2 = a1 * e + a * (1 - e) - * = (a0 * e + a * (1 - e)) * e + a * (1 - e) - * = a0 * e^2 + a * (1 - e) * (1 + e) - * - * a3 = a2 * e + a * (1 - e) - * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) - * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) - * - * ... - * - * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] - * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) - * = a0 * e^n + a * (1 - e^n) - * - * [1] application of the geometric series: - * - * n 1 - x^(n+1) - * S_n := \Sum x^i = ------------- - * i=0 1 - x - */ -static unsigned long -calc_load_n(unsigned long load, unsigned long exp, - unsigned long active, unsigned int n) -{ - return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); -} - /* * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into -- cgit v1.2.3 From 1f351d7f7590857ea281579c26e6045b4c548ef4 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:19 -0700 Subject: sched: sched.h: make rq locking and clock functions available in stats.h kernel/sched/sched.h includes "stats.h" half-way through the file. 
The next patch introduces users of sched.h's rq locking functions and update_rq_clock() in kernel/sched/stats.h. Move those definitions up in the file so they are available in stats.h. Link: http://lkml.kernel.org/r/20180828172258.3185-7-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Suren Baghdasaryan Tested-by: Daniel Drake Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched/sched.h | 164 +++++++++++++++++++++++++-------------------------- 1 file changed, 82 insertions(+), 82 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b8c007713b3b..65a75b317935 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -957,6 +957,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); #define cpu_curr(cpu) (cpu_rq(cpu)->curr) #define raw_rq() raw_cpu_ptr(&runqueues) +extern void update_rq_clock(struct rq *rq); + static inline u64 __rq_clock_broken(struct rq *rq) { return READ_ONCE(rq->clock); @@ -1075,6 +1077,86 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) #endif } +struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) + __acquires(rq->lock); + +struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) + __acquires(p->pi_lock) + __acquires(rq->lock); + +static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) + __releases(rq->lock) +{ + rq_unpin_lock(rq, rf); + raw_spin_unlock(&rq->lock); +} + +static inline void +task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) + __releases(rq->lock) + __releases(p->pi_lock) +{ + rq_unpin_lock(rq, rf); + raw_spin_unlock(&rq->lock); + raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); +} + +static inline void +rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) + __acquires(rq->lock) +{ + raw_spin_lock_irqsave(&rq->lock, rf->flags); + rq_pin_lock(rq, rf); +} + +static inline void +rq_lock_irq(struct rq *rq, struct rq_flags *rf) + __acquires(rq->lock) +{ + raw_spin_lock_irq(&rq->lock); + rq_pin_lock(rq, rf); +} + +static inline void +rq_lock(struct rq *rq, struct rq_flags *rf) + __acquires(rq->lock) +{ + raw_spin_lock(&rq->lock); + rq_pin_lock(rq, rf); +} + +static inline void +rq_relock(struct rq *rq, struct rq_flags *rf) + __acquires(rq->lock) +{ + raw_spin_lock(&rq->lock); + rq_repin_lock(rq, rf); +} + +static inline void +rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) + __releases(rq->lock) +{ + rq_unpin_lock(rq, rf); + raw_spin_unlock_irqrestore(&rq->lock, rf->flags); +} + +static inline void +rq_unlock_irq(struct rq *rq, struct rq_flags *rf) + __releases(rq->lock) +{ + rq_unpin_lock(rq, rf); + raw_spin_unlock_irq(&rq->lock); +} + +static inline void +rq_unlock(struct rq *rq, struct rq_flags *rf) + __releases(rq->lock) +{ + rq_unpin_lock(rq, rf); + raw_spin_unlock(&rq->lock); +} + #ifdef CONFIG_NUMA enum numa_topology_type { NUMA_DIRECT, @@ -1717,8 +1799,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) sched_update_tick_dependency(rq); } -extern void update_rq_clock(struct rq *rq); - extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); @@ -1783,86 +1863,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) #endif #endif -struct 
rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) - __acquires(rq->lock); - -struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) - __acquires(p->pi_lock) - __acquires(rq->lock); - -static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) - __releases(rq->lock) -{ - rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); -} - -static inline void -task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) - __releases(rq->lock) - __releases(p->pi_lock) -{ - rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); - raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); -} - -static inline void -rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) - __acquires(rq->lock) -{ - raw_spin_lock_irqsave(&rq->lock, rf->flags); - rq_pin_lock(rq, rf); -} - -static inline void -rq_lock_irq(struct rq *rq, struct rq_flags *rf) - __acquires(rq->lock) -{ - raw_spin_lock_irq(&rq->lock); - rq_pin_lock(rq, rf); -} - -static inline void -rq_lock(struct rq *rq, struct rq_flags *rf) - __acquires(rq->lock) -{ - raw_spin_lock(&rq->lock); - rq_pin_lock(rq, rf); -} - -static inline void -rq_relock(struct rq *rq, struct rq_flags *rf) - __acquires(rq->lock) -{ - raw_spin_lock(&rq->lock); - rq_repin_lock(rq, rf); -} - -static inline void -rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) - __releases(rq->lock) -{ - rq_unpin_lock(rq, rf); - raw_spin_unlock_irqrestore(&rq->lock, rf->flags); -} - -static inline void -rq_unlock_irq(struct rq *rq, struct rq_flags *rf) - __releases(rq->lock) -{ - rq_unpin_lock(rq, rf); - raw_spin_unlock_irq(&rq->lock); -} - -static inline void -rq_unlock(struct rq *rq, struct rq_flags *rf) - __releases(rq->lock) -{ - rq_unpin_lock(rq, rf); - raw_spin_unlock(&rq->lock); -} - #ifdef CONFIG_SMP #ifdef CONFIG_PREEMPT -- cgit v1.2.3 From 246b3b3342c9b0a2e24cda2178be87bc36e1c874 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:23 -0700 Subject: sched: introduce this_rq_lock_irq() do_sched_yield() disables IRQs, looks up this_rq() and locks it. The next patch is adding another site with the same pattern, so provide a convenience function for it. 
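As a rough userspace analogue (illustration only: a pthread mutex stands in for the runqueue lock and IRQ disabling is ignored), the value of the helper is that it looks up and locks the object in one step and returns it, so a caller like do_sched_yield() collapses to a single call:

#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	unsigned long yld_count;
};

static struct rq runqueue = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Stand-in for this_rq_lock_irq(): look up and lock in one step,
 * and hand the locked object back to the caller. */
static struct rq *this_rq_lock(void)
{
	struct rq *rq = &runqueue;

	pthread_mutex_lock(&rq->lock);
	return rq;
}

static void rq_unlock(struct rq *rq)
{
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	struct rq *rq = this_rq_lock();	/* was: disable irqs, look up, lock */

	rq->yld_count++;		/* work done under the lock */
	rq_unlock(rq);

	printf("yld_count=%lu\n", runqueue.yld_count);
	return 0;
}

Returning the locked runqueue from the helper keeps the lookup and the locking paired, which is exactly what the two call sites need.
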
Link: http://lkml.kernel.org/r/20180828172258.3185-8-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Suren Baghdasaryan Tested-by: Daniel Drake Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched/core.c | 4 +--- kernel/sched/sched.h | 12 ++++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2e696b03e99d..f3efef387797 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4933,9 +4933,7 @@ static void do_sched_yield(void) struct rq_flags rf; struct rq *rq; - local_irq_disable(); - rq = this_rq(); - rq_lock(rq, &rf); + rq = this_rq_lock_irq(&rf); schedstat_inc(rq->yld_count); current->sched_class->yield_task(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 65a75b317935..1de189bb9209 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1157,6 +1157,18 @@ rq_unlock(struct rq *rq, struct rq_flags *rf) raw_spin_unlock(&rq->lock); } +static inline struct rq * +this_rq_lock_irq(struct rq_flags *rf) + __acquires(rq->lock) +{ + struct rq *rq; + + local_irq_disable(); + rq = this_rq(); + rq_lock(rq, rf); + return rq; +} + #ifdef CONFIG_NUMA enum numa_topology_type { NUMA_DIRECT, -- cgit v1.2.3 From eb414681d5a07d28d2ff90dc05f69ec6b232ebd2 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:27 -0700 Subject: psi: pressure stall information for CPU, memory, and IO When systems are overcommitted and resources become contended, it's hard to tell exactly the impact this has on workload productivity, or how close the system is to lockups and OOM kills. In particular, when machines work multiple jobs concurrently, the impact of overcommit in terms of latency and throughput on the individual job can be enormous. In order to maximize hardware utilization without sacrificing individual job health or risk complete machine lockups, this patch implements a way to quantify resource pressure in the system. A kernel built with CONFIG_PSI=y creates files in /proc/pressure/ that expose the percentage of time the system is stalled on CPU, memory, or IO, respectively. Stall states are aggregate versions of the per-task delay accounting delays: cpu: some tasks are runnable but not executing on a CPU memory: tasks are reclaiming, or waiting for swapin or thrashing cache io: tasks are waiting for io completions These percentages of walltime can be thought of as pressure percentages, and they give a general sense of system health and productivity loss incurred by resource overcommit. They can also indicate when the system is approaching lockup scenarios and OOMs. To do this, psi keeps track of the task states associated with each CPU and samples the time they spend in stall states. Every 2 seconds, the samples are averaged across CPUs - weighted by the CPUs' non-idle time to eliminate artifacts from unused CPUs - and translated into percentages of walltime. A running average of those percentages is maintained over 10s, 1m, and 5m periods (similar to the loadaverage). 
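A minimal standalone sketch of that averaging (illustration only, with made-up sample values; the FSHIFT/FIXED_1 fixed-point base and the 1/exp factors mirror the constants used by the loadavg code and this patch): each 2-second window's stall time becomes a fixed-point percentage of the window, is folded into the 10s/60s/300s averages with the same calc_load() formula, and is printed with LOAD_INT/LOAD_FRAC:

#include <stdio.h>

#define FSHIFT		11			/* bits of fixed-point precision */
#define FIXED_1		(1UL << FSHIFT)		/* 1.0 in fixed point (2048) */
#define EXP_10s		1677			/* 1/exp(2s/10s) as fixed point */
#define EXP_60s		1981			/* 1/exp(2s/60s) */
#define EXP_300s	2034			/* 1/exp(2s/300s) */

#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

/* Same exponential-decay step and rounding as the kernel's calc_load(). */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

int main(void)
{
	/* Hypothetical stall time observed in each 2-second window (ns). */
	unsigned long long stall_ns[] = { 0, 200000000ULL, 1500000000ULL,
					  400000000ULL, 0 };
	unsigned long long period_ns = 2000000000ULL;
	unsigned long avg10 = 0, avg60 = 0, avg300 = 0;
	unsigned int i;

	for (i = 0; i < sizeof(stall_ns) / sizeof(stall_ns[0]); i++) {
		/* Stall share of the window as a fixed-point percentage. */
		unsigned long pct = stall_ns[i] * 100 / period_ns * FIXED_1;

		avg10  = calc_load(avg10,  EXP_10s,  pct);
		avg60  = calc_load(avg60,  EXP_60s,  pct);
		avg300 = calc_load(avg300, EXP_300s, pct);

		printf("some avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu\n",
		       LOAD_INT(avg10), LOAD_FRAC(avg10),
		       LOAD_INT(avg60), LOAD_FRAC(avg60),
		       LOAD_INT(avg300), LOAD_FRAC(avg300));
	}
	return 0;
}

A single 10% stall window, for example, decays into an avg10 of roughly 1.8%, which matches the "recent trend" behaviour described above; the real code additionally weights each CPU's contribution by its non-idle time before this step.
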
[hannes@cmpxchg.org: doc fixlet, per Randy] Link: http://lkml.kernel.org/r/20180828205625.GA14030@cmpxchg.org [hannes@cmpxchg.org: code optimization] Link: http://lkml.kernel.org/r/20180907175015.GA8479@cmpxchg.org [hannes@cmpxchg.org: rename psi_clock() to psi_update_work(), per Peter] Link: http://lkml.kernel.org/r/20180907145404.GB11088@cmpxchg.org [hannes@cmpxchg.org: fix build] Link: http://lkml.kernel.org/r/20180913014222.GA2370@cmpxchg.org Link: http://lkml.kernel.org/r/20180828172258.3185-9-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Peter Zijlstra (Intel) Tested-by: Daniel Drake Tested-by: Suren Baghdasaryan Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Tejun Heo Cc: Vinayak Menon Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/accounting/psi.txt | 64 ++++ include/linux/psi.h | 28 ++ include/linux/psi_types.h | 92 ++++++ include/linux/sched.h | 10 + init/Kconfig | 15 + kernel/fork.c | 4 + kernel/sched/Makefile | 1 + kernel/sched/core.c | 12 +- kernel/sched/psi.c | 657 +++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 2 + kernel/sched/stats.h | 86 +++++ mm/compaction.c | 5 + mm/filemap.c | 15 +- mm/page_alloc.c | 9 + mm/vmscan.c | 9 + 15 files changed, 1003 insertions(+), 6 deletions(-) create mode 100644 Documentation/accounting/psi.txt create mode 100644 include/linux/psi.h create mode 100644 include/linux/psi_types.h create mode 100644 kernel/sched/psi.c (limited to 'kernel') diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt new file mode 100644 index 000000000000..3753a82f1cf5 --- /dev/null +++ b/Documentation/accounting/psi.txt @@ -0,0 +1,64 @@ +================================ +PSI - Pressure Stall Information +================================ + +:Date: April, 2018 +:Author: Johannes Weiner + +When CPU, memory or IO devices are contended, workloads experience +latency spikes, throughput losses, and run the risk of OOM kills. + +Without an accurate measure of such contention, users are forced to +either play it safe and under-utilize their hardware resources, or +roll the dice and frequently suffer the disruptions resulting from +excessive overcommit. + +The psi feature identifies and quantifies the disruptions caused by +such resource crunches and the time impact it has on complex workloads +or even entire systems. + +Having an accurate measure of productivity losses caused by resource +scarcity aids users in sizing workloads to hardware--or provisioning +hardware according to workload demand. + +As psi aggregates this information in realtime, systems can be managed +dynamically using techniques such as load shedding, migrating jobs to +other systems or data centers, or strategically pausing or killing low +priority or restartable batch jobs. + +This allows maximizing hardware utilization without sacrificing +workload health or risking major disruptions such as OOM kills. + +Pressure interface +================== + +Pressure information for each resource is exported through the +respective file in /proc/pressure/ -- cpu, memory, and io. + +The format for CPU is as such: + +some avg10=0.00 avg60=0.00 avg300=0.00 total=0 + +and for memory and IO: + +some avg10=0.00 avg60=0.00 avg300=0.00 total=0 +full avg10=0.00 avg60=0.00 avg300=0.00 total=0 + +The "some" line indicates the share of time in which at least some +tasks are stalled on a given resource. 
+ +The "full" line indicates the share of time in which all non-idle +tasks are stalled on a given resource simultaneously. In this state +actual CPU cycles are going to waste, and a workload that spends +extended time in this state is considered to be thrashing. This has +severe impact on performance, and it's useful to distinguish this +situation from a state where some tasks are stalled but the CPU is +still doing productive work. As such, time spent in this subset of the +stall state is tracked separately and exported in the "full" averages. + +The ratios are tracked as recent trends over ten, sixty, and three +hundred second windows, which gives insight into short term events as +well as medium and long term trends. The total absolute stall time is +tracked and exported as well, to allow detection of latency spikes +which wouldn't necessarily make a dent in the time averages, or to +average trends over custom time frames. diff --git a/include/linux/psi.h b/include/linux/psi.h new file mode 100644 index 000000000000..b0daf050de58 --- /dev/null +++ b/include/linux/psi.h @@ -0,0 +1,28 @@ +#ifndef _LINUX_PSI_H +#define _LINUX_PSI_H + +#include +#include + +#ifdef CONFIG_PSI + +extern bool psi_disabled; + +void psi_init(void); + +void psi_task_change(struct task_struct *task, int clear, int set); + +void psi_memstall_tick(struct task_struct *task, int cpu); +void psi_memstall_enter(unsigned long *flags); +void psi_memstall_leave(unsigned long *flags); + +#else /* CONFIG_PSI */ + +static inline void psi_init(void) {} + +static inline void psi_memstall_enter(unsigned long *flags) {} +static inline void psi_memstall_leave(unsigned long *flags) {} + +#endif /* CONFIG_PSI */ + +#endif /* _LINUX_PSI_H */ diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h new file mode 100644 index 000000000000..2cf422db5d18 --- /dev/null +++ b/include/linux/psi_types.h @@ -0,0 +1,92 @@ +#ifndef _LINUX_PSI_TYPES_H +#define _LINUX_PSI_TYPES_H + +#include +#include + +#ifdef CONFIG_PSI + +/* Tracked task states */ +enum psi_task_count { + NR_IOWAIT, + NR_MEMSTALL, + NR_RUNNING, + NR_PSI_TASK_COUNTS, +}; + +/* Task state bitmasks */ +#define TSK_IOWAIT (1 << NR_IOWAIT) +#define TSK_MEMSTALL (1 << NR_MEMSTALL) +#define TSK_RUNNING (1 << NR_RUNNING) + +/* Resources that workloads could be stalled on */ +enum psi_res { + PSI_IO, + PSI_MEM, + PSI_CPU, + NR_PSI_RESOURCES, +}; + +/* + * Pressure states for each resource: + * + * SOME: Stalled tasks & working tasks + * FULL: Stalled tasks & no working tasks + */ +enum psi_states { + PSI_IO_SOME, + PSI_IO_FULL, + PSI_MEM_SOME, + PSI_MEM_FULL, + PSI_CPU_SOME, + /* Only per-CPU, to weigh the CPU in the global average: */ + PSI_NONIDLE, + NR_PSI_STATES, +}; + +struct psi_group_cpu { + /* 1st cacheline updated by the scheduler */ + + /* Aggregator needs to know of concurrent changes */ + seqcount_t seq ____cacheline_aligned_in_smp; + + /* States of the tasks belonging to this group */ + unsigned int tasks[NR_PSI_TASK_COUNTS]; + + /* Period time sampling buckets for each state of interest (ns) */ + u32 times[NR_PSI_STATES]; + + /* Time of last task change in this group (rq_clock) */ + u64 state_start; + + /* 2nd cacheline updated by the aggregator */ + + /* Delta detection against the sampling buckets */ + u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp; +}; + +struct psi_group { + /* Protects data updated during an aggregation */ + struct mutex stat_lock; + + /* Per-cpu task state & time tracking */ + struct psi_group_cpu __percpu *pcpu; + + /* 
Periodic aggregation state */ + u64 total_prev[NR_PSI_STATES - 1]; + u64 last_update; + u64 next_update; + struct delayed_work clock_work; + + /* Total stall times and sampled pressure averages */ + u64 total[NR_PSI_STATES - 1]; + unsigned long avg[NR_PSI_STATES - 1][3]; +}; + +#else /* CONFIG_PSI */ + +struct psi_group { }; + +#endif /* CONFIG_PSI */ + +#endif /* _LINUX_PSI_TYPES_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index adfb3f9a7597..b8fcc6b3080c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -706,6 +707,10 @@ struct task_struct { unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; unsigned sched_remote_wakeup:1; +#ifdef CONFIG_PSI + unsigned sched_psi_wake_requeue:1; +#endif + /* Force alignment to the next boundary: */ unsigned :0; @@ -965,6 +970,10 @@ struct task_struct { kernel_siginfo_t *last_siginfo; struct task_io_accounting ioac; +#ifdef CONFIG_PSI + /* Pressure stall state */ + unsigned int psi_flags; +#endif #ifdef CONFIG_TASK_XACCT /* Accumulated RSS usage: */ u64 acct_rss_mem1; @@ -1391,6 +1400,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ diff --git a/init/Kconfig b/init/Kconfig index 317d5ccb5191..26e639df5517 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -490,6 +490,21 @@ config TASK_IO_ACCOUNTING Say N if unsure. +config PSI + bool "Pressure stall information tracking" + help + Collect metrics that indicate how overcommitted the CPU, memory, + and IO capacity are in the system. + + If you say Y here, the kernel will create /proc/pressure/ with the + pressure statistics files cpu, memory, and io. These will indicate + the share of walltime in which some or all tasks in the system are + delayed due to contention of the respective resource. + + For more details see Documentation/accounting/psi.txt. + + Say N if unsure. 
+ endmenu # "CPU/Task time and stats accounting" config CPU_ISOLATION diff --git a/kernel/fork.c b/kernel/fork.c index 3c719fec46c5..8f82a3bdcb8f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1822,6 +1822,10 @@ static __latent_entropy struct task_struct *copy_process( p->default_timer_slack_ns = current->timer_slack_ns; +#ifdef CONFIG_PSI + p->psi_flags = 0; +#endif + task_io_accounting_init(&p->ioac); acct_clear_integrals(p); diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 7fe183404c38..21fb5a5662b5 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -29,3 +29,4 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_MEMBARRIER) += membarrier.o obj-$(CONFIG_CPU_ISOLATION) += isolation.o +obj-$(CONFIG_PSI) += psi.o diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f3efef387797..fd2fce8a001b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -722,8 +722,10 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & ENQUEUE_NOCLOCK)) update_rq_clock(rq); - if (!(flags & ENQUEUE_RESTORE)) + if (!(flags & ENQUEUE_RESTORE)) { sched_info_queued(rq, p); + psi_enqueue(p, flags & ENQUEUE_WAKEUP); + } p->sched_class->enqueue_task(rq, p, flags); } @@ -733,8 +735,10 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & DEQUEUE_NOCLOCK)) update_rq_clock(rq); - if (!(flags & DEQUEUE_SAVE)) + if (!(flags & DEQUEUE_SAVE)) { sched_info_dequeued(rq, p); + psi_dequeue(p, flags & DEQUEUE_SLEEP); + } p->sched_class->dequeue_task(rq, p, flags); } @@ -2037,6 +2041,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); if (task_cpu(p) != cpu) { wake_flags |= WF_MIGRATED; + psi_ttwu_dequeue(p); set_task_cpu(p, cpu); } @@ -3051,6 +3056,7 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); cpu_load_update_active(rq); calc_global_load_tick(rq); + psi_task_tick(rq); rq_unlock(rq, &rf); @@ -6067,6 +6073,8 @@ void __init sched_init(void) init_schedstats(); + psi_init(); + scheduler_running = 1; } diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c new file mode 100644 index 000000000000..595414599b98 --- /dev/null +++ b/kernel/sched/psi.c @@ -0,0 +1,657 @@ +/* + * Pressure stall information for CPU, memory and IO + * + * Copyright (c) 2018 Facebook, Inc. + * Author: Johannes Weiner + * + * When CPU, memory and IO are contended, tasks experience delays that + * reduce throughput and introduce latencies into the workload. Memory + * and IO contention, in addition, can cause a full loss of forward + * progress in which the CPU goes idle. + * + * This code aggregates individual task delays into resource pressure + * metrics that indicate problems with both workload health and + * resource utilization. + * + * Model + * + * The time in which a task can execute on a CPU is our baseline for + * productivity. Pressure expresses the amount of time in which this + * potential cannot be realized due to resource contention. + * + * This concept of productivity has two components: the workload and + * the CPU. To measure the impact of pressure on both, we define two + * contention states for a resource: SOME and FULL. + * + * In the SOME state of a given resource, one or more tasks are + * delayed on that resource. This affects the workload's ability to + * perform work, but the CPU may still be executing other tasks. 
+ * + * In the FULL state of a given resource, all non-idle tasks are + * delayed on that resource such that nobody is advancing and the CPU + * goes idle. This leaves both workload and CPU unproductive. + * + * (Naturally, the FULL state doesn't exist for the CPU resource.) + * + * SOME = nr_delayed_tasks != 0 + * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0 + * + * The percentage of wallclock time spent in those compound stall + * states gives pressure numbers between 0 and 100 for each resource, + * where the SOME percentage indicates workload slowdowns and the FULL + * percentage indicates reduced CPU utilization: + * + * %SOME = time(SOME) / period + * %FULL = time(FULL) / period + * + * Multiple CPUs + * + * The more tasks and available CPUs there are, the more work can be + * performed concurrently. This means that the potential that can go + * unrealized due to resource contention *also* scales with non-idle + * tasks and CPUs. + * + * Consider a scenario where 257 number crunching tasks are trying to + * run concurrently on 256 CPUs. If we simply aggregated the task + * states, we would have to conclude a CPU SOME pressure number of + * 100%, since *somebody* is waiting on a runqueue at all + * times. However, that is clearly not the amount of contention the + * workload is experiencing: only one out of 256 possible exceution + * threads will be contended at any given time, or about 0.4%. + * + * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any + * given time *one* of the tasks is delayed due to a lack of memory. + * Again, looking purely at the task state would yield a memory FULL + * pressure number of 0%, since *somebody* is always making forward + * progress. But again this wouldn't capture the amount of execution + * potential lost, which is 1 out of 4 CPUs, or 25%. + * + * To calculate wasted potential (pressure) with multiple processors, + * we have to base our calculation on the number of non-idle tasks in + * conjunction with the number of available CPUs, which is the number + * of potential execution threads. SOME becomes then the proportion of + * delayed tasks to possibe threads, and FULL is the share of possible + * threads that are unproductive due to delays: + * + * threads = min(nr_nonidle_tasks, nr_cpus) + * SOME = min(nr_delayed_tasks / threads, 1) + * FULL = (threads - min(nr_running_tasks, threads)) / threads + * + * For the 257 number crunchers on 256 CPUs, this yields: + * + * threads = min(257, 256) + * SOME = min(1 / 256, 1) = 0.4% + * FULL = (256 - min(257, 256)) / 256 = 0% + * + * For the 1 out of 4 memory-delayed tasks, this yields: + * + * threads = min(4, 4) + * SOME = min(1 / 4, 1) = 25% + * FULL = (4 - min(3, 4)) / 4 = 25% + * + * [ Substitute nr_cpus with 1, and you can see that it's a natural + * extension of the single-CPU model. ] + * + * Implementation + * + * To assess the precise time spent in each such state, we would have + * to freeze the system on task changes and start/stop the state + * clocks accordingly. Obviously that doesn't scale in practice. + * + * Because the scheduler aims to distribute the compute load evenly + * among the available CPUs, we can track task state locally to each + * CPU and, at much lower frequency, extrapolate the global state for + * the cumulative stall times and the running averages. 
+ * + * For each runqueue, we track: + * + * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0) + * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu]) + * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0) + * + * and then periodically aggregate: + * + * tNONIDLE = sum(tNONIDLE[i]) + * + * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE + * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE + * + * %SOME = tSOME / period + * %FULL = tFULL / period + * + * This gives us an approximation of pressure that is practical + * cost-wise, yet way more sensitive and accurate than periodic + * sampling of the aggregate task states would be. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "sched.h" + +static int psi_bug __read_mostly; + +bool psi_disabled __read_mostly; +core_param(psi_disabled, psi_disabled, bool, 0644); + +/* Running averages - we need to be higher-res than loadavg */ +#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */ +#define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */ +#define EXP_60s 1981 /* 1/exp(2s/60s) */ +#define EXP_300s 2034 /* 1/exp(2s/300s) */ + +/* Sampling frequency in nanoseconds */ +static u64 psi_period __read_mostly; + +/* System-level pressure and stall tracking */ +static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu); +static struct psi_group psi_system = { + .pcpu = &system_group_pcpu, +}; + +static void psi_update_work(struct work_struct *work); + +static void group_init(struct psi_group *group) +{ + int cpu; + + for_each_possible_cpu(cpu) + seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); + group->next_update = sched_clock() + psi_period; + INIT_DELAYED_WORK(&group->clock_work, psi_update_work); + mutex_init(&group->stat_lock); +} + +void __init psi_init(void) +{ + if (psi_disabled) + return; + + psi_period = jiffies_to_nsecs(PSI_FREQ); + group_init(&psi_system); +} + +static bool test_state(unsigned int *tasks, enum psi_states state) +{ + switch (state) { + case PSI_IO_SOME: + return tasks[NR_IOWAIT]; + case PSI_IO_FULL: + return tasks[NR_IOWAIT] && !tasks[NR_RUNNING]; + case PSI_MEM_SOME: + return tasks[NR_MEMSTALL]; + case PSI_MEM_FULL: + return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING]; + case PSI_CPU_SOME: + return tasks[NR_RUNNING] > 1; + case PSI_NONIDLE: + return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || + tasks[NR_RUNNING]; + default: + return false; + } +} + +static void get_recent_times(struct psi_group *group, int cpu, u32 *times) +{ + struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu); + unsigned int tasks[NR_PSI_TASK_COUNTS]; + u64 now, state_start; + unsigned int seq; + int s; + + /* Snapshot a coherent view of the CPU state */ + do { + seq = read_seqcount_begin(&groupc->seq); + now = cpu_clock(cpu); + memcpy(times, groupc->times, sizeof(groupc->times)); + memcpy(tasks, groupc->tasks, sizeof(groupc->tasks)); + state_start = groupc->state_start; + } while (read_seqcount_retry(&groupc->seq, seq)); + + /* Calculate state time deltas against the previous snapshot */ + for (s = 0; s < NR_PSI_STATES; s++) { + u32 delta; + /* + * In addition to already concluded states, we also + * incorporate currently active states on the CPU, + * since states may last for many sampling periods. + * + * This way we keep our delta sampling buckets small + * (u32) and our reported pressure close to what's + * actually happening. 
+ */ + if (test_state(tasks, s)) + times[s] += now - state_start; + + delta = times[s] - groupc->times_prev[s]; + groupc->times_prev[s] = times[s]; + + times[s] = delta; + } +} + +static void calc_avgs(unsigned long avg[3], int missed_periods, + u64 time, u64 period) +{ + unsigned long pct; + + /* Fill in zeroes for periods of no activity */ + if (missed_periods) { + avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods); + avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods); + avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods); + } + + /* Sample the most recent active period */ + pct = div_u64(time * 100, period); + pct *= FIXED_1; + avg[0] = calc_load(avg[0], EXP_10s, pct); + avg[1] = calc_load(avg[1], EXP_60s, pct); + avg[2] = calc_load(avg[2], EXP_300s, pct); +} + +static bool update_stats(struct psi_group *group) +{ + u64 deltas[NR_PSI_STATES - 1] = { 0, }; + unsigned long missed_periods = 0; + unsigned long nonidle_total = 0; + u64 now, expires, period; + int cpu; + int s; + + mutex_lock(&group->stat_lock); + + /* + * Collect the per-cpu time buckets and average them into a + * single time sample that is normalized to wallclock time. + * + * For averaging, each CPU is weighted by its non-idle time in + * the sampling period. This eliminates artifacts from uneven + * loading, or even entirely idle CPUs. + */ + for_each_possible_cpu(cpu) { + u32 times[NR_PSI_STATES]; + u32 nonidle; + + get_recent_times(group, cpu, times); + + nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]); + nonidle_total += nonidle; + + for (s = 0; s < PSI_NONIDLE; s++) + deltas[s] += (u64)times[s] * nonidle; + } + + /* + * Integrate the sample into the running statistics that are + * reported to userspace: the cumulative stall times and the + * decaying averages. + * + * Pressure percentages are sampled at PSI_FREQ. We might be + * called more often when the user polls more frequently than + * that; we might be called less often when there is no task + * activity, thus no data, and clock ticks are sporadic. The + * below handles both. + */ + + /* total= */ + for (s = 0; s < NR_PSI_STATES - 1; s++) + group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL)); + + /* avgX= */ + now = sched_clock(); + expires = group->next_update; + if (now < expires) + goto out; + if (now - expires > psi_period) + missed_periods = div_u64(now - expires, psi_period); + + /* + * The periodic clock tick can get delayed for various + * reasons, especially on loaded systems. To avoid clock + * drift, we schedule the clock in fixed psi_period intervals. + * But the deltas we sample out of the per-cpu buckets above + * are based on the actual time elapsing between clock ticks. + */ + group->next_update = expires + ((1 + missed_periods) * psi_period); + period = now - (group->last_update + (missed_periods * psi_period)); + group->last_update = now; + + for (s = 0; s < NR_PSI_STATES - 1; s++) { + u32 sample; + + sample = group->total[s] - group->total_prev[s]; + /* + * Due to the lockless sampling of the time buckets, + * recorded time deltas can slip into the next period, + * which under full pressure can result in samples in + * excess of the period length. + * + * We don't want to report non-sensical pressures in + * excess of 100%, nor do we want to drop such events + * on the floor. Instead we punt any overage into the + * future until pressure subsides. By doing this we + * don't underreport the occurring pressure curve, we + * just report it delayed by one period length. + * + * The error isn't cumulative. 
As soon as another + * delta slips from a period P to P+1, by definition + * it frees up its time T in P. + */ + if (sample > period) + sample = period; + group->total_prev[s] += sample; + calc_avgs(group->avg[s], missed_periods, sample, period); + } +out: + mutex_unlock(&group->stat_lock); + return nonidle_total; +} + +static void psi_update_work(struct work_struct *work) +{ + struct delayed_work *dwork; + struct psi_group *group; + bool nonidle; + + dwork = to_delayed_work(work); + group = container_of(dwork, struct psi_group, clock_work); + + /* + * If there is task activity, periodically fold the per-cpu + * times and feed samples into the running averages. If things + * are idle and there is no data to process, stop the clock. + * Once restarted, we'll catch up the running averages in one + * go - see calc_avgs() and missed_periods. + */ + + nonidle = update_stats(group); + + if (nonidle) { + unsigned long delay = 0; + u64 now; + + now = sched_clock(); + if (group->next_update > now) + delay = nsecs_to_jiffies(group->next_update - now) + 1; + schedule_delayed_work(dwork, delay); + } +} + +static void record_times(struct psi_group_cpu *groupc, int cpu, + bool memstall_tick) +{ + u32 delta; + u64 now; + + now = cpu_clock(cpu); + delta = now - groupc->state_start; + groupc->state_start = now; + + if (test_state(groupc->tasks, PSI_IO_SOME)) { + groupc->times[PSI_IO_SOME] += delta; + if (test_state(groupc->tasks, PSI_IO_FULL)) + groupc->times[PSI_IO_FULL] += delta; + } + + if (test_state(groupc->tasks, PSI_MEM_SOME)) { + groupc->times[PSI_MEM_SOME] += delta; + if (test_state(groupc->tasks, PSI_MEM_FULL)) + groupc->times[PSI_MEM_FULL] += delta; + else if (memstall_tick) { + u32 sample; + /* + * Since we care about lost potential, a + * memstall is FULL when there are no other + * working tasks, but also when the CPU is + * actively reclaiming and nothing productive + * could run even if it were runnable. + * + * When the timer tick sees a reclaiming CPU, + * regardless of runnable tasks, sample a FULL + * tick (or less if it hasn't been a full tick + * since the last state change). + */ + sample = min(delta, (u32)jiffies_to_nsecs(1)); + groupc->times[PSI_MEM_FULL] += sample; + } + } + + if (test_state(groupc->tasks, PSI_CPU_SOME)) + groupc->times[PSI_CPU_SOME] += delta; + + if (test_state(groupc->tasks, PSI_NONIDLE)) + groupc->times[PSI_NONIDLE] += delta; +} + +static void psi_group_change(struct psi_group *group, int cpu, + unsigned int clear, unsigned int set) +{ + struct psi_group_cpu *groupc; + unsigned int t, m; + + groupc = per_cpu_ptr(group->pcpu, cpu); + + /* + * First we assess the aggregate resource states this CPU's + * tasks have been in since the last change, and account any + * SOME and FULL time these may have resulted in. + * + * Then we update the task counts according to the state + * change requested through the @clear and @set bits. + */ + write_seqcount_begin(&groupc->seq); + + record_times(groupc, cpu, false); + + for (t = 0, m = clear; m; m &= ~(1 << t), t++) { + if (!(m & (1 << t))) + continue; + if (groupc->tasks[t] == 0 && !psi_bug) { + printk_deferred(KERN_ERR "psi: task underflow! 
cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n", + cpu, t, groupc->tasks[0], + groupc->tasks[1], groupc->tasks[2], + clear, set); + psi_bug = 1; + } + groupc->tasks[t]--; + } + + for (t = 0; set; set &= ~(1 << t), t++) + if (set & (1 << t)) + groupc->tasks[t]++; + + write_seqcount_end(&groupc->seq); + + if (!delayed_work_pending(&group->clock_work)) + schedule_delayed_work(&group->clock_work, PSI_FREQ); +} + +void psi_task_change(struct task_struct *task, int clear, int set) +{ + int cpu = task_cpu(task); + + if (!task->pid) + return; + + if (((task->psi_flags & set) || + (task->psi_flags & clear) != clear) && + !psi_bug) { + printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n", + task->pid, task->comm, cpu, + task->psi_flags, clear, set); + psi_bug = 1; + } + + task->psi_flags &= ~clear; + task->psi_flags |= set; + + psi_group_change(&psi_system, cpu, clear, set); +} + +void psi_memstall_tick(struct task_struct *task, int cpu) +{ + struct psi_group_cpu *groupc; + + groupc = per_cpu_ptr(psi_system.pcpu, cpu); + write_seqcount_begin(&groupc->seq); + record_times(groupc, cpu, true); + write_seqcount_end(&groupc->seq); +} + +/** + * psi_memstall_enter - mark the beginning of a memory stall section + * @flags: flags to handle nested sections + * + * Marks the calling task as being stalled due to a lack of memory, + * such as waiting for a refault or performing reclaim. + */ +void psi_memstall_enter(unsigned long *flags) +{ + struct rq_flags rf; + struct rq *rq; + + if (psi_disabled) + return; + + *flags = current->flags & PF_MEMSTALL; + if (*flags) + return; + /* + * PF_MEMSTALL setting & accounting needs to be atomic wrt + * changes to the task's scheduling state, otherwise we can + * race with CPU migration. + */ + rq = this_rq_lock_irq(&rf); + + current->flags |= PF_MEMSTALL; + psi_task_change(current, 0, TSK_MEMSTALL); + + rq_unlock_irq(rq, &rf); +} + +/** + * psi_memstall_leave - mark the end of an memory stall section + * @flags: flags to handle nested memdelay sections + * + * Marks the calling task as no longer stalled due to lack of memory. + */ +void psi_memstall_leave(unsigned long *flags) +{ + struct rq_flags rf; + struct rq *rq; + + if (psi_disabled) + return; + + if (*flags) + return; + /* + * PF_MEMSTALL clearing & accounting needs to be atomic wrt + * changes to the task's scheduling state, otherwise we could + * race with CPU migration. + */ + rq = this_rq_lock_irq(&rf); + + current->flags &= ~PF_MEMSTALL; + psi_task_change(current, TSK_MEMSTALL, 0); + + rq_unlock_irq(rq, &rf); +} + +static int psi_show(struct seq_file *m, struct psi_group *group, + enum psi_res res) +{ + int full; + + if (psi_disabled) + return -EOPNOTSUPP; + + update_stats(group); + + for (full = 0; full < 2 - (res == PSI_CPU); full++) { + unsigned long avg[3]; + u64 total; + int w; + + for (w = 0; w < 3; w++) + avg[w] = group->avg[res * 2 + full][w]; + total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC); + + seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n", + full ? 
"full" : "some", + LOAD_INT(avg[0]), LOAD_FRAC(avg[0]), + LOAD_INT(avg[1]), LOAD_FRAC(avg[1]), + LOAD_INT(avg[2]), LOAD_FRAC(avg[2]), + total); + } + + return 0; +} + +static int psi_io_show(struct seq_file *m, void *v) +{ + return psi_show(m, &psi_system, PSI_IO); +} + +static int psi_memory_show(struct seq_file *m, void *v) +{ + return psi_show(m, &psi_system, PSI_MEM); +} + +static int psi_cpu_show(struct seq_file *m, void *v) +{ + return psi_show(m, &psi_system, PSI_CPU); +} + +static int psi_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, psi_io_show, NULL); +} + +static int psi_memory_open(struct inode *inode, struct file *file) +{ + return single_open(file, psi_memory_show, NULL); +} + +static int psi_cpu_open(struct inode *inode, struct file *file) +{ + return single_open(file, psi_cpu_show, NULL); +} + +static const struct file_operations psi_io_fops = { + .open = psi_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations psi_memory_fops = { + .open = psi_memory_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations psi_cpu_fops = { + .open = psi_cpu_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init psi_proc_init(void) +{ + proc_mkdir("pressure", NULL); + proc_create("pressure/io", 0, NULL, &psi_io_fops); + proc_create("pressure/memory", 0, NULL, &psi_memory_fops); + proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops); + return 0; +} +module_init(psi_proc_init); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1de189bb9209..618577fc9aa8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -319,6 +320,7 @@ extern bool dl_cpu_busy(unsigned int cpu); #ifdef CONFIG_CGROUP_SCHED #include +#include struct cfs_rq; struct rt_rq; diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 8aea199a39b4..4904c4677000 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -55,6 +55,92 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt # define schedstat_val_or_zero(var) 0 #endif /* CONFIG_SCHEDSTATS */ +#ifdef CONFIG_PSI +/* + * PSI tracks state that persists across sleeps, such as iowaits and + * memory stalls. As a result, it has to distinguish between sleeps, + * where a task's runnable state changes, and requeues, where a task + * and its state are being moved between CPUs and runqueues. + */ +static inline void psi_enqueue(struct task_struct *p, bool wakeup) +{ + int clear = 0, set = TSK_RUNNING; + + if (psi_disabled) + return; + + if (!wakeup || p->sched_psi_wake_requeue) { + if (p->flags & PF_MEMSTALL) + set |= TSK_MEMSTALL; + if (p->sched_psi_wake_requeue) + p->sched_psi_wake_requeue = 0; + } else { + if (p->in_iowait) + clear |= TSK_IOWAIT; + } + + psi_task_change(p, clear, set); +} + +static inline void psi_dequeue(struct task_struct *p, bool sleep) +{ + int clear = TSK_RUNNING, set = 0; + + if (psi_disabled) + return; + + if (!sleep) { + if (p->flags & PF_MEMSTALL) + clear |= TSK_MEMSTALL; + } else { + if (p->in_iowait) + set |= TSK_IOWAIT; + } + + psi_task_change(p, clear, set); +} + +static inline void psi_ttwu_dequeue(struct task_struct *p) +{ + if (psi_disabled) + return; + /* + * Is the task being migrated during a wakeup? 
Make sure to + * deregister its sleep-persistent psi states from the old + * queue, and let psi_enqueue() know it has to requeue. + */ + if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) { + struct rq_flags rf; + struct rq *rq; + int clear = 0; + + if (p->in_iowait) + clear |= TSK_IOWAIT; + if (p->flags & PF_MEMSTALL) + clear |= TSK_MEMSTALL; + + rq = __task_rq_lock(p, &rf); + psi_task_change(p, clear, 0); + p->sched_psi_wake_requeue = 1; + __task_rq_unlock(rq, &rf); + } +} + +static inline void psi_task_tick(struct rq *rq) +{ + if (psi_disabled) + return; + + if (unlikely(rq->curr->flags & PF_MEMSTALL)) + psi_memstall_tick(rq->curr, cpu_of(rq)); +} +#else /* CONFIG_PSI */ +static inline void psi_enqueue(struct task_struct *p, bool wakeup) {} +static inline void psi_dequeue(struct task_struct *p, bool sleep) {} +static inline void psi_ttwu_dequeue(struct task_struct *p) {} +static inline void psi_task_tick(struct rq *rq) {} +#endif /* CONFIG_PSI */ + #ifdef CONFIG_SCHED_INFO static inline void sched_info_reset_dequeued(struct task_struct *t) { diff --git a/mm/compaction.c b/mm/compaction.c index faca45ebe62d..7c607479de4a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_COMPACTION @@ -2068,11 +2069,15 @@ static int kcompactd(void *p) pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; while (!kthread_should_stop()) { + unsigned long pflags; + trace_mm_compaction_kcompactd_sleep(pgdat->node_id); wait_event_freezable(pgdat->kcompactd_wait, kcompactd_work_requested(pgdat)); + psi_memstall_enter(&pflags); kcompactd_do_work(pgdat); + psi_memstall_leave(&pflags); } return 0; diff --git a/mm/filemap.c b/mm/filemap.c index 01a841f17bf4..41586009fa42 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "internal.h" #define CREATE_TRACE_POINTS @@ -1075,11 +1076,14 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; bool thrashing = false; + unsigned long pflags; int ret = 0; - if (bit_nr == PG_locked && !PageSwapBacked(page) && + if (bit_nr == PG_locked && !PageUptodate(page) && PageWorkingset(page)) { - delayacct_thrashing_start(); + if (!PageSwapBacked(page)) + delayacct_thrashing_start(); + psi_memstall_enter(&pflags); thrashing = true; } @@ -1121,8 +1125,11 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, finish_wait(q, wait); - if (thrashing) - delayacct_thrashing_end(); + if (thrashing) { + if (!PageSwapBacked(page)) + delayacct_thrashing_end(); + psi_memstall_leave(&pflags); + } /* * A signal could leave PageWaiters set. 
Clearing it here if diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 20f25d06c00c..f97b5a1700a4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include @@ -3549,15 +3550,20 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, enum compact_priority prio, enum compact_result *compact_result) { struct page *page; + unsigned long pflags; unsigned int noreclaim_flag; if (!order) return NULL; + psi_memstall_enter(&pflags); noreclaim_flag = memalloc_noreclaim_save(); + *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, prio); + memalloc_noreclaim_restore(noreclaim_flag); + psi_memstall_leave(&pflags); if (*compact_result <= COMPACT_INACTIVE) return NULL; @@ -3756,11 +3762,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct reclaim_state reclaim_state; int progress; unsigned int noreclaim_flag; + unsigned long pflags; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); + psi_memstall_enter(&pflags); fs_reclaim_acquire(gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); reclaim_state.reclaimed_slab = 0; @@ -3772,6 +3780,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, current->reclaim_state = NULL; memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(gfp_mask); + psi_memstall_leave(&pflags); cond_resched(); diff --git a/mm/vmscan.c b/mm/vmscan.c index 87e9fef341d2..8ea87586925e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -3305,6 +3306,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, { struct zonelist *zonelist; unsigned long nr_reclaimed; + unsigned long pflags; int nid; unsigned int noreclaim_flag; struct scan_control sc = { @@ -3333,9 +3335,13 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, sc.gfp_mask, sc.reclaim_idx); + psi_memstall_enter(&pflags); noreclaim_flag = memalloc_noreclaim_save(); + nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + memalloc_noreclaim_restore(noreclaim_flag); + psi_memstall_leave(&pflags); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); @@ -3500,6 +3506,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) int i; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; + unsigned long pflags; struct zone *zone; struct scan_control sc = { .gfp_mask = GFP_KERNEL, @@ -3510,6 +3517,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) .may_swap = 1, }; + psi_memstall_enter(&pflags); __fs_reclaim_acquire(); count_vm_event(PAGEOUTRUN); @@ -3611,6 +3619,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) out: snapshot_refaults(NULL, pgdat); __fs_reclaim_release(); + psi_memstall_leave(&pflags); /* * Return the order kswapd stopped reclaiming at as * prepare_kswapd_sleep() takes it into account. If another caller -- cgit v1.2.3 From 2ce7135adc9ad081aa3c49744144376ac74fea60 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 26 Oct 2018 15:06:31 -0700 Subject: psi: cgroup support On a system that executes multiple cgrouped jobs and independent workloads, we don't just care about the health of the overall system, but also that of individual jobs, so that we can ensure individual job health, fairness between jobs, or prioritize some jobs over others. This patch implements pressure stall tracking for cgroups. 
In kernels with CONFIG_PSI=y, cgroup2 groups will have cpu.pressure, memory.pressure, and io.pressure files that track aggregate pressure stall times for only the tasks inside the cgroup. Link: http://lkml.kernel.org/r/20180828172258.3185-10-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Tejun Heo Acked-by: Peter Zijlstra (Intel) Tested-by: Daniel Drake Tested-by: Suren Baghdasaryan Cc: Christopher Lameter Cc: Ingo Molnar Cc: Johannes Weiner Cc: Mike Galbraith Cc: Peter Enderborg Cc: Randy Dunlap Cc: Shakeel Butt Cc: Vinayak Menon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/accounting/psi.txt | 9 +++ Documentation/admin-guide/cgroup-v2.rst | 18 +++++ include/linux/cgroup-defs.h | 4 ++ include/linux/cgroup.h | 15 ++++ include/linux/psi.h | 25 +++++++ init/Kconfig | 4 ++ kernel/cgroup/cgroup.c | 45 +++++++++++- kernel/sched/psi.c | 118 +++++++++++++++++++++++++++++--- 8 files changed, 228 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt index 3753a82f1cf5..b8ca28b60215 100644 --- a/Documentation/accounting/psi.txt +++ b/Documentation/accounting/psi.txt @@ -62,3 +62,12 @@ well as medium and long term trends. The total absolute stall time is tracked and exported as well, to allow detection of latency spikes which wouldn't necessarily make a dent in the time averages, or to average trends over custom time frames. + +Cgroup2 interface +================= + +In a system with a CONFIG_CGROUP=y kernel and the cgroup2 filesystem +mounted, pressure stall information is also tracked for tasks grouped +into cgroups. Each subdirectory in the cgroupfs mountpoint contains +cpu.pressure, memory.pressure, and io.pressure files; the format is +the same as the /proc/pressure/ files. diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index caf36105a1c7..8389d6f72a77 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -966,6 +966,12 @@ All time durations are in microseconds. $PERIOD duration. "max" for $MAX indicates no limit. If only one number is written, $MAX is updated. + cpu.pressure + A read-only nested-key file which exists on non-root cgroups. + + Shows pressure stall information for CPU. See + Documentation/accounting/psi.txt for details. + Memory ------ @@ -1271,6 +1277,12 @@ PAGE_SIZE multiple when read back. higher than the limit for an extended period of time. This reduces the impact on the workload and memory management. + memory.pressure + A read-only nested-key file which exists on non-root cgroups. + + Shows pressure stall information for memory. See + Documentation/accounting/psi.txt for details. + Usage Guidelines ~~~~~~~~~~~~~~~~ @@ -1408,6 +1420,12 @@ IO Interface Files 8:16 rbps=2097152 wbps=max riops=max wiops=max + io.pressure + A read-only nested-key file which exists on non-root cgroups. + + Shows pressure stall information for IO. See + Documentation/accounting/psi.txt for details. 
+ Writeback ~~~~~~~~~ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 22254c1fe1c5..5e1694fe035b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef CONFIG_CGROUPS @@ -436,6 +437,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + /* used to track pressure stalls */ + struct psi_group psi; + /* used to store eBPF programs */ struct cgroup_bpf bpf; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b622d6608605..9968332cceed 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -650,6 +650,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return &cgrp->psi; +} + static inline void cgroup_init_kthreadd(void) { /* @@ -703,6 +708,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) return NULL; } +static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) +{ + return NULL; +} + +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return NULL; +} + static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { diff --git a/include/linux/psi.h b/include/linux/psi.h index b0daf050de58..8e0725aac0aa 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -4,6 +4,9 @@ #include #include +struct seq_file; +struct css_set; + #ifdef CONFIG_PSI extern bool psi_disabled; @@ -16,6 +19,14 @@ void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); +int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res); + +#ifdef CONFIG_CGROUPS +int psi_cgroup_alloc(struct cgroup *cgrp); +void psi_cgroup_free(struct cgroup *cgrp); +void cgroup_move_task(struct task_struct *p, struct css_set *to); +#endif + #else /* CONFIG_PSI */ static inline void psi_init(void) {} @@ -23,6 +34,20 @@ static inline void psi_init(void) {} static inline void psi_memstall_enter(unsigned long *flags) {} static inline void psi_memstall_leave(unsigned long *flags) {} +#ifdef CONFIG_CGROUPS +static inline int psi_cgroup_alloc(struct cgroup *cgrp) +{ + return 0; +} +static inline void psi_cgroup_free(struct cgroup *cgrp) +{ +} +static inline void cgroup_move_task(struct task_struct *p, struct css_set *to) +{ + rcu_assign_pointer(p->cgroups, to); +} +#endif + #endif /* CONFIG_PSI */ #endif /* _LINUX_PSI_H */ diff --git a/init/Kconfig b/init/Kconfig index 26e639df5517..a4112e95724a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -501,6 +501,10 @@ config PSI the share of walltime in which some or all tasks in the system are delayed due to contention of the respective resource. + In kernels with cgroup support, cgroups (cgroup2 only) will + have cpu.pressure, memory.pressure, and io.pressure files, + which aggregate pressure stalls for the grouped tasks only. + For more details see Documentation/accounting/psi.txt. Say N if unsure. 
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 4c1cf0969a80..8b79318810ad 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -862,7 +863,7 @@ static void css_set_move_task(struct task_struct *task, */ WARN_ON_ONCE(task->flags & PF_EXITING); - rcu_assign_pointer(task->cgroups, to_cset); + cgroup_move_task(task, to_cset); list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks : &to_cset->tasks); } @@ -3446,6 +3447,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v) return ret; } +#ifdef CONFIG_PSI +static int cgroup_io_pressure_show(struct seq_file *seq, void *v) +{ + return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_IO); +} +static int cgroup_memory_pressure_show(struct seq_file *seq, void *v) +{ + return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_MEM); +} +static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) +{ + return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU); +} +#endif + static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of->kn->priv; @@ -4576,6 +4592,23 @@ static struct cftype cgroup_base_files[] = { .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_stat_show, }, +#ifdef CONFIG_PSI + { + .name = "io.pressure", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_io_pressure_show, + }, + { + .name = "memory.pressure", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_memory_pressure_show, + }, + { + .name = "cpu.pressure", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_cpu_pressure_show, + }, +#endif { } /* terminate */ }; @@ -4636,6 +4669,7 @@ static void css_free_rwork_fn(struct work_struct *work) */ cgroup_put(cgroup_parent(cgrp)); kernfs_put(cgrp->kn); + psi_cgroup_free(cgrp); if (cgroup_on_dfl(cgrp)) cgroup_rstat_exit(cgrp); kfree(cgrp); @@ -4892,10 +4926,15 @@ static struct cgroup *cgroup_create(struct cgroup *parent) cgrp->self.parent = &parent->self; cgrp->root = root; cgrp->level = level; - ret = cgroup_bpf_inherit(cgrp); + + ret = psi_cgroup_alloc(cgrp); if (ret) goto out_idr_free; + ret = cgroup_bpf_inherit(cgrp); + if (ret) + goto out_psi_free; + for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; @@ -4933,6 +4972,8 @@ static struct cgroup *cgroup_create(struct cgroup *parent) return cgrp; +out_psi_free: + psi_cgroup_free(cgrp); out_idr_free: cgroup_idr_remove(&root->cgroup_idr, cgrp->id); out_stat_exit: diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 595414599b98..7cdecfc010af 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -473,9 +473,35 @@ static void psi_group_change(struct psi_group *group, int cpu, schedule_delayed_work(&group->clock_work, PSI_FREQ); } +static struct psi_group *iterate_groups(struct task_struct *task, void **iter) +{ +#ifdef CONFIG_CGROUPS + struct cgroup *cgroup = NULL; + + if (!*iter) + cgroup = task->cgroups->dfl_cgrp; + else if (*iter == &psi_system) + return NULL; + else + cgroup = cgroup_parent(*iter); + + if (cgroup && cgroup_parent(cgroup)) { + *iter = cgroup; + return cgroup_psi(cgroup); + } +#else + if (*iter) + return NULL; +#endif + *iter = &psi_system; + return &psi_system; +} + void psi_task_change(struct task_struct *task, int clear, int set) { int cpu = task_cpu(task); + struct psi_group *group; + void *iter = NULL; if (!task->pid) return; @@ -492,17 +518,23 @@ void psi_task_change(struct task_struct *task, int clear, int set) task->psi_flags &= ~clear; 
task->psi_flags |= set; - psi_group_change(&psi_system, cpu, clear, set); + while ((group = iterate_groups(task, &iter))) + psi_group_change(group, cpu, clear, set); } void psi_memstall_tick(struct task_struct *task, int cpu) { - struct psi_group_cpu *groupc; + struct psi_group *group; + void *iter = NULL; - groupc = per_cpu_ptr(psi_system.pcpu, cpu); - write_seqcount_begin(&groupc->seq); - record_times(groupc, cpu, true); - write_seqcount_end(&groupc->seq); + while ((group = iterate_groups(task, &iter))) { + struct psi_group_cpu *groupc; + + groupc = per_cpu_ptr(group->pcpu, cpu); + write_seqcount_begin(&groupc->seq); + record_times(groupc, cpu, true); + write_seqcount_end(&groupc->seq); + } } /** @@ -565,8 +597,78 @@ void psi_memstall_leave(unsigned long *flags) rq_unlock_irq(rq, &rf); } -static int psi_show(struct seq_file *m, struct psi_group *group, - enum psi_res res) +#ifdef CONFIG_CGROUPS +int psi_cgroup_alloc(struct cgroup *cgroup) +{ + if (psi_disabled) + return 0; + + cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu); + if (!cgroup->psi.pcpu) + return -ENOMEM; + group_init(&cgroup->psi); + return 0; +} + +void psi_cgroup_free(struct cgroup *cgroup) +{ + if (psi_disabled) + return; + + cancel_delayed_work_sync(&cgroup->psi.clock_work); + free_percpu(cgroup->psi.pcpu); +} + +/** + * cgroup_move_task - move task to a different cgroup + * @task: the task + * @to: the target css_set + * + * Move task to a new cgroup and safely migrate its associated stall + * state between the different groups. + * + * This function acquires the task's rq lock to lock out concurrent + * changes to the task's scheduling state and - in case the task is + * running - concurrent changes to its stall state. + */ +void cgroup_move_task(struct task_struct *task, struct css_set *to) +{ + bool move_psi = !psi_disabled; + unsigned int task_flags = 0; + struct rq_flags rf; + struct rq *rq; + + if (move_psi) { + rq = task_rq_lock(task, &rf); + + if (task_on_rq_queued(task)) + task_flags = TSK_RUNNING; + else if (task->in_iowait) + task_flags = TSK_IOWAIT; + + if (task->flags & PF_MEMSTALL) + task_flags |= TSK_MEMSTALL; + + if (task_flags) + psi_task_change(task, task_flags, 0); + } + + /* + * Lame to do this here, but the scheduler cannot be locked + * from the outside, so we move cgroups from inside sched/. + */ + rcu_assign_pointer(task->cgroups, to); + + if (move_psi) { + if (task_flags) + psi_task_change(task, 0, task_flags); + + task_rq_unlock(rq, task, &rf); + } +} +#endif /* CONFIG_CGROUPS */ + +int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) { int full; -- cgit v1.2.3 From 966cf44f637e6aeea7e3d01ba004bf8b5beac78f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 26 Oct 2018 15:07:52 -0700 Subject: mm: defer ZONE_DEVICE page initialization to the point where we init pgmap The ZONE_DEVICE pages were being initialized in two locations. One was with the memory_hotplug lock held and another was outside of that lock. The problem with this is that it was nearly doubling the memory initialization time. Instead of doing this twice, once while holding a global lock and once without, I am opting to defer the initialization to the one outside of the lock. This allows us to avoid serializing the overhead for memory init and we can instead focus on per-node init times. One issue I encountered is that devm_memremap_pages and hmm_devmmem_pages_create were initializing only the pgmap field the same way. 
One wasn't initializing hmm_data, and the other was initializing it to a poison value. Since this is something that is exposed to the driver in the case of hmm I am opting for a third option and just initializing hmm_data to 0 since this is going to be exposed to unknown third party drivers. [alexander.h.duyck@linux.intel.com: fix reference count for pgmap in devm_memremap_pages] Link: http://lkml.kernel.org/r/20181008233404.1909.37302.stgit@localhost.localdomain Link: http://lkml.kernel.org/r/20180925202053.3576.66039.stgit@localhost.localdomain Signed-off-by: Alexander Duyck Reviewed-by: Pavel Tatashin Tested-by: Dan Williams Cc: Dave Hansen Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 ++ kernel/memremap.c | 25 ++++++--------- mm/hmm.c | 12 ++++--- mm/page_alloc.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 108 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/include/linux/mm.h b/include/linux/mm.h index 737279bb479c..33228a49d7d2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -848,6 +848,8 @@ static inline bool is_zone_device_page(const struct page *page) { return page_zonenum(page) == ZONE_DEVICE; } +extern void memmap_init_zone_device(struct zone *, unsigned long, + unsigned long, struct dev_pagemap *); #else static inline bool is_zone_device_page(const struct page *page) { diff --git a/kernel/memremap.c b/kernel/memremap.c index 5b8600d39931..620fc4d2559a 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -175,10 +175,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) struct vmem_altmap *altmap = pgmap->altmap_valid ? &pgmap->altmap : NULL; struct resource *res = &pgmap->res; - unsigned long pfn, pgoff, order; + struct dev_pagemap *conflict_pgmap; pgprot_t pgprot = PAGE_KERNEL; + unsigned long pgoff, order; int error, nid, is_ram; - struct dev_pagemap *conflict_pgmap; align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) @@ -256,19 +256,14 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) if (error) goto err_add_memory; - for_each_device_pfn(pfn, pgmap) { - struct page *page = pfn_to_page(pfn); - - /* - * ZONE_DEVICE pages union ->lru with a ->pgmap back - * pointer. It is a bug if a ZONE_DEVICE page is ever - * freed or placed on a driver-private list. Seed the - * storage with LIST_POISON* values. - */ - list_del(&page->lru); - page->pgmap = pgmap; - percpu_ref_get(pgmap->ref); - } + /* + * Initialization of the pages has been deferred until now in order + * to allow us to do the work while not holding the hotplug lock. 
+ */ + memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], + align_start >> PAGE_SHIFT, + align_size >> PAGE_SHIFT, pgmap); + percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap)); devm_add_action(dev, devm_memremap_pages_release, pgmap); diff --git a/mm/hmm.c b/mm/hmm.c index c968e49f7a0c..774d684fa2b4 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -1024,7 +1024,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) resource_size_t key, align_start, align_size, align_end; struct device *device = devmem->device; int ret, nid, is_ram; - unsigned long pfn; align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); align_size = ALIGN(devmem->resource->start + @@ -1109,11 +1108,14 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) align_size >> PAGE_SHIFT, NULL); mem_hotplug_done(); - for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) { - struct page *page = pfn_to_page(pfn); + /* + * Initialization of the pages has been deferred until now in order + * to allow us to do the work while not holding the hotplug lock. + */ + memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], + align_start >> PAGE_SHIFT, + align_size >> PAGE_SHIFT, &devmem->pagemap); - page->pgmap = &devmem->pagemap; - } return 0; error_add_memory: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cee1abf85d72..d73ff2188d72 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5465,12 +5465,23 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; +#ifdef CONFIG_ZONE_DEVICE /* * Honor reservation requested by the driver for this ZONE_DEVICE - * memory + * memory. We limit the total number of pages to initialize to just + * those that might contain the memory mapping. We will defer the + * ZONE_DEVICE page initialization until after we have released + * the hotplug lock. */ - if (altmap && start_pfn == altmap->base_pfn) - start_pfn += altmap->reserve; + if (zone == ZONE_DEVICE) { + if (!altmap) + return; + + if (start_pfn == altmap->base_pfn) + start_pfn += altmap->reserve; + end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); + } +#endif for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* @@ -5537,6 +5548,81 @@ not_early: } } +#ifdef CONFIG_ZONE_DEVICE +void __ref memmap_init_zone_device(struct zone *zone, + unsigned long start_pfn, + unsigned long size, + struct dev_pagemap *pgmap) +{ + unsigned long pfn, end_pfn = start_pfn + size; + struct pglist_data *pgdat = zone->zone_pgdat; + unsigned long zone_idx = zone_idx(zone); + unsigned long start = jiffies; + int nid = pgdat->node_id; + + if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone))) + return; + + /* + * The call to memmap_init_zone should have already taken care + * of the pages reserved for the memmap, so we can just jump to + * the end of that region and start processing the device pages. + */ + if (pgmap->altmap_valid) { + struct vmem_altmap *altmap = &pgmap->altmap; + + start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); + size = end_pfn - start_pfn; + } + + for (pfn = start_pfn; pfn < end_pfn; pfn++) { + struct page *page = pfn_to_page(pfn); + + __init_single_page(page, pfn, zone_idx, nid); + + /* + * Mark page reserved as it will need to wait for onlining + * phase for it to be fully associated with a zone. + * + * We can use the non-atomic __set_bit operation for setting + * the flag as we are still initializing the pages. 
+ */ + __SetPageReserved(page); + + /* + * ZONE_DEVICE pages union ->lru with a ->pgmap back + * pointer and hmm_data. It is a bug if a ZONE_DEVICE + * page is ever freed or placed on a driver-private list. + */ + page->pgmap = pgmap; + page->hmm_data = 0; + + /* + * Mark the block movable so that blocks are reserved for + * movable at startup. This will force kernel allocations + * to reserve their blocks rather than leaking throughout + * the address space during boot when many long-lived + * kernel allocations are made. + * + * bitmap is created for zone's valid pfn range. but memmap + * can be created for invalid pages (for alignment) + * check here not to call set_pageblock_migratetype() against + * pfn out of zone. + * + * Please note that MEMMAP_HOTPLUG path doesn't clear memmap + * because this is done early in sparse_add_one_section + */ + if (!(pfn & (pageblock_nr_pages - 1))) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + cond_resched(); + } + } + + pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev), + size, jiffies_to_msecs(jiffies - start)); +} + +#endif static void __meminit zone_init_free_lists(struct zone *zone) { unsigned int order, t; -- cgit v1.2.3 From da387e5c930f43d9f3b011a6fbb33bdf43d9714c Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Oct 2018 09:51:43 +0300 Subject: tracing: Export trace_dump_stack to modules There is no reason for this function to be unexported and it's a useful debugging aid. Link: http://lkml.kernel.org/r/1539759103-5923-1-git-send-email-nborisov@suse.com Signed-off-by: Nikolay Borisov Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ec5b21778806..ff1c4b20cd0a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2727,6 +2727,7 @@ void trace_dump_stack(int skip) __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, skip, preempt_count(), NULL); } +EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); -- cgit v1.2.3 From a2acce536921bd793bae13fa344fcea157638e72 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 15 Oct 2018 23:14:28 -0400 Subject: tracing: Have stack tracer trace full stack The stack tracer traces every function call checking the current stack (in non-interrupt context), looking for the deepest stack, and saving it when it finds a new max depth. The problem is that it calls save_stack_trace(), and with the new ORC unwinder, it can skip too much. As it looks at the ip of the function call in the backtrace to find where it should start, it doesn't need to skip anything. The stack trace selftest would fail when the kernel was compiled with the ORC unwinder enabled. Without skipping functions when doing the stack trace, it now passes again.
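To illustrate the reasoning with a stand-alone sketch (made-up addresses and a hypothetical helper, not the in-tree kernel/trace/trace_stack.c code): since the tracer already knows the ip of the traced call site, it can find the first interesting entry by searching the saved trace for that ip instead of skipping a hard-coded number of tracer-internal frames, which is why the fixed skip of 3 in the diff below can safely become 0.

#include <stdio.h>

/* Hypothetical helper: locate the traced call site in a saved backtrace. */
static int find_trace_start(unsigned long ip, const unsigned long *entries,
			    int nr_entries)
{
	int i;

	for (i = 0; i < nr_entries; i++)
		if (entries[i] == ip)
			return i;	/* first frame worth reporting */
	return 0;			/* not found: report the whole trace */
}

int main(void)
{
	/* pretend backtrace: two tracer/unwinder frames, then the call site */
	unsigned long entries[] = { 0xa000, 0xa100, 0xbeef, 0xc000, 0xd000 };
	unsigned long traced_ip = 0xbeef;
	int i, start = find_trace_start(traced_ip, entries, 5);

	for (i = start; i < 5; i++)
		printf("frame %d: %#lx\n", i - start, entries[i]);
	return 0;
}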
Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_stack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4237eba4ef20..2b0d1ee3241c 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -111,7 +111,7 @@ check_stack(unsigned long ip, unsigned long *stack) stack_trace_max_size = this_size; stack_trace_max.nr_entries = 0; - stack_trace_max.skip = 3; + stack_trace_max.skip = 0; save_stack_trace(&stack_trace_max); -- cgit v1.2.3 From 18858511fd8a877303cc34c06efa461b26a0e070 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 22 Oct 2018 00:08:20 +0900 Subject: tracing: Return -ENOENT if there is no target synthetic event Return -ENOENT error if there is no target synthetic event. This notices an operation failure to user as below; # echo 'wakeup_latency u64 lat; pid_t pid;' > synthetic_events # echo '!wakeup' >> synthetic_events sh: write error: No such file or directory Link: http://lkml.kernel.org/r/154013449986.25576.9487131386597290172.stgit@devbox Acked-by: Tom Zanussi Tested-by: Tom Zanussi Cc: Shuah Khan Cc: Rajvi Jingar Cc: stable@vger.kernel.org Fixes: 4b147936fa50 ('tracing: Add support for 'synthetic' events') Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_events_hist.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index d239004aaf29..eb908ef2ecec 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1063,8 +1063,10 @@ static int create_synth_event(int argc, char **argv) event = NULL; ret = -EEXIST; goto out; - } else if (delete_event) + } else if (delete_event) { + ret = -ENOENT; goto out; + } if (argc < 2) { ret = -EINVAL; -- cgit v1.2.3 From a68d75081aeccfb169575bea6f452a5a12b9f49b Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Sat, 27 Oct 2018 11:05:17 +0800 Subject: sched/rt: Update comment in pick_next_task_rt() Commit: f4ebcbc0d7e0 ("sched/rt: Substract number of tasks of throttled queues from rq->nr_running") added a new rt_rq->rt_queued field, which is used to indicate the status of rq->rt enqueue or dequeue. So, the ->rt_nr_running check was removed and we now check ->rt_queued instead. Fix the comment in pick_next_task_rt() as well, which was still referencing the old logic. Signed-off-by: Muchun Song Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20181027030517.23292-1-smuchun@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/rt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 2e2955a8cf8f..a21ea6021929 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1561,7 +1561,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* * We may dequeue prev's rt_rq in put_prev_task(). - * So, we update time before rt_nr_running check. + * So, we update time before rt_queued check. 
*/ if (prev->sched_class == &rt_sched_class) update_curr_rt(rq); -- cgit v1.2.3 From 28fa741c27e6d57f6bf594ba3c444ce79e671e09 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 29 Oct 2018 23:32:11 +0000 Subject: perf/core: Clean up inconsistent indentation Replace a bunch of spaces with a tab to clean up the indentation. Signed-off-by: Colin Ian King Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Link: http://lkml.kernel.org/r/20181029233211.21475-1-colin.king@canonical.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 5a97f34bc14c..65e90c752a91 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -750,7 +750,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) /* * Do not update time when cgroup is not active */ - if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) + if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) __update_cgrp_time(event->cgrp); } -- cgit v1.2.3 From ea6f650465c61c70b4c96648ed2cb8a0c55db337 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Tue, 30 Oct 2018 15:04:51 -0700 Subject: kernel/fail_function.c: remove meaningless null pointer check before debugfs_remove_recursive debugfs_remove_recursive() already takes a null pointer into account. Just remove the null check before debugfs_remove_recursive(). Link: http://lkml.kernel.org/r/1537494404-16473-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhong jiang Acked-by: Masami Hiramatsu Acked-by: Kees Cook Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fail_function.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fail_function.c b/kernel/fail_function.c index bc80a4e268c0..17f75b545f66 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -173,8 +173,7 @@ static void fei_debugfs_remove_attr(struct fei_attr *attr) struct dentry *dir; dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir); - if (dir) - debugfs_remove_recursive(dir); + debugfs_remove_recursive(dir); } static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) -- cgit v1.2.3 From 2e58f57d859fbe0ce79e052d7a63bba32d5786e8 Mon Sep 17 00:00:00 2001 From: Weikang Shi Date: Tue, 30 Oct 2018 15:07:05 -0700 Subject: kernel/signal.c: fix a comment error Because get_signal_to_deliver() was renamed to get_signal(), the comment should be fixed. Link: http://lkml.kernel.org/r/1539179128-45709-1-git-send-email-swkhack@gmail.com Signed-off-by: Weikang Shi Reported-by: Christian Brauner Cc: Eric W.
Biederman Cc: Oleg Nesterov Cc: Anna-Maria Gleixner Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/signal.c | 4 ++-- kernel/signal.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 9a960829a01d..99099f73b207 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -344,10 +344,10 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) get_signal(&ksig); /* - * get_signal_to_deliver() may have run a debugger (via notify_parent()) + * get_signal() may have run a debugger (via notify_parent()) * and the debugger may have modified the state (e.g., to arrange for an * inferior call), thus it's important to check for restarting _after_ - * get_signal_to_deliver(). + * get_signal(). */ if ((long) scr->pt.r10 != -1) /* diff --git a/kernel/signal.c b/kernel/signal.c index 17565240b1c6..9a32bc2088c9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -892,7 +892,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) /* * The first thread which returns from do_signal_stop() * will take ->siglock, notice SIGNAL_CLD_MASK, and - * notify its parent. See get_signal_to_deliver(). + * notify its parent. See get_signal(). */ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); signal->group_stop_count = 0; -- cgit v1.2.3 From 95c4fb78fb23081472465ca20d5d31c4b780ed82 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 30 Oct 2018 15:07:13 -0700 Subject: kernel/panic.c: do not append newline to the stack protector panic string ... because panic() itself already does this. Otherwise you get a line-broken trailer: [ 1.836965] ---[ end Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: pgd_alloc+0x29e/0x2a0 [ 1.836965] ]--- Link: http://lkml.kernel.org/r/20181008202901.7894-1-bp@alien8.de Signed-off-by: Borislav Petkov Acked-by: Kees Cook Cc: Masahiro Yamada Cc: "Steven Rostedt (VMware)" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/panic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 8b2e002d52eb..837a94b7024d 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -631,7 +631,7 @@ device_initcall(register_warn_debugfs); */ __visible void __stack_chk_fail(void) { - panic("stack-protector: Kernel stack is corrupted in: %pB\n", + panic("stack-protector: Kernel stack is corrupted in: %pB", __builtin_return_address(0)); } EXPORT_SYMBOL(__stack_chk_fail); -- cgit v1.2.3 From b49dec1cf8ff1e0b204dd2c30b95a92d75591146 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 30 Oct 2018 15:07:17 -0700 Subject: kernel/panic.c: filter out a potential trailing newline If a call to panic() terminates the string with a \n, the result puts the closing brace ']---' on a newline because panic() itself adds \n too. Now, if one goes and removes the newline chars from all panic() invocations - and the stats right now look like this: ~300 calls with a \n, ~500 calls without a \n - one is destined for a never-ending game of whack-a-mole because the usual thing to do is add a newline at the end of a string a function is supposed to print. Therefore, simply zap any \n at the end of the panic string to avoid touching so many places in the kernel.
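A minimal user-space sketch of that approach (a hypothetical helper for illustration only, not the kernel change itself, which performs the equivalent check on the length returned by vscnprintf() in the diff below): format the message first, then strip at most one trailing newline so the caller can always append its own.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical helper: format a message and drop a single trailing '\n'. */
static void format_panic_msg(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (len < 0)
		return;
	if ((size_t)len >= size)	/* output was truncated */
		len = size - 1;
	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';
}

int main(void)
{
	char buf[128];

	format_panic_msg(buf, sizeof(buf), "something broke: %d\n", 42);
	printf("Kernel panic - not syncing: %s\n", buf);	/* one newline, not two */
	return 0;
}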
Link: http://lkml.kernel.org/r/20181009205019.2786-1-bp@alien8.de Signed-off-by: Borislav Petkov Acked-by: Kees Cook Reviewed-by: Steven Rostedt (VMware) Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/panic.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 837a94b7024d..f6d549a29a5c 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -136,7 +136,7 @@ void panic(const char *fmt, ...) { static char buf[1024]; va_list args; - long i, i_next = 0; + long i, i_next = 0, len; int state = 0; int old_cpu, this_cpu; bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers; @@ -173,8 +173,12 @@ void panic(const char *fmt, ...) console_verbose(); bust_spinlocks(1); va_start(args, fmt); - vsnprintf(buf, sizeof(buf), fmt, args); + len = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); + + if (len && buf[len - 1] == '\n') + buf[len - 1] = '\0'; + pr_emerg("Kernel panic - not syncing: %s\n", buf); #ifdef CONFIG_DEBUG_BUGVERBOSE /* -- cgit v1.2.3 From 6a32c2469c3fbfee8f25bcd20af647326650a6cf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 30 Oct 2018 15:07:32 -0700 Subject: kbuild: fix kernel/bounds.c 'W=1' warning Building any configuration with 'make W=1' produces a warning: kernel/bounds.c:16:6: warning: no previous prototype for 'foo' [-Wmissing-prototypes] When also passing -Werror, this prevents us from building any other files. Nobody ever calls the function, but we can't make it 'static' either since we want the compiler output. Calling it 'main' instead however avoids the warning, because gcc does not insist on having a declaration for main. Link: http://lkml.kernel.org/r/20181005083313.2088252-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Reported-by: Kieran Bingham Reviewed-by: Kieran Bingham Cc: David Laight Cc: Masahiro Yamada Cc: Greg Kroah-Hartman Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/bounds.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bounds.c b/kernel/bounds.c index c373e887c066..9795d75b09b2 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -13,7 +13,7 @@ #include #include -void foo(void) +int main(void) { /* The enum constants to put into include/generated/bounds.h */ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); @@ -23,4 +23,6 @@ void foo(void) #endif DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); /* End of constants */ + + return 0; } -- cgit v1.2.3 From eb31d559f1e8390195372cd51cfb198da8bc84b9 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 30 Oct 2018 15:08:04 -0700 Subject: memblock: remove _virt from APIs returning virtual address The conversion is done using sed -i 's@memblock_virt_alloc@memblock_alloc@g' \ $(git grep -l memblock_virt_alloc) Link: http://lkml.kernel.org/r/1536927045-23536-8-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Cc: Catalin Marinas Cc: Chris Zankel Cc: "David S. Miller" Cc: Geert Uytterhoeven Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jonas Bonn Cc: Jonathan Corbet Cc: Ley Foon Tan Cc: Mark Salter Cc: Martin Schwidefsky Cc: Matt Turner Cc: Michael Ellerman Cc: Michal Hocko Cc: Michal Simek Cc: Palmer Dabbelt Cc: Paul Burton Cc: Richard Kuo Cc: Richard Weinberger Cc: Rich Felker Cc: Russell King Cc: Serge Semin Cc: Thomas Gleixner Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/setup.c | 4 ++-- arch/arm/mach-omap2/omap_hwmod.c | 6 ++--- arch/arm64/mm/kasan_init.c | 2 +- arch/arm64/mm/numa.c | 2 +- arch/mips/kernel/setup.c | 2 +- arch/powerpc/kernel/pci_32.c | 2 +- arch/powerpc/lib/alloc.c | 2 +- arch/powerpc/mm/mmu_context_nohash.c | 6 ++--- arch/powerpc/platforms/powermac/nvram.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 6 ++--- arch/powerpc/platforms/ps3/setup.c | 2 +- arch/powerpc/sysdev/msi_bitmap.c | 2 +- arch/s390/kernel/setup.c | 8 +++---- arch/s390/kernel/smp.c | 2 +- arch/s390/kernel/topology.c | 4 ++-- arch/s390/numa/mode_emu.c | 2 +- arch/s390/numa/toptree.c | 2 +- arch/x86/mm/kasan_init_64.c | 4 ++-- arch/xtensa/mm/kasan_init.c | 2 +- drivers/clk/ti/clk.c | 2 +- drivers/firmware/memmap.c | 2 +- drivers/of/fdt.c | 2 +- drivers/of/unittest.c | 2 +- include/linux/bootmem.h | 38 +++++++++++++++---------------- init/main.c | 6 ++--- kernel/dma/swiotlb.c | 6 ++--- kernel/power/snapshot.c | 2 +- kernel/printk/printk.c | 4 ++-- lib/cpumask.c | 2 +- mm/hugetlb.c | 2 +- mm/kasan/kasan_init.c | 2 +- mm/memblock.c | 26 ++++++++++----------- mm/page_alloc.c | 8 +++---- mm/page_ext.c | 2 +- mm/percpu.c | 28 +++++++++++------------ mm/sparse-vmemmap.c | 2 +- mm/sparse.c | 12 +++++----- 37 files changed, 105 insertions(+), 105 deletions(-) (limited to 'kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 4c249cb261f3..39e6090d23ac 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -857,7 +857,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) */ boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { - res = memblock_virt_alloc(sizeof(*res), 0); + res = memblock_alloc(sizeof(*res), 0); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; res->end = phys_to_idmap(end); @@ -865,7 +865,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) request_resource(&iomem_resource, res); } - res = memblock_virt_alloc(sizeof(*res), 0); + res = memblock_alloc(sizeof(*res), 0); res->name = "System RAM"; res->start = start; res->end = end; diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 56a1fe90d394..1f9b34a7eccd 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -726,7 +726,7 @@ static int __init _setup_clkctrl_provider(struct device_node *np) u64 size; int i; - provider = memblock_virt_alloc(sizeof(*provider), 0); + provider = memblock_alloc(sizeof(*provider), 0); if (!provider) return -ENOMEM; @@ -736,12 +736,12 @@ static int __init _setup_clkctrl_provider(struct device_node *np) of_property_count_elems_of_size(np, "reg", sizeof(u32)) / 2; provider->addr = - memblock_virt_alloc(sizeof(void *) * provider->num_addrs, 0); + memblock_alloc(sizeof(void *) * provider->num_addrs, 0); if (!provider->addr) return -ENOMEM; provider->size = - memblock_virt_alloc(sizeof(u32) * provider->num_addrs, 0); + memblock_alloc(sizeof(u32) * provider->num_addrs, 0); if (!provider->size) return -ENOMEM; 
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index fccb1a6f8c6f..6a65a2912d36 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -38,7 +38,7 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); static phys_addr_t __init kasan_alloc_zeroed_page(int node) { - void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, + void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node); return __pa(p); diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index c7fb34efd23e..0bff116c07a8 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -168,7 +168,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, { int nid = early_cpu_to_node(cpu); - return memblock_virt_alloc_try_nid(size, align, + return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 01a5ff4c41ff..0c997645e8f0 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -859,7 +859,7 @@ static void __init arch_mem_init(char **cmdline_p) * Prevent memblock from allocating high memory. * This cannot be done before max_low_pfn is detected, so up * to this point is possible to only reserve physical memory - * with memblock_reserve; memblock_virt_alloc* can be used + * with memblock_reserve; memblock_alloc* can be used * only after this point */ memblock_set_current_limit(PFN_PHYS(max_low_pfn)); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4da8ed576229..d39ec3a4550a 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -203,7 +203,7 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0); + of_prop = memblock_alloc(sizeof(struct property) + 256, 0); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index 06796dec01ea..bf87d6e13369 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -14,7 +14,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) if (slab_is_available()) p = kzalloc(size, mask); else { - p = memblock_virt_alloc(size, 0); + p = memblock_alloc(size, 0); } return p; } diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 4d80239ef83c..954f1986af4d 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -461,10 +461,10 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0); - context_mm = memblock_virt_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0); + context_map = memblock_alloc(CTX_MAP_SIZE, 0); + context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0); #ifdef CONFIG_SMP - stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0); + stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, 0); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 60b03a1703d1..f45b369177a4 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -513,7 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long 
addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0); + nvram_image = memblock_alloc(NVRAM_SIZE, 0); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index cde710297a4e..23a67b545b70 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3770,7 +3770,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - phb = memblock_virt_alloc(sizeof(*phb), 0); + phb = memblock_alloc(sizeof(*phb), 0); /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); @@ -3816,7 +3816,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, else phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; - phb->diag_data = memblock_virt_alloc(phb->diag_data_size, 0); + phb->diag_data = memblock_alloc(phb->diag_data_size, 0); /* Parse 32-bit and IO ranges (if any) */ pci_process_bridge_OF_ranges(hose, np, !hose->global_number); @@ -3875,7 +3875,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, } pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); - aux = memblock_virt_alloc(size, 0); + aux = memblock_alloc(size, 0); phb->ioda.pe_alloc = aux; phb->ioda.m64_segmap = aux + m64map_off; phb->ioda.m32_segmap = aux + m32map_off; diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 77a37520068d..12519857a33c 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -126,7 +126,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_virt_alloc(p->size, p->align); + p->address = memblock_alloc(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index e64a411d1a00..349a9ff6ca5b 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -128,7 +128,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_virt_alloc(size, 0); + bmp->bitmap = memblock_alloc(size, 0); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 204ccfa54bf3..781c1053a773 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -378,7 +378,7 @@ static void __init setup_lowcore(void) * Setup lowcore for boot cpu */ BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE); - lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc)); + lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc)); lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = (unsigned long) restart_int_handler; lc->external_new_psw.mask = PSW_KERNEL_BITS | @@ -422,7 +422,7 @@ static void __init setup_lowcore(void) * Allocate the global restart stack which is the same for * all CPUs in cast *one* of them does a PSW restart. 
*/ - restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE); + restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); restart_stack += STACK_INIT_OFFSET; /* @@ -488,7 +488,7 @@ static void __init setup_resources(void) bss_resource.end = (unsigned long) __bss_stop - 1; for_each_memblock(memory, reg) { - res = memblock_virt_alloc(sizeof(*res), 8); + res = memblock_alloc(sizeof(*res), 8); res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; res->name = "System RAM"; @@ -502,7 +502,7 @@ static void __init setup_resources(void) std_res->start > res->end) continue; if (std_res->end > res->end) { - sub_res = memblock_virt_alloc(sizeof(*sub_res), 8); + sub_res = memblock_alloc(sizeof(*sub_res), 8); *sub_res = *std_res; sub_res->end = res->end; std_res->start = res->end + 1; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 1b3188f57b58..44f9a7d6450b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -761,7 +761,7 @@ void __init smp_detect_cpus(void) u16 address; /* Get CPU information */ - info = memblock_virt_alloc(sizeof(*info), 8); + info = memblock_alloc(sizeof(*info), 8); smp_get_core_info(info, 1); /* Find boot CPU type */ if (sclp.has_core_type) { diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index e8184a15578a..799a91882a76 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -519,7 +519,7 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info, nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; nr_masks = max(nr_masks, 1); for (i = 0; i < nr_masks; i++) { - mask->next = memblock_virt_alloc(sizeof(*mask->next), 8); + mask->next = memblock_alloc(sizeof(*mask->next), 8); mask = mask->next; } } @@ -537,7 +537,7 @@ void __init topology_init_early(void) } if (!MACHINE_HAS_TOPOLOGY) goto out; - tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE); + tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE); info = tl_info; store_topology(info); pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n", diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 83b222c57609..5a381fc8e958 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -313,7 +313,7 @@ static void __ref create_core_to_node_map(void) { int i; - emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8); + emu_cores = memblock_alloc(sizeof(*emu_cores), 8); for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) emu_cores->to_node_id[i] = NODE_ID_FREE; } diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c index 21d1e8a1546d..7f61cc3fd4d1 100644 --- a/arch/s390/numa/toptree.c +++ b/arch/s390/numa/toptree.c @@ -34,7 +34,7 @@ struct toptree __ref *toptree_alloc(int level, int id) if (slab_is_available()) res = kzalloc(sizeof(*res), GFP_KERNEL); else - res = memblock_virt_alloc(sizeof(*res), 8); + res = memblock_alloc(sizeof(*res), 8); if (!res) return res; diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index e3e77527f8df..77b857cb036f 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -28,10 +28,10 @@ static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); static __init void *early_alloc(size_t size, int nid, bool panic) { if (panic) - return memblock_virt_alloc_try_nid(size, size, + return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); else - return memblock_virt_alloc_try_nid_nopanic(size, size, + return memblock_alloc_try_nid_nopanic(size, size, 
__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); } diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index 6b532b6bd785..1a30a258ccd0 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -43,7 +43,7 @@ static void __init populate(void *start, void *end) unsigned long vaddr = (unsigned long)start; pgd_t *pgd = pgd_offset_k(vaddr); pmd_t *pmd = pmd_offset(pgd, vaddr); - pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); + pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); pr_debug("%s: %p - %p\n", __func__, start, end); diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 7d22e1af2247..5c54d3734daf 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -342,7 +342,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) { struct clk_iomap *io; - io = memblock_virt_alloc(sizeof(*io), 0); + io = memblock_alloc(sizeof(*io), 0); io->mem = mem; diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 5de3ed29282c..03cead6d5f97 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; - entry = memblock_virt_alloc(sizeof(struct firmware_map_entry), 0); + entry = memblock_alloc(sizeof(struct firmware_map_entry), 0); if (WARN_ON(!entry)) return -ENOMEM; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 4f915cea6f75..ffe62a7ae19b 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1179,7 +1179,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - return memblock_virt_alloc(size, align); + return memblock_alloc(size, align); } bool __init early_init_dt_verify(void *params) diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index a3a6866765f2..01e23b85e798 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -2192,7 +2192,7 @@ static struct device_node *overlay_base_root; static void * __init dt_alloc_memory(u64 size, u64 align) { - return memblock_virt_alloc(size, align); + return memblock_alloc(size, align); } /* diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index b74bafd110b9..7d91f0f5ee44 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -95,78 +95,78 @@ extern void *__alloc_bootmem_low(unsigned long size, #define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) /* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ -void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, +void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); -void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size, +void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); -void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align, +void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid); void __memblock_free_early(phys_addr_t base, phys_addr_t size); void __memblock_free_late(phys_addr_t base, phys_addr_t size); -static inline void * __init memblock_virt_alloc( +static inline void * __init memblock_alloc( phys_addr_t size, phys_addr_t align) { - return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, + return memblock_alloc_try_nid(size, 
align, BOOTMEM_LOW_LIMIT, BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_raw( +static inline void * __init memblock_alloc_raw( phys_addr_t size, phys_addr_t align) { - return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT, + return memblock_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT, BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_nopanic( +static inline void * __init memblock_alloc_nopanic( phys_addr_t size, phys_addr_t align) { - return memblock_virt_alloc_try_nid_nopanic(size, align, + return memblock_alloc_try_nid_nopanic(size, align, BOOTMEM_LOW_LIMIT, BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_low( +static inline void * __init memblock_alloc_low( phys_addr_t size, phys_addr_t align) { - return memblock_virt_alloc_try_nid(size, align, + return memblock_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_low_nopanic( +static inline void * __init memblock_alloc_low_nopanic( phys_addr_t size, phys_addr_t align) { - return memblock_virt_alloc_try_nid_nopanic(size, align, + return memblock_alloc_try_nid_nopanic(size, align, BOOTMEM_LOW_LIMIT, ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_from_nopanic( +static inline void * __init memblock_alloc_from_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) { - return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr, + return memblock_alloc_try_nid_nopanic(size, align, min_addr, BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } -static inline void * __init memblock_virt_alloc_node( +static inline void * __init memblock_alloc_node( phys_addr_t size, int nid) { - return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, + return memblock_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT, BOOTMEM_ALLOC_ACCESSIBLE, nid); } -static inline void * __init memblock_virt_alloc_node_nopanic( +static inline void * __init memblock_alloc_node_nopanic( phys_addr_t size, int nid) { - return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, + return memblock_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, BOOTMEM_ALLOC_ACCESSIBLE, nid); } diff --git a/init/main.c b/init/main.c index 1c3f90264280..86b59cf3bec7 100644 --- a/init/main.c +++ b/init/main.c @@ -375,10 +375,10 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } static void __init setup_command_line(char *command_line) { saved_command_line = - memblock_virt_alloc(strlen(boot_command_line) + 1, 0); + memblock_alloc(strlen(boot_command_line) + 1, 0); initcall_command_line = - memblock_virt_alloc(strlen(boot_command_line) + 1, 0); - static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0); + memblock_alloc(strlen(boot_command_line) + 1, 0); + static_command_line = memblock_alloc(strlen(command_line) + 1, 0); strcpy(saved_command_line, boot_command_line); strcpy(static_command_line, command_line); } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index ebecaf255ea2..801da67e957b 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -204,10 +204,10 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE * between io_tlb_start and io_tlb_end. 
*/ - io_tlb_list = memblock_virt_alloc( + io_tlb_list = memblock_alloc( PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), PAGE_SIZE); - io_tlb_orig_addr = memblock_virt_alloc( + io_tlb_orig_addr = memblock_alloc( PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), PAGE_SIZE); for (i = 0; i < io_tlb_nslabs; i++) { @@ -242,7 +242,7 @@ swiotlb_init(int verbose) bytes = io_tlb_nslabs << IO_TLB_SHIFT; /* Get IO TLB memory from the low pages */ - vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); + vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) return; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3d37c279c090..34116a6097be 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -963,7 +963,7 @@ void __init __register_nosave_region(unsigned long start_pfn, BUG_ON(!region); } else { /* This allocation cannot fail */ - region = memblock_virt_alloc(sizeof(struct nosave_region), 0); + region = memblock_alloc(sizeof(struct nosave_region), 0); } region->start_pfn = start_pfn; region->end_pfn = end_pfn; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b77150ad1965..429e4a3833ca 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1111,9 +1111,9 @@ void __init setup_log_buf(int early) if (early) { new_log_buf = - memblock_virt_alloc(new_log_buf_len, LOG_ALIGN); + memblock_alloc(new_log_buf_len, LOG_ALIGN); } else { - new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len, + new_log_buf = memblock_alloc_nopanic(new_log_buf_len, LOG_ALIGN); } diff --git a/lib/cpumask.c b/lib/cpumask.c index beca6244671a..1405cb22e6bc 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var); */ void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { - *mask = memblock_virt_alloc(cpumask_size(), 0); + *mask = memblock_alloc(cpumask_size(), 0); } /** diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7b5c0ad9a6bd..51e9f17dbd5c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2100,7 +2100,7 @@ int __alloc_bootmem_huge_page(struct hstate *h) for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { void *addr; - addr = memblock_virt_alloc_try_nid_raw( + addr = memblock_alloc_try_nid_raw( huge_page_size(h), huge_page_size(h), 0, BOOTMEM_ALLOC_ACCESSIBLE, node); if (addr) { diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c index 7a2a2f13f86f..24d734bdff6b 100644 --- a/mm/kasan/kasan_init.c +++ b/mm/kasan/kasan_init.c @@ -83,7 +83,7 @@ static inline bool kasan_zero_page_entry(pte_t pte) static __init void *early_alloc(size_t size, int node) { - return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), + return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, node); } diff --git a/mm/memblock.c b/mm/memblock.c index 20358374e8a8..58340de3ebc6 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1319,7 +1319,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali } /** - * memblock_virt_alloc_internal - allocate boot memory block + * memblock_alloc_internal - allocate boot memory block * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region to allocate (phys address) @@ -1345,7 +1345,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali * Return: * Virtual address of allocated memory block 
on success, NULL on failure. */ -static void * __init memblock_virt_alloc_internal( +static void * __init memblock_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) @@ -1412,7 +1412,7 @@ done: } /** - * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing + * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing * memory and without panicking * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size @@ -1430,7 +1430,7 @@ done: * Return: * Virtual address of allocated memory block on success, NULL on failure. */ -void * __init memblock_virt_alloc_try_nid_raw( +void * __init memblock_alloc_try_nid_raw( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) @@ -1441,7 +1441,7 @@ void * __init memblock_virt_alloc_try_nid_raw( __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); - ptr = memblock_virt_alloc_internal(size, align, + ptr = memblock_alloc_internal(size, align, min_addr, max_addr, nid); if (ptr && size > 0) page_init_poison(ptr, size); @@ -1450,7 +1450,7 @@ void * __init memblock_virt_alloc_try_nid_raw( } /** - * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block + * memblock_alloc_try_nid_nopanic - allocate boot memory block * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region from where the allocation @@ -1466,7 +1466,7 @@ void * __init memblock_virt_alloc_try_nid_raw( * Return: * Virtual address of allocated memory block on success, NULL on failure. */ -void * __init memblock_virt_alloc_try_nid_nopanic( +void * __init memblock_alloc_try_nid_nopanic( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) @@ -1477,7 +1477,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic( __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); - ptr = memblock_virt_alloc_internal(size, align, + ptr = memblock_alloc_internal(size, align, min_addr, max_addr, nid); if (ptr) memset(ptr, 0, size); @@ -1485,7 +1485,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic( } /** - * memblock_virt_alloc_try_nid - allocate boot memory block with panicking + * memblock_alloc_try_nid - allocate boot memory block with panicking * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region from where the allocation @@ -1495,14 +1495,14 @@ void * __init memblock_virt_alloc_try_nid_nopanic( * allocate only from memory limited by memblock.current_limit value * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * - * Public panicking version of memblock_virt_alloc_try_nid_nopanic() + * Public panicking version of memblock_alloc_try_nid_nopanic() * which provides debug information (including caller info), if enabled, * and panics if the request can not be satisfied. * * Return: * Virtual address of allocated memory block on success, NULL on failure. 
*/ -void * __init memblock_virt_alloc_try_nid( +void * __init memblock_alloc_try_nid( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) @@ -1512,7 +1512,7 @@ void * __init memblock_virt_alloc_try_nid( memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); - ptr = memblock_virt_alloc_internal(size, align, + ptr = memblock_alloc_internal(size, align, min_addr, max_addr, nid); if (ptr) { memset(ptr, 0, size); @@ -1529,7 +1529,7 @@ void * __init memblock_virt_alloc_try_nid( * @base: phys starting address of the boot memory block * @size: size of the boot memory block in bytes * - * Free boot memory block previously allocated by memblock_virt_alloc_xx() API. + * Free boot memory block previously allocated by memblock_alloc_xx() API. * The freeing memory will not be released to the buddy allocator. */ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59d171f84445..8ca6954fdcdc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6209,7 +6209,7 @@ static void __ref setup_usemap(struct pglist_data *pgdat, zone->pageblock_flags = NULL; if (usemapsize) zone->pageblock_flags = - memblock_virt_alloc_node_nopanic(usemapsize, + memblock_alloc_node_nopanic(usemapsize, pgdat->node_id); } #else @@ -6439,7 +6439,7 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat) end = pgdat_end_pfn(pgdat); end = ALIGN(end, MAX_ORDER_NR_PAGES); size = (end - start) * sizeof(struct page); - map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id); + map = memblock_alloc_node_nopanic(size, pgdat->node_id); pgdat->node_mem_map = map + offset; } pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", @@ -7711,9 +7711,9 @@ void *__init alloc_large_system_hash(const char *tablename, size = bucketsize << log2qty; if (flags & HASH_EARLY) { if (flags & HASH_ZERO) - table = memblock_virt_alloc_nopanic(size, 0); + table = memblock_alloc_nopanic(size, 0); else - table = memblock_virt_alloc_raw(size, 0); + table = memblock_alloc_raw(size, 0); } else if (hashdist) { table = __vmalloc(size, gfp_flags, PAGE_KERNEL); } else { diff --git a/mm/page_ext.c b/mm/page_ext.c index a9826da84ccb..e77c0f031dd0 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -161,7 +161,7 @@ static int __init alloc_node_page_ext(int nid) table_size = get_entry_size() * nr_pages; - base = memblock_virt_alloc_try_nid_nopanic( + base = memblock_alloc_try_nid_nopanic( table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); if (!base) diff --git a/mm/percpu.c b/mm/percpu.c index 4b90682623e9..3050c1d37d37 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1101,7 +1101,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, region_size = ALIGN(start_offset + map_size, lcm_align); /* allocate chunk */ - chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) + + chunk = memblock_alloc(sizeof(struct pcpu_chunk) + BITS_TO_LONGS(region_size >> PAGE_SHIFT), 0); @@ -1114,11 +1114,11 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, chunk->nr_pages = region_size >> PAGE_SHIFT; region_bits = pcpu_chunk_map_bits(chunk); - chunk->alloc_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits) * + chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]), 0); - chunk->bound_map = memblock_virt_alloc(BITS_TO_LONGS(region_bits + 1) * + 
chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]), 0); - chunk->md_blocks = memblock_virt_alloc(pcpu_chunk_nr_blocks(chunk) * + chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]), 0); pcpu_init_md_blocks(chunk); @@ -1888,7 +1888,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, __alignof__(ai->groups[0].cpu_map[0])); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); - ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE); + ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE); if (!ptr) return NULL; ai = ptr; @@ -2075,12 +2075,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ - group_offsets = memblock_virt_alloc(ai->nr_groups * + group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]), 0); - group_sizes = memblock_virt_alloc(ai->nr_groups * + group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]), 0); - unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); - unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); + unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); + unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; @@ -2144,7 +2144,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, * empty chunks. */ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; - pcpu_slot = memblock_virt_alloc( + pcpu_slot = memblock_alloc( pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_slot[i]); @@ -2458,7 +2458,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); - areas = memblock_virt_alloc_nopanic(areas_size, 0); + areas = memblock_alloc_nopanic(areas_size, 0); if (!areas) { rc = -ENOMEM; goto out_free; @@ -2599,7 +2599,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); - pages = memblock_virt_alloc(pages_size, 0); + pages = memblock_alloc(pages_size, 0); /* allocate pages */ j = 0; @@ -2688,7 +2688,7 @@ EXPORT_SYMBOL(__per_cpu_offset); static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, size_t align) { - return memblock_virt_alloc_from_nopanic( + return memblock_alloc_from_nopanic( size, align, __pa(MAX_DMA_ADDRESS)); } @@ -2737,7 +2737,7 @@ void __init setup_per_cpu_areas(void) void *fc; ai = pcpu_alloc_alloc_info(1, 1); - fc = memblock_virt_alloc_from_nopanic(unit_size, + fc = memblock_alloc_from_nopanic(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 8301293331a2..91c2c3d25827 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -42,7 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node, unsigned long align, unsigned long goal) { - return memblock_virt_alloc_try_nid_raw(size, align, goal, + return memblock_alloc_try_nid_raw(size, align, goal, BOOTMEM_ALLOC_ACCESSIBLE, node); } diff --git a/mm/sparse.c b/mm/sparse.c index 67ad061f7fb8..cb900dda7fd2 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ 
-68,7 +68,7 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid) if (slab_is_available()) section = kzalloc_node(array_size, GFP_KERNEL, nid); else - section = memblock_virt_alloc_node(array_size, nid); + section = memblock_alloc_node(array_size, nid); return section; } @@ -216,7 +216,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); - mem_section = memblock_virt_alloc(size, align); + mem_section = memblock_alloc(size, align); } #endif @@ -306,7 +306,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, limit = goal + (1UL << PA_SECTION_SHIFT); nid = early_pfn_to_nid(goal >> PAGE_SHIFT); again: - p = memblock_virt_alloc_try_nid_nopanic(size, + p = memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, goal, limit, nid); if (!p && limit) { @@ -362,7 +362,7 @@ static unsigned long * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { - return memblock_virt_alloc_node_nopanic(size, pgdat->node_id); + return memblock_alloc_node_nopanic(size, pgdat->node_id); } static void __init check_usemap_section_nr(int nid, unsigned long *usemap) @@ -391,7 +391,7 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid, if (map) return map; - map = memblock_virt_alloc_try_nid(size, + map = memblock_alloc_try_nid(size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); return map; @@ -405,7 +405,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid) { WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ sparsemap_buf = - memblock_virt_alloc_try_nid_raw(size, PAGE_SIZE, + memblock_alloc_try_nid_raw(size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); sparsemap_buf_end = sparsemap_buf + size; -- cgit v1.2.3 From 57c8a661d95dff48dd9c2f2496139082bbaf241a Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 30 Oct 2018 15:09:49 -0700 Subject: mm: remove include/linux/bootmem.h Move remaining definitions and declarations from include/linux/bootmem.h into include/linux/memblock.h and remove the redundant header. The includes were replaced with the semantic patch below and then semi-automated removal of duplicated '#include <linux/memblock.h>': @@ @@ - #include <linux/bootmem.h> + #include <linux/memblock.h> [sfr@canb.auug.org.au: dma-direct: fix up for the removal of linux/bootmem.h] Link: http://lkml.kernel.org/r/20181002185342.133d1680@canb.auug.org.au [sfr@canb.auug.org.au: powerpc: fix up for removal of linux/bootmem.h] Link: http://lkml.kernel.org/r/20181005161406.73ef8727@canb.auug.org.au [sfr@canb.auug.org.au: x86/kaslr, ACPI/NUMA: fix for linux/bootmem.h removal] Link: http://lkml.kernel.org/r/1536927045-23536-30-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Signed-off-by: Stephen Rothwell Acked-by: Michal Hocko Cc: Catalin Marinas Cc: Chris Zankel Cc: "David S. Miller" Cc: Geert Uytterhoeven Cc: Greentime Hu Cc: Greg Kroah-Hartman Cc: Guan Xuetao Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jonas Bonn Cc: Jonathan Corbet Cc: Ley Foon Tan Cc: Mark Salter Cc: Martin Schwidefsky Cc: Matt Turner Cc: Michael Ellerman Cc: Michal Simek Cc: Palmer Dabbelt Cc: Paul Burton Cc: Richard Kuo Cc: Richard Weinberger Cc: Rich Felker Cc: Russell King Cc: Serge Semin Cc: Thomas Gleixner Cc: Tony Luck Cc: Vineet Gupta Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/core_cia.c | 2 +- arch/alpha/kernel/core_irongate.c | 1 - arch/alpha/kernel/core_marvel.c | 2 +- arch/alpha/kernel/core_titan.c | 2 +- arch/alpha/kernel/core_tsunami.c | 2 +- arch/alpha/kernel/pci-noop.c | 2 +- arch/alpha/kernel/pci.c | 2 +- arch/alpha/kernel/pci_iommu.c | 2 +- arch/alpha/kernel/setup.c | 1 - arch/alpha/kernel/sys_nautilus.c | 2 +- arch/alpha/mm/init.c | 2 +- arch/alpha/mm/numa.c | 1 - arch/arc/kernel/unwind.c | 2 +- arch/arc/mm/highmem.c | 2 +- arch/arc/mm/init.c | 1 - arch/arm/kernel/devtree.c | 1 - arch/arm/kernel/setup.c | 1 - arch/arm/mach-omap2/omap_hwmod.c | 2 +- arch/arm/mm/dma-mapping.c | 1 - arch/arm/mm/init.c | 1 - arch/arm/xen/mm.c | 1 - arch/arm/xen/p2m.c | 2 +- arch/arm64/kernel/acpi.c | 1 - arch/arm64/kernel/acpi_numa.c | 1 - arch/arm64/kernel/setup.c | 1 - arch/arm64/mm/dma-mapping.c | 2 +- arch/arm64/mm/init.c | 1 - arch/arm64/mm/kasan_init.c | 1 - arch/arm64/mm/numa.c | 1 - arch/c6x/kernel/setup.c | 1 - arch/c6x/mm/init.c | 2 +- arch/h8300/kernel/setup.c | 1 - arch/h8300/mm/init.c | 2 +- arch/hexagon/kernel/dma.c | 2 +- arch/hexagon/kernel/setup.c | 2 +- arch/hexagon/mm/init.c | 1 - arch/ia64/kernel/crash.c | 2 +- arch/ia64/kernel/efi.c | 2 +- arch/ia64/kernel/ia64_ksyms.c | 2 +- arch/ia64/kernel/iosapic.c | 2 +- arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/mca_drv.c | 2 +- arch/ia64/kernel/setup.c | 1 - arch/ia64/kernel/smpboot.c | 2 +- arch/ia64/kernel/topology.c | 2 +- arch/ia64/kernel/unwind.c | 2 +- arch/ia64/mm/contig.c | 1 - arch/ia64/mm/discontig.c | 1 - arch/ia64/mm/init.c | 1 - arch/ia64/mm/numa.c | 2 +- arch/ia64/mm/tlb.c | 2 +- arch/ia64/pci/pci.c | 2 +- arch/ia64/sn/kernel/bte.c | 2 +- arch/ia64/sn/kernel/io_common.c | 2 +- arch/ia64/sn/kernel/setup.c | 2 +- arch/m68k/atari/stram.c | 2 +- arch/m68k/coldfire/m54xx.c | 2 +- arch/m68k/kernel/setup_mm.c | 1 - arch/m68k/kernel/setup_no.c | 1 - arch/m68k/kernel/uboot.c | 2 +- arch/m68k/mm/init.c | 2 +- arch/m68k/mm/mcfmmu.c | 1 - arch/m68k/mm/motorola.c | 1 - arch/m68k/mm/sun3mmu.c | 2 +- arch/m68k/sun3/config.c | 2 +- arch/m68k/sun3/dvma.c | 2 +- arch/m68k/sun3/mmu_emu.c | 2 +- arch/m68k/sun3/sun3dvma.c | 2 +- arch/m68k/sun3x/dvma.c | 2 +- arch/microblaze/mm/consistent.c | 2 +- arch/microblaze/mm/init.c | 3 +- arch/microblaze/pci/pci-common.c | 2 +- arch/mips/ar7/memory.c | 2 +- arch/mips/ath79/setup.c | 2 +- arch/mips/bcm63xx/prom.c | 2 +- arch/mips/bcm63xx/setup.c | 2 +- arch/mips/bmips/setup.c | 2 +- arch/mips/cavium-octeon/dma-octeon.c | 2 +- arch/mips/dec/prom/memory.c | 2 +- arch/mips/emma/common/prom.c | 2 +- arch/mips/fw/arc/memory.c | 2 +- arch/mips/jazz/jazzdma.c | 2 +- arch/mips/kernel/crash.c | 2 +- arch/mips/kernel/crash_dump.c | 2 +- arch/mips/kernel/prom.c | 2 +- arch/mips/kernel/setup.c | 1 - arch/mips/kernel/traps.c | 1 - arch/mips/kernel/vpe.c | 2 +- arch/mips/kvm/commpage.c | 2 +- arch/mips/kvm/dyntrans.c | 2 +- arch/mips/kvm/emulate.c | 2 +- arch/mips/kvm/interrupt.c | 2 +- arch/mips/kvm/mips.c | 2 +- arch/mips/lantiq/prom.c | 2 +- arch/mips/lasat/prom.c | 2 +- arch/mips/loongson64/common/init.c | 2 +- arch/mips/loongson64/loongson-3/numa.c | 1 - 
arch/mips/mm/init.c | 2 +- arch/mips/mm/pgtable-32.c | 2 +- arch/mips/mti-malta/malta-memory.c | 2 +- arch/mips/netlogic/xlp/dt.c | 2 +- arch/mips/pci/pci-legacy.c | 2 +- arch/mips/pci/pci.c | 2 +- arch/mips/ralink/of.c | 2 +- arch/mips/rb532/prom.c | 2 +- arch/mips/sgi-ip27/ip27-memory.c | 1 - arch/mips/sibyte/common/cfe.c | 2 +- arch/mips/sibyte/swarm/setup.c | 2 +- arch/mips/txx9/rbtx4938/prom.c | 2 +- arch/nds32/kernel/setup.c | 3 +- arch/nds32/mm/highmem.c | 2 +- arch/nds32/mm/init.c | 3 +- arch/nios2/kernel/prom.c | 2 +- arch/nios2/kernel/setup.c | 1 - arch/nios2/mm/init.c | 2 +- arch/openrisc/kernel/setup.c | 3 +- arch/openrisc/mm/init.c | 3 +- arch/parisc/mm/init.c | 1 - arch/powerpc/kernel/pci_32.c | 2 +- arch/powerpc/kernel/setup-common.c | 1 - arch/powerpc/kernel/setup_64.c | 3 +- arch/powerpc/lib/alloc.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 1 - arch/powerpc/mm/mem.c | 3 +- arch/powerpc/mm/mmu_context_nohash.c | 2 +- arch/powerpc/mm/numa.c | 3 +- arch/powerpc/platforms/powermac/nvram.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 3 +- arch/powerpc/platforms/ps3/setup.c | 2 +- arch/powerpc/sysdev/msi_bitmap.c | 2 +- arch/riscv/mm/init.c | 3 +- arch/s390/kernel/crash_dump.c | 3 +- arch/s390/kernel/setup.c | 1 - arch/s390/kernel/smp.c | 3 +- arch/s390/kernel/topology.c | 2 +- arch/s390/kernel/vdso.c | 2 +- arch/s390/mm/extmem.c | 2 +- arch/s390/mm/init.c | 3 +- arch/s390/mm/vmem.c | 3 +- arch/s390/numa/mode_emu.c | 1 - arch/s390/numa/numa.c | 1 - arch/s390/numa/toptree.c | 2 +- arch/sh/mm/init.c | 3 +- arch/sh/mm/ioremap_fixed.c | 2 +- arch/sparc/kernel/mdesc.c | 2 - arch/sparc/kernel/prom_32.c | 2 +- arch/sparc/kernel/setup_64.c | 2 +- arch/sparc/kernel/smp_64.c | 2 +- arch/sparc/mm/init_32.c | 1 - arch/sparc/mm/init_64.c | 3 +- arch/sparc/mm/srmmu.c | 2 +- arch/um/drivers/net_kern.c | 2 +- arch/um/drivers/vector_kern.c | 2 +- arch/um/kernel/initrd.c | 2 +- arch/um/kernel/mem.c | 1 - arch/um/kernel/physmem.c | 1 - arch/unicore32/kernel/hibernate.c | 2 +- arch/unicore32/kernel/setup.c | 3 +- arch/unicore32/mm/init.c | 3 +- arch/unicore32/mm/mmu.c | 1 - arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/acpi/sleep.c | 1 - arch/x86/kernel/apic/apic.c | 2 +- arch/x86/kernel/apic/io_apic.c | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/e820.c | 3 +- arch/x86/kernel/mpparse.c | 1 - arch/x86/kernel/pci-dma.c | 2 +- arch/x86/kernel/pci-swiotlb.c | 2 +- arch/x86/kernel/pvclock.c | 2 +- arch/x86/kernel/setup.c | 1 - arch/x86/kernel/setup_percpu.c | 1 - arch/x86/kernel/smpboot.c | 2 +- arch/x86/kernel/tce_64.c | 1 - arch/x86/mm/amdtopology.c | 1 - arch/x86/mm/fault.c | 2 +- arch/x86/mm/highmem_32.c | 2 +- arch/x86/mm/init.c | 1 - arch/x86/mm/init_32.c | 1 - arch/x86/mm/init_64.c | 1 - arch/x86/mm/ioremap.c | 2 +- arch/x86/mm/kasan_init_64.c | 3 +- arch/x86/mm/kaslr.c | 1 + arch/x86/mm/numa.c | 1 - arch/x86/mm/numa_32.c | 1 - arch/x86/mm/numa_64.c | 2 +- arch/x86/mm/numa_emulation.c | 1 - arch/x86/mm/pageattr-test.c | 2 +- arch/x86/mm/pageattr.c | 2 +- arch/x86/mm/pat.c | 2 +- arch/x86/mm/physaddr.c | 2 +- arch/x86/pci/i386.c | 2 +- arch/x86/platform/efi/efi.c | 3 +- arch/x86/platform/efi/efi_64.c | 2 +- arch/x86/platform/efi/quirks.c | 1 - arch/x86/platform/olpc/olpc_dt.c | 2 +- arch/x86/power/hibernate_32.c | 2 +- arch/x86/xen/enlighten.c | 2 +- arch/x86/xen/enlighten_pv.c | 3 +- arch/x86/xen/p2m.c | 1 - arch/xtensa/kernel/pci.c | 2 +- arch/xtensa/mm/cache.c | 2 +- arch/xtensa/mm/init.c | 2 +- arch/xtensa/mm/kasan_init.c | 3 +- arch/xtensa/mm/mmu.c | 2 +- 
arch/xtensa/platforms/iss/network.c | 2 +- arch/xtensa/platforms/iss/setup.c | 2 +- block/blk-settings.c | 2 +- block/bounce.c | 2 +- drivers/acpi/numa.c | 1 - drivers/acpi/tables.c | 3 +- drivers/base/platform.c | 2 +- drivers/clk/ti/clk.c | 2 +- drivers/firmware/dmi_scan.c | 2 +- drivers/firmware/efi/apple-properties.c | 2 +- drivers/firmware/iscsi_ibft_find.c | 2 +- drivers/firmware/memmap.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/mtk_iommu_v1.c | 2 +- drivers/macintosh/smu.c | 3 +- drivers/mtd/ar7part.c | 2 +- drivers/net/arcnet/arc-rimi.c | 2 +- drivers/net/arcnet/com20020-isa.c | 2 +- drivers/net/arcnet/com90io.c | 2 +- drivers/of/fdt.c | 1 - drivers/of/unittest.c | 2 +- drivers/s390/char/fs3270.c | 2 +- drivers/s390/char/tty3270.c | 2 +- drivers/s390/cio/cmf.c | 2 +- drivers/s390/virtio/virtio_ccw.c | 2 +- drivers/sfi/sfi_core.c | 2 +- drivers/tty/serial/cpm_uart/cpm_uart_core.c | 2 +- drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c | 2 +- drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c | 2 +- drivers/usb/early/xhci-dbc.c | 1 - drivers/xen/balloon.c | 2 +- drivers/xen/events/events_base.c | 2 +- drivers/xen/grant-table.c | 2 +- drivers/xen/swiotlb-xen.c | 1 - drivers/xen/xen-selfballoon.c | 2 +- fs/dcache.c | 2 +- fs/inode.c | 2 +- fs/namespace.c | 2 +- fs/proc/kcore.c | 2 +- fs/proc/page.c | 2 +- fs/proc/vmcore.c | 2 +- include/linux/bootmem.h | 173 ---------------------------- include/linux/memblock.h | 151 +++++++++++++++++++++++- init/main.c | 2 +- kernel/dma/direct.c | 2 +- kernel/dma/swiotlb.c | 2 +- kernel/futex.c | 2 +- kernel/locking/qspinlock_paravirt.h | 2 +- kernel/pid.c | 2 +- kernel/power/snapshot.c | 2 +- kernel/printk/printk.c | 1 - kernel/profile.c | 2 +- lib/cpumask.c | 2 +- mm/hugetlb.c | 1 - mm/kasan/kasan_init.c | 3 +- mm/kmemleak.c | 2 +- mm/memblock.c | 1 - mm/memory_hotplug.c | 1 - mm/page_alloc.c | 1 - mm/page_ext.c | 2 +- mm/page_idle.c | 2 +- mm/page_owner.c | 2 +- mm/percpu.c | 2 +- mm/sparse-vmemmap.c | 1 - mm/sparse.c | 1 - net/ipv4/inet_hashtables.c | 2 +- net/ipv4/tcp.c | 2 +- net/ipv4/udp.c | 2 +- net/sctp/protocol.c | 2 +- net/xfrm/xfrm_hash.c | 2 +- 275 files changed, 353 insertions(+), 476 deletions(-) delete mode 100644 include/linux/bootmem.h (limited to 'kernel') diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c index 026ee955fd10..867e8730b0c5 100644 --- a/arch/alpha/kernel/core_cia.c +++ b/arch/alpha/kernel/core_cia.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c index 35572be9deb5..a9fd133a7fb2 100644 --- a/arch/alpha/kernel/core_irongate.c +++ b/arch/alpha/kernel/core_irongate.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 1f00c9433b10..8a568c4d8e81 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c index 132b06bdf903..97551597581b 100644 --- a/arch/alpha/kernel/core_titan.c +++ b/arch/alpha/kernel/core_titan.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index e7c956ea46b6..f334b8928d72 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ 
-17,7 +17,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 59cbfc2bf2c5..a9378ee0c2f1 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 4cc3eb92f55b..13937e72d875 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 5d178c7ba5b2..82cf950bda2a 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 64c06a0adf3d..a37fd990bd55 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index ff4f54b86c7f..cd9a112d67ff 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 853d15344934..a42fc5c4db89 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -19,7 +19,7 @@ #include #include #include -#include /* max_low_pfn */ +#include /* max_low_pfn */ #include #include diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 26cd925d19b1..74846553e3f1 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 2a01dd1005f4..d34f69eb1a95 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c index f582dc8944c9..48e700151810 100644 --- a/arch/arc/mm/highmem.c +++ b/arch/arc/mm/highmem.c @@ -7,7 +7,7 @@ * */ -#include +#include #include #include #include diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 0f29c6548779..f8fe5668b30f 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -8,7 +8,6 @@ #include #include -#include #include #ifdef CONFIG_BLK_DEV_INITRD #include diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 13bcd3b867cb..e3057c1b55b9 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 39e6090d23ac..840a4adc69fc 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 1f9b34a7eccd..cd5732ab0cdf 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -141,7 +141,7 @@ #include #include #include -#include +#include #include diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 66566472c153..661fe48ab78d 100644 --- a/arch/arm/mm/dma-mapping.c +++ 
b/arch/arm/mm/dma-mapping.c @@ -9,7 +9,6 @@ * * DMA uncached mapping support. */ -#include #include #include #include diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d421a10c93a8..32e4845af2b6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 785d2a562a23..cb44aa290e73 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -1,6 +1,5 @@ #include #include -#include #include #include #include diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index 0641ba54ab62..e70a49fc8dcd 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index ed46dc188b22..44e3c351e1ea 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -16,7 +16,6 @@ #define pr_fmt(fmt) "ACPI: " fmt #include -#include #include #include #include diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index 4f4f1815e047..eac1d0cc595c 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 3428427f6c93..7ce7306f1d75 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index d190612b8f33..3a703e5d4e32 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index d8d73073835f..9d9582cac6c4 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 6a65a2912d36..63527e585aac 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -11,7 +11,6 @@ */ #define pr_fmt(fmt) "kasan: " fmt -#include #include #include #include diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 0bff116c07a8..27a31efd9e8e 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -20,7 +20,6 @@ #define pr_fmt(fmt) "NUMA: " fmt #include -#include #include #include #include diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index 05d96a9541b5..2e1c0ea22eb0 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index 3383df8b3508..af5ada0520be 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #ifdef CONFIG_BLK_DEV_RAM #include #endif diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c index 34e2df5c0d6d..b32bfa1fe99e 100644 --- a/arch/h8300/kernel/setup.c +++ b/arch/h8300/kernel/setup.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index f2bf4487aabd..6519252ac4db 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include diff --git 
a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 706699374444..38eaa7b703e7 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -19,7 +19,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c index dc8c7e75b5d1..b3c3e04d4e57 100644 --- a/arch/hexagon/kernel/setup.c +++ b/arch/hexagon/kernel/setup.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 88643faf3981..1719ede9e9bd 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -20,7 +20,6 @@ #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 39f4433a6f0e..bec762a9b418 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index f77d80edddfe..8f106638913c 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -23,7 +23,7 @@ * Skip non-WB memory and ignore empty memory ranges. */ #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 6b51c88e3578..b49fe6f618ed 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -6,7 +6,7 @@ #ifdef CONFIG_VIRTUAL_MEM_MAP #include #include -#include +#include EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 550243a94b5d..fe6e4946672e 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -90,7 +90,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 71209766c47f..9a6603f8e409 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -77,7 +77,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index dfe40cbdf3b3..45f956ad715a 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 0e6c2d9fb498..583a3746d70b 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 74fe317477e6..51ec944b036c 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 9b820f7a6a98..e311ee13e61d 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index e04efa088902..7601fe0622d2 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c @@ -28,7 +28,7 @@ * acquired, then the read-write lock must be acquired first. 
*/ #include -#include +#include #include #include #include diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 9e5c23a6b8b4..6e447234205c 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -14,7 +14,6 @@ * Routines used by ia64 machines with contiguous (or virtually contiguous) * memory. */ -#include #include #include #include diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 70609f823960..8a965784340c 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 43ea4a47163d..d5e12ff1d73c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -8,7 +8,6 @@ #include #include -#include #include #include #include diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index aa19b7ac8222..3861d6e32d5f 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 5554863b4c9b..ab545daff7c3 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 5d71800df431..196a0dd7ff97 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index 9146192b86f5..9900e6d4add6 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 8b05d5581615..98f55220c67d 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved. 
*/ -#include +#include #include #include #include diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index ab2564f95199..71ad6b0ccab4 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c index 1089d67df315..6ffc204eb07d 100644 --- a/arch/m68k/atari/stram.c +++ b/arch/m68k/atari/stram.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c index adad03ca6e11..360c723c0ae6 100644 --- a/arch/m68k/coldfire/m54xx.c +++ b/arch/m68k/coldfire/m54xx.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 5d3596c180f9..a1a3eaeaf58c 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c index cfd5475bfc31..3c5def10d486 100644 --- a/arch/m68k/kernel/setup_no.c +++ b/arch/m68k/kernel/setup_no.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c index 107082877064..1b4c562753da 100644 --- a/arch/m68k/kernel/uboot.c +++ b/arch/m68k/kernel/uboot.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index ae49ae4d3049..933c33e76a48 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 38a1d92dd555..0de4999a3810 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 2113eec8dbf9..7497cf30bf1c 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index 19c05ab9824d..f736db48a2e1 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c index 79a2bb857906..542c4404861c 100644 --- a/arch/m68k/sun3/config.c +++ b/arch/m68k/sun3/config.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c index 5f92c72b05c3..a2c1c9304895 100644 --- a/arch/m68k/sun3/dvma.c +++ b/arch/m68k/sun3/dvma.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index d30da12a1702..582a1284059a 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index 72d94585b52e..8be8b750c629 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -7,7 +7,7 @@ * Contains common routines for sun3/sun3x DVMA 
management. */ -#include +#include #include #include #include diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c index b2acbc862f60..89e630e66555 100644 --- a/arch/m68k/sun3x/dvma.c +++ b/arch/m68k/sun3x/dvma.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index d801cc5f5b95..45e0a1aa9357 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 9989740d397a..8c14988f52f2 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -7,10 +7,9 @@ * for more details. */ -#include +#include #include #include -#include #include /* mem_init */ #include #include diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 2ffd171af8b6..6b89a66ec1a5 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c index 0332f0514d05..80390a9ec264 100644 --- a/arch/mips/ar7/memory.c +++ b/arch/mips/ar7/memory.c @@ -16,7 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#include +#include #include #include #include diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 4c7a93f4039a..9728abcb18fa 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index 7019e2967009..77a836e661c9 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 2be9caaa2085..e28ee9a7cc7e 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index 6329c5f780d6..1738a06396f9 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index c44c1a654471..e8eb60ed99f2 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -11,7 +11,7 @@ * Copyright (C) 2010 Cavium Networks, Inc. 
*/ #include -#include +#include #include #include #include diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c index a2acc6454cf3..5073d2ed78bb 100644 --- a/arch/mips/dec/prom/memory.c +++ b/arch/mips/dec/prom/memory.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/emma/common/prom.c b/arch/mips/emma/common/prom.c index cae42259d6da..675337b8a4a0 100644 --- a/arch/mips/emma/common/prom.c +++ b/arch/mips/emma/common/prom.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c index dd9496f26e6a..429b7f8d2aeb 100644 --- a/arch/mips/fw/arc/memory.c +++ b/arch/mips/fw/arc/memory.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index 0a0aaf39fd16..4c41ed0a637e 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 2c7288041a99..81845ba04835 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c index a8657d29c62e..01b2bd95ba1f 100644 --- a/arch/mips/kernel/crash_dump.c +++ b/arch/mips/kernel/crash_dump.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 89950b7bf536..93b8e0b4332f 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 31522d3bc8bf..41c1683761bb 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 623dc18f7f2f..0f852e1b5891 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 0bef238d2c0c..6176b9acba95 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c index f43629979a0e..5812e6145801 100644 --- a/arch/mips/kvm/commpage.c +++ b/arch/mips/kvm/commpage.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c index f8e772564d74..d77b61b3d6ee 100644 --- a/arch/mips/kvm/dyntrans.c +++ b/arch/mips/kvm/dyntrans.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include "commpage.h" diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 4144bfaef137..ec9ed23bca7f 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index aa0a1a00faf6..7257e8b6f5a9 100644 --- 
a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index f7ea8e21656b..1fcc4d149054 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index d984bd5c2ec5..14d4c5e2b42f 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c index 37b8fc5b9ac9..5ce1407de2d5 100644 --- a/arch/mips/lasat/prom.c +++ b/arch/mips/lasat/prom.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/loongson64/common/init.c b/arch/mips/loongson64/common/init.c index 6ef17120722f..c073fbcb9805 100644 --- a/arch/mips/loongson64/common/init.c +++ b/arch/mips/loongson64/common/init.c @@ -8,7 +8,7 @@ * option) any later version. */ -#include +#include #include #include #include diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 703ad4536fe0..622761878cd1 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 0893b6136498..b521d8e2d359 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index b19a3c506b1e..e2a33adc0f29 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -7,7 +7,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index a47556723b85..868921adef1d 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -12,7 +12,7 @@ * Steven J. 
Hill */ #include -#include +#include #include #include diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c index b5ba83f4c646..c856f2a3ea42 100644 --- a/arch/mips/netlogic/xlp/dt.c +++ b/arch/mips/netlogic/xlp/dt.c @@ -33,7 +33,7 @@ */ #include -#include +#include #include #include diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index 3c3b1e6abb53..687513880fbf 100644 --- a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index c2e94cf5ecda..e68b44b27c0d 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 1ada8492733b..d544e7b07f7a 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index 6484e4a4597b..361a690facbf 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index cb1f1a6a166d..d8b8444d6795 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index 092fb2a6ec4a..12a780f251e1 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 152ca71cc2d7..3b034b7178d6 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -23,7 +23,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c index bcb469247e8c..2b36a2ee744c 100644 --- a/arch/mips/txx9/rbtx4938/prom.c +++ b/arch/mips/txx9/rbtx4938/prom.c @@ -11,7 +11,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index 63a1a5ef5219..eacc79024879 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c @@ -2,9 +2,8 @@ // Copyright (C) 2005-2017 Andes Technology Corporation #include -#include -#include #include +#include #include #include #include diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c index e17cb8a69315..022779af6148 100644 --- a/arch/nds32/mm/highmem.c +++ b/arch/nds32/mm/highmem.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index 66d3e9cf498d..131104bd2538 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -7,12 +7,11 @@ #include #include #include -#include +#include #include #include #include #include -#include #include #include diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c index a6d4f7530247..232a36b511aa 100644 --- a/arch/nios2/kernel/prom.c +++ b/arch/nios2/kernel/prom.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index 
2d0011ddd4d5..6bbd4ae2beb0 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 12923501d94f..16cea5776b87 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index e17fcd83120f..c605bdad1746 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -30,13 +30,12 @@ #include #include #include -#include +#include #include #include #include #include #include -#include #include #include diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 91a6a9ab7598..d157310eb377 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -26,12 +26,11 @@ #include #include #include -#include +#include #include #include #include /* for initrd_* */ #include -#include #include #include diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 7e7a3126c5e9..2d7cffcaa476 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -14,7 +14,6 @@ #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index d39ec3a4550a..274bd1442dd9 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2b56d1f30387..93ee3703b42f 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 9216c3a7fcfc..2a51e4cc8246 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -29,10 +29,9 @@ #include #include #include -#include +#include #include #include -#include #include #include diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index bf87d6e13369..5b61704447c1 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index a7226ed9cae6..8cf035e68378 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index b3fe79064a69..0a64fffabee1 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -27,12 +27,11 @@ #include #include #include -#include +#include #include #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 954f1986af4d..67b9d7b669a1 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index f04f15f9d232..3a048e98a132 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -11,7 +11,7 @@ #define pr_fmt(fmt) "numa: " fmt #include -#include +#include #include #include #include @@ -19,7 +19,6 @@ 
#include #include #include -#include #include #include #include diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index f45b369177a4..f3391be7c762 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 23a67b545b70..aba81cbf0b36 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -17,11 +17,10 @@ #include #include #include -#include +#include #include #include #include -#include #include #include #include diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 12519857a33c..658bfab3350b 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 349a9ff6ca5b..2444feda831f 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index d58c111099b3..1d9bfaff60bc 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -13,9 +13,8 @@ #include #include -#include -#include #include +#include #include #include diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index d17566a8c76f..97eae3871868 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -13,10 +13,9 @@ #include #include #include -#include +#include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 781c1053a773..72dd23ef771b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 44f9a7d6450b..f82b3d3c36e2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -20,7 +20,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include -#include +#include #include #include #include @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 799a91882a76..8992b04c0ade 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -8,7 +8,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include -#include +#include #include #include #include diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index ec31b48a42a5..ebe748a9f472 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 84111a43ea29..eba2def3414d 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 873f6ee1c46d..76d0708438e9 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -29,7 +29,6 @@ #include #include 
#include -#include #include #include #include diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 04638b0b9ef1..0472e27febdf 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -4,14 +4,13 @@ * Author(s): Heiko Carstens */ -#include +#include #include #include #include #include #include #include -#include #include #include #include diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 5a381fc8e958..bfba273c32c0 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 297f5d8b0890..ae0d9e889534 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c index 7f61cc3fd4d1..71a608cd4f61 100644 --- a/arch/s390/numa/toptree.c +++ b/arch/s390/numa/toptree.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 21447f866415..c8c13c777162 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -11,12 +11,11 @@ #include #include #include -#include +#include #include #include #include #include -#include #include #include #include diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c index 927a1294c465..07e744d75fa0 100644 --- a/arch/sh/mm/ioremap_fixed.c +++ b/arch/sh/mm/ioremap_fixed.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index a41526bd91e2..9a26b442f820 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -5,13 +5,11 @@ */ #include #include -#include #include #include #include #include #include -#include #include #include #include diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index 4389944735c6..d41e2a749c5d 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index de7c87b153fe..cd2825cb8420 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 6cc80d0f4b9f..4792e08ad36b 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 880714565c40..d900952bfc5f 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a8c3453195e6..3c8aac21f426 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index b48fea5ad9ef..a6142c5abf61 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include diff --git 
a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index ef19a391214f..673816880cce 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -6,7 +6,7 @@ * Licensed under the GPL. */ -#include +#include #include #include #include diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 20442d20bd09..2b4dded11a7a 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c index 844056cf313e..3678f5b05e42 100644 --- a/arch/um/kernel/initrd.c +++ b/arch/um/kernel/initrd.c @@ -4,7 +4,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 2c672a8f4571..1067469ba2ea 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index 296a91a04598..5bf56af4d5b9 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c index 9969ec374abb..29b71c68eb7c 100644 --- a/arch/unicore32/kernel/hibernate.c +++ b/arch/unicore32/kernel/hibernate.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c index 9f163f976315..b2c38b32ea57 100644 --- a/arch/unicore32/kernel/setup.c +++ b/arch/unicore32/kernel/setup.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -27,7 +27,6 @@ #include #include #include -#include #include #include diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 44fd0e8fbe87..cf4eb9481fd6 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -11,13 +11,12 @@ #include #include #include -#include +#include #include #include #include #include #include -#include #include #include #include diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c index 18b355a20f0b..040a8c279761 100644 --- a/arch/unicore32/mm/mmu.c +++ b/arch/unicore32/mm/mmu.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index f5ea6415b778..7f5d212551d4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index f1915b744052..ca13851f0570 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -7,7 +7,6 @@ */ #include -#include #include #include #include diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ab731ab09f06..32b2b7a41ef5 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8c7450900e0e..5fbc57e4b0b9 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -47,7 +47,7 @@ #include #include /* time_after() */ #include -#include +#include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 
660d0b22e962..cbbd57ae06ee 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1,7 +1,7 @@ /* cpu_feature_enabled() cannot be used this early */ #define USE_EARLY_PGTABLE_L5 -#include +#include #include #include #include diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index a0ec4c37265a..68ff62bffbab 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -9,11 +9,10 @@ * allocation code routines via a platform independent interface (memblock, etc.). */ #include -#include +#include #include #include #include -#include #include #include diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index f1c5eb99d445..3482460d984d 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 7ba73fe0d917..f4562fcec681 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 71c0b01d93b1..bd08b9e1c9e2 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 637982efecd8..9b158b4716d2 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 7005f89bf3b2..b74e7bfed6ab 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 483412fb8a24..e8796fcd7e5a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 5369d7fac797..a9134d1910b9 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -49,7 +49,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c index 75730ce01f8d..285aaa62d153 100644 --- a/arch/x86/kernel/tce_64.c +++ b/arch/x86/kernel/tce_64.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c index 048c761d97b0..058b2f36b3a6 100644 --- a/arch/x86/mm/amdtopology.c +++ b/arch/x86/mm/amdtopology.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b24eb4eb9984..71d4b9d4d43f 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -8,7 +8,7 @@ #include /* task_stack_*(), ... */ #include /* oops_begin/end, ... */ #include /* search_exception_tables */ -#include /* max_low_pfn */ +#include /* max_low_pfn */ #include /* NOKPROBE_SYMBOL, ... */ #include /* kmmio_handler, ... 
*/ #include /* perf_sw_event */ diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 62915a5e0fa2..0d4bdcb84da5 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -1,7 +1,7 @@ #include #include #include /* for totalram_pages */ -#include +#include void *kmap(struct page *page) { diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index faca978ebf9d..ef99f3892e1f 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -3,7 +3,6 @@ #include #include #include -#include /* for max_low_pfn */ #include #include diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 3bbe5f58a67d..49ecf5ecf6d3 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index bfb0bedc21d3..5fab264948c2 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 24e0920a9b25..5378d10f1d31 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -6,7 +6,7 @@ * (C) Copyright 1995 1996 Linus Torvalds */ -#include +#include #include #include #include diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8f87499124b8..04a9cf6b034f 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -5,10 +5,9 @@ /* cpu_feature_enabled() cannot be used this early */ #define USE_EARLY_PGTABLE_L5 -#include +#include #include #include -#include #include #include #include diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 61db77b0eda9..3f452ffed7e9 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 16e37d712ffd..1308f5408bf7 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index e8a4a09e20f1..f2bd3d61e16b 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -22,7 +22,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include #include #include diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 066f3511d5f1..59d80160fa5a 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -3,7 +3,7 @@ * Generic VM initialization for x86-64 NUMA setups. * Copyright 2002,2003 Andi Kleen, SuSE Labs. */ -#include +#include #include "numa_internal.h" diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index b54d52a2d00a..a80fdd7fb40f 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include "numa_internal.h" diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index a25588ad75ef..08f8f76a4852 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -5,7 +5,7 @@ * Clears the a test pte bit on random pages in the direct mapping, * then reverts and compares page tables forwards and afterwards. */ -#include +#include #include #include #include diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 62bb30b4bd2a..f799076e3d57 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -3,7 +3,7 @@ * Thanks to Ben LaHaise for precious feedback. 
*/ #include -#include +#include #include #include #include diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 3d0c83ef6aab..08013524fba1 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c index 7f9acb68324c..bdc98150d4db 100644 --- a/arch/x86/mm/physaddr.c +++ b/arch/x86/mm/physaddr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include +#include #include #include #include diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index ed4ac215305d..8cd66152cdb0 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 9061babfbc83..7ae939e353cd 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -36,9 +36,8 @@ #include #include #include -#include -#include #include +#include #include #include #include diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index e8da7f492970..cf0347f61b21 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 4b70d0f5a803..95e77a667ba5 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 140cd76ee897..115c8e4173bb 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -17,7 +17,7 @@ */ #include -#include +#include #include #include #include diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 15695e30f982..be15bdcb20df 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 67b2f31a1265..e996e8e744cb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG -#include +#include #endif #include #include diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index ec7a4209f310..2f6787fc7106 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -31,7 +31,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b3e11afed25b..b06731705529 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -67,7 +67,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 21f13e9aabe1..5ca440a74316 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 9220dcde7520..b27359e2a464 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/xtensa/mm/init.c 
b/arch/xtensa/mm/init.c index f7fbe6334939..9750a48f491b 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include #include diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index 1a30a258ccd0..6b95ca43aec0 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -8,11 +8,10 @@ * Copyright (C) 2017 Cadence Design Systems Inc. */ -#include +#include #include #include #include -#include #include #include #include diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index f33a1ff2662a..a4dcfd39bc5c 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -4,7 +4,7 @@ * * Extracted from init.c */ -#include +#include #include #include #include diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 206b9d4591e8..190846dddc67 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c index 58709e89a8ed..c14cc673976c 100644 --- a/arch/xtensa/platforms/iss/setup.c +++ b/arch/xtensa/platforms/iss/setup.c @@ -16,7 +16,7 @@ * option) any later version. * */ -#include +#include #include #include #include diff --git a/block/blk-settings.c b/block/blk-settings.c index ffd459969689..696c04c1ab6c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -6,7 +6,7 @@ #include #include #include -#include /* for max_pfn/max_low_pfn */ +#include /* for max_pfn/max_low_pfn */ #include #include #include diff --git a/block/bounce.c b/block/bounce.c index ec0d99995f5f..cf49fe02f65c 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 85167603b9c9..274699463b4f 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index a3d012b08fc5..61203eebf3a1 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -31,9 +31,8 @@ #include #include #include -#include -#include #include +#include #include #include "internal.h" diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 23cf4427f425..41b91af95afb 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 5c54d3734daf..5b2867a33b98 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include "clock.h" diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index f2483548cde9..099d83e4e910 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 2b675f788b61..ac1654f74dc7 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -20,7 +20,7 @@ #define pr_fmt(fmt) "apple-properties: " fmt -#include +#include #include #include #include diff --git a/drivers/firmware/iscsi_ibft_find.c 
b/drivers/firmware/iscsi_ibft_find.c index 2224f1dc074b..72d9ea18270b 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -18,7 +18,7 @@ * GNU General Public License for more details. */ -#include +#include #include #include #include diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 03cead6d5f97..2a23453f005a 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index f9f69f7111a9..44bd5b9166bb 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -#include +#include #include #include #include diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 676c029494e4..0e780848f59b 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -13,7 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -#include +#include #include #include #include diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 0069f9084f9f..880a81c82b7a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -38,7 +38,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c index fc15ec58230a..0d33cf0842ad 100644 --- a/drivers/mtd/ar7part.c +++ b/drivers/mtd/ar7part.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index a07e24970be4..11c5bad95226 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index 38fa60ddaf2e..28510e33924f 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include "arcdevice.h" diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 4e56aaf2b984..2c546013a980 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ffe62a7ae19b..bb532aae0d92 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 01e23b85e798..49ae2aa744d6 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -5,7 +5,7 @@ #define pr_fmt(fmt) "### dt-test ### " fmt -#include +#include #include #include #include diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 16a4e8528bbc..8f3a2eeb28dc 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -8,7 +8,7 @@ * Copyright IBM Corp. 
2003, 2009 */ -#include +#include #include #include #include diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 5b8af2782282..2b0c36c2c568 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 8af4948dae80..72dd2471ec1e 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -13,7 +13,7 @@ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include +#include #include #include #include diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 8f5c1d7f751a..97b6f197f007 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index 153b3f3cc795..a5136901dd8a 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c @@ -59,7 +59,7 @@ #define KMSG_COMPONENT "SFI" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include +#include #include #include #include diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 79ad30d34949..b929c7ae3a27 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c index 4eba17f3d293..56fc527015cb 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c index e3bff068dc3c..6a1cd03bfe39 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index ddc5fa88f268..d2652dccc699 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index e12bb256036f..a3f5cbfcd4a1 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index e6c1934734b7..93194f3e7540 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 84575baceebc..f15f89df1f36 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -33,7 +33,7 @@ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt -#include +#include #include #include #include diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index c5f26a87d238..2a7f545bd0b5 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -35,7 +35,6 @@ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt -#include #include #include #include diff --git 
a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 55988b8418ee..5165aa82bf7d 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -68,7 +68,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include #include #include diff --git a/fs/dcache.c b/fs/dcache.c index c2e443fb76ae..2593153471cf 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/inode.c b/fs/inode.c index 9b808986d440..9e198f00b64c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/namespace.c b/fs/namespace.c index d86830c86ce8..98d27da43304 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d297fe4472a9..bbcc185062bb 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/fs/proc/page.c b/fs/proc/page.c index 792c78a49174..6c517b11acf8 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include +#include #include #include #include diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 0377a104495b..3fe90443c1bb 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h deleted file mode 100644 index b58873a567b2..000000000000 --- a/include/linux/bootmem.h +++ /dev/null @@ -1,173 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 - */ -#ifndef _LINUX_BOOTMEM_H -#define _LINUX_BOOTMEM_H - -#include -#include -#include -#include - -/* - * simple boot-time physical memory area allocator. 
- */ - -extern unsigned long max_low_pfn; -extern unsigned long min_low_pfn; - -/* - * highest page - */ -extern unsigned long max_pfn; -/* - * highest possible page - */ -extern unsigned long long max_possible_pfn; - -extern unsigned long memblock_free_all(void); -extern void reset_node_managed_pages(pg_data_t *pgdat); -extern void reset_all_zones_managed_pages(void); - -/* We are using top down, so it is safe to use 0 here */ -#define BOOTMEM_LOW_LIMIT 0 - -#ifndef ARCH_LOW_ADDRESS_LIMIT -#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL -#endif - -/* FIXME: use MEMBLOCK_ALLOC_* variants here */ -#define BOOTMEM_ALLOC_ACCESSIBLE 0 -#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0) - -/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */ -void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, - phys_addr_t max_addr, int nid); -void *memblock_alloc_try_nid_nopanic(phys_addr_t size, - phys_addr_t align, phys_addr_t min_addr, - phys_addr_t max_addr, int nid); -void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, - phys_addr_t min_addr, phys_addr_t max_addr, int nid); -void __memblock_free_early(phys_addr_t base, phys_addr_t size); -void __memblock_free_late(phys_addr_t base, phys_addr_t size); - -static inline void * __init memblock_alloc( - phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_raw( - phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_from( - phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) -{ - return memblock_alloc_try_nid(size, align, min_addr, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_nopanic( - phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid_nopanic(size, align, - BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_low( - phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid(size, align, - BOOTMEM_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, - NUMA_NO_NODE); -} -static inline void * __init memblock_alloc_low_nopanic( - phys_addr_t size, phys_addr_t align) -{ - return memblock_alloc_try_nid_nopanic(size, align, - BOOTMEM_LOW_LIMIT, - ARCH_LOW_ADDRESS_LIMIT, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_from_nopanic( - phys_addr_t size, phys_addr_t align, phys_addr_t min_addr) -{ - return memblock_alloc_try_nid_nopanic(size, align, min_addr, - BOOTMEM_ALLOC_ACCESSIBLE, - NUMA_NO_NODE); -} - -static inline void * __init memblock_alloc_node( - phys_addr_t size, phys_addr_t align, int nid) -{ - return memblock_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, nid); -} - -static inline void * __init memblock_alloc_node_nopanic( - phys_addr_t size, int nid) -{ - return memblock_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT, - BOOTMEM_ALLOC_ACCESSIBLE, - nid); -} - -static inline void __init memblock_free_early( - phys_addr_t base, phys_addr_t size) -{ - __memblock_free_early(base, size); -} - -static inline void __init memblock_free_early_nid( - phys_addr_t base, phys_addr_t size, int nid) -{ - __memblock_free_early(base, size); -} - -static inline void __init memblock_free_late( - phys_addr_t base, phys_addr_t size) -{ - 
__memblock_free_late(base, size); -} - -extern void *alloc_large_system_hash(const char *tablename, - unsigned long bucketsize, - unsigned long numentries, - int scale, - int flags, - unsigned int *_hash_shift, - unsigned int *_hash_mask, - unsigned long low_limit, - unsigned long high_limit); - -#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ -#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min - * shift passed via *_hash_shift */ -#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ - -/* Only NUMA needs hash distribution. 64bit NUMA architectures have - * sufficient vmalloc space. - */ -#ifdef CONFIG_NUMA -#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) -extern int hashdist; /* Distribute hashes across NUMA nodes? */ -#else -#define hashdist (0) -#endif - - -#endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 9d46a7204975..1b4d85879cbe 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -15,6 +15,19 @@ #include #include +#include + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; + +/* + * highest page + */ +extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; #define INIT_MEMBLOCK_REGIONS 128 #define INIT_PHYSMEM_REGIONS 4 @@ -119,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); enum memblock_flags choose_memblock_flags(void); +unsigned long memblock_free_all(void); +void reset_node_managed_pages(pg_data_t *pgdat); +void reset_all_zones_managed_pages(void); + /* Low level functions */ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, @@ -300,11 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r) } #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +/* Flags for memblock allocation APIs */ +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 + +/* We are using top down, so it is safe to use 0 here */ +#define MEMBLOCK_LOW_LIMIT 0 + +#ifndef ARCH_LOW_ADDRESS_LIMIT +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL +#endif + phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align); +void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); +void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); +void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, + phys_addr_t min_addr, phys_addr_t max_addr, + int nid); + +static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_raw(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_from(phys_addr_t size, + phys_addr_t align, + phys_addr_t min_addr) +{ + return memblock_alloc_try_nid(size, align, min_addr, + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_nopanic(phys_addr_t size, 
+ phys_addr_t align) +{ + return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_low(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); +} +static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size, + phys_addr_t align) +{ + return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, + ARCH_LOW_ADDRESS_LIMIT, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size, + phys_addr_t align, + phys_addr_t min_addr) +{ + return memblock_alloc_try_nid_nopanic(size, align, min_addr, + MEMBLOCK_ALLOC_ACCESSIBLE, + NUMA_NO_NODE); +} + +static inline void * __init memblock_alloc_node(phys_addr_t size, + phys_addr_t align, int nid) +{ + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, + int nid) +{ + return memblock_alloc_try_nid_nopanic(size, 0, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static inline void __init memblock_free_early(phys_addr_t base, + phys_addr_t size) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_early_nid(phys_addr_t base, + phys_addr_t size, int nid) +{ + __memblock_free_early(base, size); +} + +static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) +{ + __memblock_free_late(base, size); +} + /* * Set the allocation direction to bottom-up or top-down. */ @@ -323,10 +445,6 @@ static inline bool memblock_bottom_up(void) return memblock.bottom_up; } -/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ -#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) -#define MEMBLOCK_ALLOC_ACCESSIBLE 0 - phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, enum memblock_flags flags); @@ -432,6 +550,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo i < memblock_type->cnt; \ i++, rgn = &memblock_type->regions[i]) +extern void *alloc_large_system_hash(const char *tablename, + unsigned long bucketsize, + unsigned long numentries, + int scale, + int flags, + unsigned int *_hash_shift, + unsigned int *_hash_mask, + unsigned long low_limit, + unsigned long high_limit); + +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min + * shift passed via *_hash_shift */ +#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ + +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. + */ +#ifdef CONFIG_NUMA +#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) +extern int hashdist; /* Distribute hashes across NUMA nodes? */ +#else +#define hashdist (0) +#endif + #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); #else diff --git a/init/main.c b/init/main.c index 8cef69c61389..51b8e7b8ae5b 100644 --- a/init/main.c +++ b/init/main.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index f14c376937e5..22a12ab5a5e9 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -4,7 +4,7 @@ * * DMA operations that map physical memory directly without using an IOMMU. 
*/ -#include /* for max_pfn */ +#include /* for max_pfn */ #include #include #include diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 801da67e957b..5731daa09a32 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -40,7 +40,7 @@ #include #include -#include +#include #include #define CREATE_TRACE_POINTS diff --git a/kernel/futex.c b/kernel/futex.c index 3e2de8fc1891..f423f9b6577e 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -65,7 +65,7 @@ #include #include #include -#include +#include #include #include diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 0130e488ebfe..8f36c27c1794 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -4,7 +4,7 @@ #endif #include -#include +#include #include /* diff --git a/kernel/pid.c b/kernel/pid.c index cdf63e53a014..b2f6c506035d 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 34116a6097be..3c9e365438ad 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 429e4a3833ca..1b2a029360b7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/profile.c b/kernel/profile.c index 9aa2a4445b0d..9c08a2c7cb1d 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -16,7 +16,7 @@ #include #include -#include +#include #include #include #include diff --git a/lib/cpumask.c b/lib/cpumask.c index 1405cb22e6bc..75b5e7672c4c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -4,7 +4,7 @@ #include #include #include -#include +#include /** * cpumask_next - get the next cpu in a cpumask diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e35d99844612..c007fb5fb8d5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c index 785a9707786b..c7550eb65922 100644 --- a/mm/kasan/kasan_init.c +++ b/mm/kasan/kasan_init.c @@ -10,11 +10,10 @@ * */ -#include +#include #include #include #include -#include #include #include #include diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 4f7e4b5a2f08..877de4fa0720 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -92,7 +92,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mm/memblock.c b/mm/memblock.c index 2ed73245b5da..c655342569f8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 7e6509a53d79..41e326472ef9 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5f3291601945..ef289fadec0e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/page_ext.c b/mm/page_ext.c index 5323c2ade686..ae44f7adbe07 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include +#include #include #include #include diff --git a/mm/page_idle.c 
b/mm/page_idle.c index 6302bc62c27d..b9e4b42b33ab 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include +#include #include #include #include diff --git a/mm/page_owner.c b/mm/page_owner.c index d80adfe702d3..87bc0dfdb52b 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mm/percpu.c b/mm/percpu.c index 3050c1d37d37..61cdbb3b3736 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -65,7 +65,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include +#include #include #include #include diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 7408cabed61a..7fec05796796 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -20,7 +20,6 @@ */ #include #include -#include #include #include #include diff --git a/mm/sparse.c b/mm/sparse.c index b139fbc61d10..ab2ac45e0440 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index f5c9ef2586de..411dd7a90046 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1834818ed07b..9e6bc4d6daa7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -262,7 +262,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ca3ed931f2a9..1976fddb9e00 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -81,7 +81,7 @@ #include #include -#include +#include #include #include #include diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e948db29ab53..9b277bd36d1a 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c index 2ad33ce1ea17..eca8d84d99bf 100644 --- a/net/xfrm/xfrm_hash.c +++ b/net/xfrm/xfrm_hash.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include #include -- cgit v1.2.3 From 7e1c4e27928e5f87b9b1eaf06dc31773b2f1e7f1 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Tue, 30 Oct 2018 15:09:57 -0700 Subject: memblock: stop using implicit alignment to SMP_CACHE_BYTES When the memblock allocation APIs are called with align = 0, the alignment is implicitly set to SMP_CACHE_BYTES. This implicit alignment is applied deep in the memblock allocator and can come as a surprise. Such an alignment is not wrong in itself, but it is better to be explicit for the sake of clarity and the principle of least surprise. Replace all such uses of memblock APIs with the 'align' parameter explicitly set to SMP_CACHE_BYTES and stop the implicit alignment assignment in the memblock internal allocation functions. For the case when memblock APIs are used via helper functions, e.g. iommu_arena_new_node() on Alpha, the helper functions were detected with Coccinelle's help and then manually examined and updated where appropriate. 
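To make the conversion concrete, here is a minimal sketch of a direct caller before and after the change; struct foo_table and foo_alloc_table() are made-up names rather than code from the patch, and the caller is assumed to want only the default SMP_CACHE_BYTES alignment:

#include <linux/init.h>
#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/memblock.h>

/* Hypothetical boot-time table, used only to illustrate the conversion. */
struct foo_table {
	unsigned long entries[64];
};

static struct foo_table * __init foo_alloc_table(void)
{
	/*
	 * Old style: align = 0 was silently turned into SMP_CACHE_BYTES
	 * deep inside the memblock allocator:
	 *
	 *	return memblock_alloc(sizeof(struct foo_table), 0);
	 *
	 * New style: the alignment the caller actually gets is spelled out.
	 */
	return memblock_alloc(sizeof(struct foo_table), SMP_CACHE_BYTES);
}

Callers that genuinely need a different alignment simply pass that value instead of relying on the allocator's default.
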
The direct memblock APIs users were updated using the semantic patch below: @@ expression size, min_addr, max_addr, nid; @@ ( | - memblock_alloc_try_nid_raw(size, 0, min_addr, max_addr, nid) + memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, min_addr, max_addr, nid) | - memblock_alloc_try_nid_nopanic(size, 0, min_addr, max_addr, nid) + memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, min_addr, max_addr, nid) | - memblock_alloc_try_nid(size, 0, min_addr, max_addr, nid) + memblock_alloc_try_nid(size, SMP_CACHE_BYTES, min_addr, max_addr, nid) | - memblock_alloc(size, 0) + memblock_alloc(size, SMP_CACHE_BYTES) | - memblock_alloc_raw(size, 0) + memblock_alloc_raw(size, SMP_CACHE_BYTES) | - memblock_alloc_from(size, 0, min_addr) + memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr) | - memblock_alloc_nopanic(size, 0) + memblock_alloc_nopanic(size, SMP_CACHE_BYTES) | - memblock_alloc_low(size, 0) + memblock_alloc_low(size, SMP_CACHE_BYTES) | - memblock_alloc_low_nopanic(size, 0) + memblock_alloc_low_nopanic(size, SMP_CACHE_BYTES) | - memblock_alloc_from_nopanic(size, 0, min_addr) + memblock_alloc_from_nopanic(size, SMP_CACHE_BYTES, min_addr) | - memblock_alloc_node(size, 0, nid) + memblock_alloc_node(size, SMP_CACHE_BYTES, nid) ) [mhocko@suse.com: changelog update] [akpm@linux-foundation.org: coding-style fixes] [rppt@linux.ibm.com: fix missed uses of implicit alignment] Link: http://lkml.kernel.org/r/20181016133656.GA10925@rapoport-lnx Link: http://lkml.kernel.org/r/1538687224-17535-1-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport Suggested-by: Michal Hocko Acked-by: Paul Burton [MIPS] Acked-by: Michael Ellerman [powerpc] Acked-by: Michal Hocko Cc: Catalin Marinas Cc: Chris Zankel Cc: Geert Uytterhoeven Cc: Guan Xuetao Cc: Ingo Molnar Cc: Matt Turner Cc: Michal Simek Cc: Richard Weinberger Cc: Russell King Cc: Thomas Gleixner Cc: Tony Luck Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/core_apecs.c | 3 ++- arch/alpha/kernel/core_lca.c | 3 ++- arch/alpha/kernel/core_marvel.c | 4 ++-- arch/alpha/kernel/core_mcpcia.c | 6 +++-- arch/alpha/kernel/core_t2.c | 2 +- arch/alpha/kernel/core_titan.c | 6 +++-- arch/alpha/kernel/core_tsunami.c | 6 +++-- arch/alpha/kernel/core_wildfire.c | 6 +++-- arch/alpha/kernel/pci-noop.c | 4 ++-- arch/alpha/kernel/pci.c | 4 ++-- arch/alpha/kernel/pci_iommu.c | 4 ++-- arch/arm/kernel/setup.c | 4 ++-- arch/arm/mach-omap2/omap_hwmod.c | 8 ++++--- arch/arm64/kernel/setup.c | 2 +- arch/ia64/kernel/mca.c | 4 ++-- arch/ia64/mm/tlb.c | 6 +++-- arch/ia64/sn/kernel/io_common.c | 4 +++- arch/ia64/sn/kernel/setup.c | 5 ++-- arch/m68k/sun3/sun3dvma.c | 2 +- arch/microblaze/mm/init.c | 2 +- arch/mips/kernel/setup.c | 2 +- arch/powerpc/kernel/paca.c | 2 +- arch/powerpc/kernel/pci_32.c | 3 ++- arch/powerpc/lib/alloc.c | 2 +- arch/powerpc/mm/mmu_context_nohash.c | 7 +++--- arch/powerpc/platforms/powermac/nvram.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 6 ++--- arch/powerpc/sysdev/msi_bitmap.c | 2 +- arch/um/drivers/net_kern.c | 2 +- arch/um/drivers/vector_kern.c | 2 +- arch/um/kernel/initrd.c | 2 +- arch/unicore32/kernel/setup.c | 2 +- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/apic/io_apic.c | 2 +- arch/x86/kernel/e820.c | 3 ++- arch/x86/platform/olpc/olpc_dt.c | 2 +- arch/xtensa/platforms/iss/network.c | 2 +- drivers/clk/ti/clk.c | 2 +- drivers/firmware/efi/memmap.c | 2 +- drivers/firmware/memmap.c | 3 ++- drivers/macintosh/smu.c | 2 +- drivers/of/of_reserved_mem.c | 1 + include/linux/memblock.h | 3 ++- 
init/main.c | 13 +++++++---- kernel/power/snapshot.c | 3 ++- lib/cpumask.c | 2 +- mm/memblock.c | 8 ------- mm/page_alloc.c | 6 +++-- mm/percpu.c | 38 ++++++++++++++++--------------- mm/sparse.c | 3 ++- 50 files changed, 120 insertions(+), 96 deletions(-) (limited to 'kernel') diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c index 1bf3eef34c22..6df765ff2b10 100644 --- a/arch/alpha/kernel/core_apecs.c +++ b/arch/alpha/kernel/core_apecs.c @@ -346,7 +346,8 @@ apecs_init_arch(void) * Window 1 is direct access 1GB at 1GB * Window 2 is scatter-gather 8MB at 8MB (for isa) */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); hose->sg_pci = NULL; __direct_map_base = 0x40000000; __direct_map_size = 0x40000000; diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c index 81c0c43635b0..57e0750419f2 100644 --- a/arch/alpha/kernel/core_lca.c +++ b/arch/alpha/kernel/core_lca.c @@ -275,7 +275,8 @@ lca_init_arch(void) * Note that we do not try to save any of the DMA window CSRs * before setting them, since we cannot read those CSRs on LCA. */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); hose->sg_pci = NULL; __direct_map_base = 0x40000000; __direct_map_size = 0x40000000; diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 8a568c4d8e81..c1d0c18c71ca 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -82,7 +82,7 @@ mk_resource_name(int pe, int port, char *str) char *name; sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); - name = memblock_alloc(strlen(tmp) + 1, 0); + name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); strcpy(name, tmp); return name; @@ -117,7 +117,7 @@ alloc_io7(unsigned int pe) return NULL; } - io7 = memblock_alloc(sizeof(*io7), 0); + io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); io7->pe = pe; raw_spin_lock_init(&io7->irq_lock); diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c index b1549db54260..74b1d018124c 100644 --- a/arch/alpha/kernel/core_mcpcia.c +++ b/arch/alpha/kernel/core_mcpcia.c @@ -364,9 +364,11 @@ mcpcia_startup_hose(struct pci_controller *hose) * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci) * Window 2 is direct access 2GB at 2GB */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); hose->sg_pci = iommu_arena_new(hose, 0x40000000, - size_for_memory(0x40000000), 0); + size_for_memory(0x40000000), + SMP_CACHE_BYTES); __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c index 2c00b61ca379..98d5b6ff8a76 100644 --- a/arch/alpha/kernel/core_t2.c +++ b/arch/alpha/kernel/core_t2.c @@ -351,7 +351,7 @@ t2_sg_map_window2(struct pci_controller *hose, /* Note we can only do 1 SG window, as the other is for direct, so do an ISA SG area, especially for the floppy. 
*/ - hose->sg_isa = iommu_arena_new(hose, base, length, 0); + hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES); hose->sg_pci = NULL; temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20); diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c index 97551597581b..2a2820fb1be6 100644 --- a/arch/alpha/kernel/core_titan.c +++ b/arch/alpha/kernel/core_titan.c @@ -316,10 +316,12 @@ titan_init_one_pachip_port(titan_pachip_port *port, int index) * Window 1 is direct access 1GB at 2GB * Window 2 is scatter-gather 1GB at 3GB */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); hose->sg_isa->align_entry = 8; /* 64KB for ISA */ - hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, + SMP_CACHE_BYTES); hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ port->wsba[0].csr = hose->sg_isa->dma_base | 3; diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index f334b8928d72..fc1ab73f23de 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ -319,12 +319,14 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index) * NOTE: we need the align_entry settings for Acer devices on ES40, * specifically floppy and IDE when memory is larger than 2GB. */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */ hose->sg_isa->align_entry = 4; hose->sg_pci = iommu_arena_new(hose, 0x40000000, - size_for_memory(0x40000000), 0); + size_for_memory(0x40000000), + SMP_CACHE_BYTES); hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */ __direct_map_base = 0x80000000; diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c index cad36fc6ed7d..353c03d15442 100644 --- a/arch/alpha/kernel/core_wildfire.c +++ b/arch/alpha/kernel/core_wildfire.c @@ -111,8 +111,10 @@ wildfire_init_hose(int qbbno, int hoseno) * ??? We ought to scale window 3 memory. 
* */ - hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); - hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0); + hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, + SMP_CACHE_BYTES); + hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, + SMP_CACHE_BYTES); pci = WILDFIRE_pci(qbbno, hoseno); diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index a9378ee0c2f1..091cff3c68fd 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -33,7 +33,7 @@ alloc_pci_controller(void) { struct pci_controller *hose; - hose = memblock_alloc(sizeof(*hose), 0); + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); *hose_tail = hose; hose_tail = &hose->next; @@ -44,7 +44,7 @@ alloc_pci_controller(void) struct resource * __init alloc_resource(void) { - return memblock_alloc(sizeof(struct resource), 0); + return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); } SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 13937e72d875..97098127df83 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -392,7 +392,7 @@ alloc_pci_controller(void) { struct pci_controller *hose; - hose = memblock_alloc(sizeof(*hose), 0); + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); *hose_tail = hose; hose_tail = &hose->next; @@ -403,7 +403,7 @@ alloc_pci_controller(void) struct resource * __init alloc_resource(void) { - return memblock_alloc(sizeof(struct resource), 0); + return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); } diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 82cf950bda2a..46e08e0d9181 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -79,7 +79,7 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, printk("%s: couldn't allocate arena from node %d\n" " falling back to system-wide allocation\n", __func__, nid); - arena = memblock_alloc(sizeof(*arena), 0); + arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); } arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid); @@ -92,7 +92,7 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, #else /* CONFIG_DISCONTIGMEM */ - arena = memblock_alloc(sizeof(*arena), 0); + arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); arena->ptes = memblock_alloc_from(mem_size, align, 0); #endif /* CONFIG_DISCONTIGMEM */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 840a4adc69fc..ac7e08886863 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -856,7 +856,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) */ boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { - res = memblock_alloc(sizeof(*res), 0); + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; res->end = phys_to_idmap(end); @@ -864,7 +864,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) request_resource(&iomem_resource, res); } - res = memblock_alloc(sizeof(*res), 0); + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); res->name = "System RAM"; res->start = start; res->end = end; diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index cd5732ab0cdf..083dcd9942ce 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ 
b/arch/arm/mach-omap2/omap_hwmod.c @@ -726,7 +726,7 @@ static int __init _setup_clkctrl_provider(struct device_node *np) u64 size; int i; - provider = memblock_alloc(sizeof(*provider), 0); + provider = memblock_alloc(sizeof(*provider), SMP_CACHE_BYTES); if (!provider) return -ENOMEM; @@ -736,12 +736,14 @@ static int __init _setup_clkctrl_provider(struct device_node *np) of_property_count_elems_of_size(np, "reg", sizeof(u32)) / 2; provider->addr = - memblock_alloc(sizeof(void *) * provider->num_addrs, 0); + memblock_alloc(sizeof(void *) * provider->num_addrs, + SMP_CACHE_BYTES); if (!provider->addr) return -ENOMEM; provider->size = - memblock_alloc(sizeof(u32) * provider->num_addrs, 0); + memblock_alloc(sizeof(u32) * provider->num_addrs, + SMP_CACHE_BYTES); if (!provider->size) return -ENOMEM; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 7ce7306f1d75..953e316521fc 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -218,7 +218,7 @@ static void __init request_standard_resources(void) num_standard_resources = memblock.memory.cnt; standard_resources = memblock_alloc_low(num_standard_resources * sizeof(*standard_resources), - 0); + SMP_CACHE_BYTES); for_each_memblock(memory, region) { res = &standard_resources[i++]; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 9a6603f8e409..91bd1e129379 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -361,9 +361,9 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; #define IA64_LOG_ALLOCATE(it, size) \ {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ - (ia64_err_rec_t *)memblock_alloc(size, 0); \ + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ - (ia64_err_rec_t *)memblock_alloc(size, 0);} + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);} #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index ab545daff7c3..9340bcb4f29c 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -59,8 +59,10 @@ struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; void __init mmu_context_init (void) { - ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0); - ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, 0); + ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, + SMP_CACHE_BYTES); + ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, + SMP_CACHE_BYTES); } /* diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 98f55220c67d..8df13d0d96fa 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -391,7 +391,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node) if (node >= num_online_nodes()) /* Headless/memless IO nodes */ node = 0; - hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, 0, node); + hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, + SMP_CACHE_BYTES, + node); npda->pdinfo = (void *)hubdev_info; } diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 71ad6b0ccab4..a6d40a2c5bff 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -511,7 +511,8 @@ static void __init sn_init_pdas(char **cmdline_p) */ for_each_online_node(cnode) { nodepdaindr[cnode] = - 
memblock_alloc_node(sizeof(nodepda_t), 0, cnode); + memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, + cnode); memset(nodepdaindr[cnode]->phys_cpuid, -1, sizeof(nodepdaindr[cnode]->phys_cpuid)); spin_lock_init(&nodepdaindr[cnode]->ptc_lock); @@ -522,7 +523,7 @@ static void __init sn_init_pdas(char **cmdline_p) */ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) nodepdaindr[cnode] = - memblock_alloc_node(sizeof(nodepda_t), 0, 0); + memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0); /* * Now copy the array of nodepda pointers to each nodepda. diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index 8be8b750c629..4d64711d3d47 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -268,7 +268,7 @@ void __init dvma_init(void) list_add(&(hole->list), &hole_list); iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), - 0); + SMP_CACHE_BYTES); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 8c14988f52f2..b17fd8aafd64 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -376,7 +376,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) if (mem_init_done) p = kzalloc(size, mask); else { - p = memblock_alloc(size, 0); + p = memblock_alloc(size, SMP_CACHE_BYTES); if (p) memset(p, 0, size); } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 41c1683761bb..ea09ed6a80a9 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -916,7 +916,7 @@ static void __init resource_init(void) if (end >= HIGHMEM_START) end = HIGHMEM_START - 1; - res = memblock_alloc(sizeof(struct resource), 0); + res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); res->start = start; res->end = end; diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f331a0054b3a..913bfca09c4f 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -198,7 +198,7 @@ void __init allocate_paca_ptrs(void) paca_nr_cpu_ids = nr_cpu_ids; paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; - paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, 0)); + paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES)); memset(paca_ptrs, 0x88, paca_ptrs_size); } diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 274bd1442dd9..d3f04f2d8249 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -203,7 +203,8 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = memblock_alloc(sizeof(struct property) + 256, 0); + of_prop = memblock_alloc(sizeof(struct property) + 256, + SMP_CACHE_BYTES); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index 5b61704447c1..dedf88a76f58 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -14,7 +14,7 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) if (slab_is_available()) p = kzalloc(size, mask); else { - p = memblock_alloc(size, 0); + p = memblock_alloc(size, SMP_CACHE_BYTES); } return p; } diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 67b9d7b669a1..2faca46ad720 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -461,10 +461,11 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = 
memblock_alloc(CTX_MAP_SIZE, 0); - context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), 0); + context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); + context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), + SMP_CACHE_BYTES); #ifdef CONFIG_SMP - stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, 0); + stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index f3391be7c762..ae54d7fe68f3 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -513,7 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, 0); + nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index aba81cbf0b36..dd807446801e 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3769,7 +3769,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); - phb = memblock_alloc(sizeof(*phb), 0); + phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES); /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); @@ -3815,7 +3815,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, else phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; - phb->diag_data = memblock_alloc(phb->diag_data_size, 0); + phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES); /* Parse 32-bit and IO ranges (if any) */ pci_process_bridge_OF_ranges(hose, np, !hose->global_number); @@ -3874,7 +3874,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, } pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); - aux = memblock_alloc(size, 0); + aux = memblock_alloc(size, SMP_CACHE_BYTES); phb->ioda.pe_alloc = aux; phb->ioda.m64_segmap = aux + m64map_off; phb->ioda.m32_segmap = aux + m32map_off; diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 2444feda831f..d45450f6666a 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -128,7 +128,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_alloc(size, 0); + bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 673816880cce..624cb47cc9cd 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -650,7 +650,7 @@ static int __init eth_setup(char *str) return 1; } - new = memblock_alloc(sizeof(*new), 0); + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); INIT_LIST_HEAD(&new->list); new->index = n; diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 2b4dded11a7a..10d8d20eb9ec 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1580,7 +1580,7 @@ static int __init 
vector_setup(char *str) str, error); return 1; } - new = memblock_alloc(sizeof(*new), 0); + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); INIT_LIST_HEAD(&new->list); new->unit = n; new->arguments = str; diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c index 3678f5b05e42..ce169ea87e61 100644 --- a/arch/um/kernel/initrd.c +++ b/arch/um/kernel/initrd.c @@ -36,7 +36,7 @@ int __init read_initrd(void) return 0; } - area = memblock_alloc(size, 0); + area = memblock_alloc(size, SMP_CACHE_BYTES); if (load_initrd(initrd, area, size) == -1) return 0; diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c index b2c38b32ea57..4b0cb68c355a 100644 --- a/arch/unicore32/kernel/setup.c +++ b/arch/unicore32/kernel/setup.c @@ -206,7 +206,7 @@ request_standard_resources(struct meminfo *mi) if (mi->bank[i].size == 0) continue; - res = memblock_alloc_low(sizeof(*res), 0); + res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES); res->name = "System RAM"; res->start = mi->bank[i].start; res->end = mi->bank[i].start + mi->bank[i].size - 1; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7f5d212551d4..92c76bf97ad8 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -934,7 +934,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table) */ #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, - 0); + SMP_CACHE_BYTES); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 5fbc57e4b0b9..2953bbf05c08 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2578,7 +2578,7 @@ static struct resource * __init ioapic_setup_resources(void) n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); n *= nr_ioapics; - mem = memblock_alloc(n, 0); + mem = memblock_alloc(n, SMP_CACHE_BYTES); res = (void *)mem; mem += sizeof(struct resource) * nr_ioapics; diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 68ff62bffbab..50895c2f937d 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1093,7 +1093,8 @@ void __init e820__reserve_resources(void) struct resource *res; u64 end; - res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, 0); + res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, + SMP_CACHE_BYTES); e820_res = res; for (i = 0; i < e820_table->nr_entries; i++) { diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 115c8e4173bb..24d2175a9480 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -141,7 +141,7 @@ void * __init prom_early_alloc(unsigned long size) * fast enough on the platforms we care about while minimizing * wasted bootmem) and hand off chunks of it to callers. 
*/ - res = memblock_alloc(chunk_size, 0); + res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); BUG_ON(!res); prom_early_allocated += chunk_size; memset(res, 0, chunk_size); diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 190846dddc67..d052712373b6 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -646,7 +646,7 @@ static int __init iss_net_setup(char *str) return 1; } - new = memblock_alloc(sizeof(*new), 0); + new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); if (new == NULL) { pr_err("Alloc_bootmem failed\n"); return 1; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 5b2867a33b98..e205af814582 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -342,7 +342,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) { struct clk_iomap *io; - io = memblock_alloc(sizeof(*io), 0); + io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); io->mem = mem; diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index ef618bceb79a..fa2904fb841f 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -15,7 +15,7 @@ static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size) { - return memblock_phys_alloc(size, 0); + return memblock_phys_alloc(size, SMP_CACHE_BYTES); } static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 2a23453f005a..d168c87c7d30 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -333,7 +333,8 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; - entry = memblock_alloc(sizeof(struct firmware_map_entry), 0); + entry = memblock_alloc(sizeof(struct firmware_map_entry), + SMP_CACHE_BYTES); if (WARN_ON(!entry)) return -ENOMEM; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 880a81c82b7a..0a0b8e1f4236 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -492,7 +492,7 @@ int __init smu_init (void) goto fail_np; } - smu = memblock_alloc(sizeof(struct smu_device), 0); + smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES); spin_lock_init(&smu->lock); INIT_LIST_HEAD(&smu->cmd_list); diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index d6255c276a41..1977ee0adcb1 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -36,6 +36,7 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, * panic()s on allocation failure. */ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end; + align = !align ? 
SMP_CACHE_BYTES : align; base = __memblock_alloc_base(size, align, end); if (!base) return -ENOMEM; diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 1b4d85879cbe..aee299a6aa76 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -406,7 +406,8 @@ static inline void * __init memblock_alloc_node(phys_addr_t size, static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, int nid) { - return memblock_alloc_try_nid_nopanic(size, 0, MEMBLOCK_LOW_LIMIT, + return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, + MEMBLOCK_LOW_LIMIT, MEMBLOCK_ALLOC_ACCESSIBLE, nid); } diff --git a/init/main.c b/init/main.c index 51b8e7b8ae5b..ee147103ba1b 100644 --- a/init/main.c +++ b/init/main.c @@ -375,10 +375,11 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } static void __init setup_command_line(char *command_line) { saved_command_line = - memblock_alloc(strlen(boot_command_line) + 1, 0); + memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES); initcall_command_line = - memblock_alloc(strlen(boot_command_line) + 1, 0); - static_command_line = memblock_alloc(strlen(command_line) + 1, 0); + memblock_alloc(strlen(boot_command_line) + 1, SMP_CACHE_BYTES); + static_command_line = memblock_alloc(strlen(command_line) + 1, + SMP_CACHE_BYTES); strcpy(saved_command_line, boot_command_line); strcpy(static_command_line, command_line); } @@ -773,8 +774,10 @@ static int __init initcall_blacklist(char *str) str_entry = strsep(&str, ","); if (str_entry) { pr_debug("blacklisting initcall %s\n", str_entry); - entry = memblock_alloc(sizeof(*entry), 0); - entry->buf = memblock_alloc(strlen(str_entry) + 1, 0); + entry = memblock_alloc(sizeof(*entry), + SMP_CACHE_BYTES); + entry->buf = memblock_alloc(strlen(str_entry) + 1, + SMP_CACHE_BYTES); strcpy(entry->buf, str_entry); list_add(&entry->next, &blacklisted_initcalls); } diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 3c9e365438ad..b0308a2c6000 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -963,7 +963,8 @@ void __init __register_nosave_region(unsigned long start_pfn, BUG_ON(!region); } else { /* This allocation cannot fail */ - region = memblock_alloc(sizeof(struct nosave_region), 0); + region = memblock_alloc(sizeof(struct nosave_region), + SMP_CACHE_BYTES); } region->start_pfn = start_pfn; region->end_pfn = end_pfn; diff --git a/lib/cpumask.c b/lib/cpumask.c index 75b5e7672c4c..8d666ab84b5c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var); */ void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { - *mask = memblock_alloc(cpumask_size(), 0); + *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); } /** diff --git a/mm/memblock.c b/mm/memblock.c index c655342569f8..839531133816 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1247,9 +1247,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, { phys_addr_t found; - if (!align) - align = SMP_CACHE_BYTES; - found = memblock_find_in_range_node(size, align, start, end, nid, flags); if (found && !memblock_reserve(found, size)) { @@ -1343,8 +1340,6 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali * The allocation is performed from memory region limited by * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE. * - * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0. 
- * * The phys address of allocated boot memory block is converted to virtual and * allocated memory is reset to 0. * @@ -1374,9 +1369,6 @@ static void * __init memblock_alloc_internal( if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, nid); - if (!align) - align = SMP_CACHE_BYTES; - if (max_addr > memblock.current_limit) max_addr = memblock.current_limit; again: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef289fadec0e..a919ba5cb3c8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7710,9 +7710,11 @@ void *__init alloc_large_system_hash(const char *tablename, size = bucketsize << log2qty; if (flags & HASH_EARLY) { if (flags & HASH_ZERO) - table = memblock_alloc_nopanic(size, 0); + table = memblock_alloc_nopanic(size, + SMP_CACHE_BYTES); else - table = memblock_alloc_raw(size, 0); + table = memblock_alloc_raw(size, + SMP_CACHE_BYTES); } else if (hashdist) { table = __vmalloc(size, gfp_flags, PAGE_KERNEL); } else { diff --git a/mm/percpu.c b/mm/percpu.c index 61cdbb3b3736..a6b74c6fe0be 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1102,8 +1102,8 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ chunk = memblock_alloc(sizeof(struct pcpu_chunk) + - BITS_TO_LONGS(region_size >> PAGE_SHIFT), - 0); + BITS_TO_LONGS(region_size >> PAGE_SHIFT), + SMP_CACHE_BYTES); INIT_LIST_HEAD(&chunk->list); @@ -1114,12 +1114,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, chunk->nr_pages = region_size >> PAGE_SHIFT; region_bits = pcpu_chunk_map_bits(chunk); - chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * - sizeof(chunk->alloc_map[0]), 0); - chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * - sizeof(chunk->bound_map[0]), 0); - chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * - sizeof(chunk->md_blocks[0]), 0); + chunk->alloc_map = memblock_alloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]), + SMP_CACHE_BYTES); + chunk->bound_map = memblock_alloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]), + SMP_CACHE_BYTES); + chunk->md_blocks = memblock_alloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]), + SMP_CACHE_BYTES); pcpu_init_md_blocks(chunk); /* manage populated page bitmap */ @@ -2075,12 +2075,14 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ - group_offsets = memblock_alloc(ai->nr_groups * - sizeof(group_offsets[0]), 0); - group_sizes = memblock_alloc(ai->nr_groups * - sizeof(group_sizes[0]), 0); - unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); - unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); + group_offsets = memblock_alloc(ai->nr_groups * sizeof(group_offsets[0]), + SMP_CACHE_BYTES); + group_sizes = memblock_alloc(ai->nr_groups * sizeof(group_sizes[0]), + SMP_CACHE_BYTES); + unit_map = memblock_alloc(nr_cpu_ids * sizeof(unit_map[0]), + SMP_CACHE_BYTES); + unit_off = memblock_alloc(nr_cpu_ids * sizeof(unit_off[0]), + SMP_CACHE_BYTES); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; @@ -2144,8 +2146,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, * empty chunks. 
*/ pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; - pcpu_slot = memblock_alloc( - pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); + pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]), + SMP_CACHE_BYTES); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_slot[i]); @@ -2458,7 +2460,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); - areas = memblock_alloc_nopanic(areas_size, 0); + areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES); if (!areas) { rc = -ENOMEM; goto out_free; @@ -2599,7 +2601,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); - pages = memblock_alloc(pages_size, 0); + pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); /* allocate pages */ j = 0; diff --git a/mm/sparse.c b/mm/sparse.c index ab2ac45e0440..33307fc05c4d 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -68,7 +68,8 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid) if (slab_is_available()) section = kzalloc_node(array_size, GFP_KERNEL, nid); else - section = memblock_alloc_node(array_size, 0, nid); + section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, + nid); return section; } -- cgit v1.2.3 From 0962590e553331db2cc0aef2dc35c57f6300dbbe Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 1 Nov 2018 00:05:52 +0100 Subject: bpf: fix partial copy of map_ptr when dst is scalar ALU operations on pointers such as scalar_reg += map_value_ptr are handled in adjust_ptr_min_max_vals(). Problem is however that map_ptr and range in the register state share a union, so transferring state through dst_reg->range = ptr_reg->range is just buggy as any new map_ptr in the dst_reg is then truncated (or null) for subsequent checks. Fix this by adding a raw member and use it for copying state over to dst_reg. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Daniel Borkmann Cc: Edward Cree Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 3 +++ kernel/bpf/verifier.c | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 9e8056ec20fa..d93e89761a8b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -51,6 +51,9 @@ struct bpf_reg_state { * PTR_TO_MAP_VALUE_OR_NULL */ struct bpf_map *map_ptr; + + /* Max size from any of the above. */ + unsigned long raw; }; /* Fixed part of pointer offset, pointer types only */ s32 off; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 171a2c88e77d..774fa40a32ae 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3046,7 +3046,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; - dst_reg->range = ptr_reg->range; + dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. 
Note that off_reg->off @@ -3076,10 +3076,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; + dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ - dst_reg->range = 0; + dst_reg->raw = 0; } break; case BPF_SUB: @@ -3108,7 +3109,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; - dst_reg->range = ptr_reg->range; + dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. If the subtrahend is known @@ -3134,11 +3135,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; + dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) - dst_reg->range = 0; + dst_reg->raw = 0; } break; case BPF_AND: -- cgit v1.2.3 From 4d31f30148cea6e97e42616231eed55295117fe7 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 1 Nov 2018 00:05:53 +0100 Subject: bpf: don't set id on after map lookup with ptr_to_map_val return In the verifier there is no such semantics where registers with PTR_TO_MAP_VALUE type have an id assigned to them. This is only used in PTR_TO_MAP_VALUE_OR_NULL and later on nullified once the test against NULL has been pattern matched and type transformed into PTR_TO_MAP_VALUE. Fixes: 3e6a4b3e0289 ("bpf/verifier: introduce BPF_PTR_TO_MAP_VALUE") Signed-off-by: Daniel Borkmann Cc: Roman Gushchin Acked-by: Alexei Starovoitov Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 774fa40a32ae..1971ca325fb4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2852,10 +2852,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { - if (fn->ret_type == RET_PTR_TO_MAP_VALUE) - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; - else - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() @@ -2868,7 +2864,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; - regs[BPF_REG_0].id = ++env->id_gen; + if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; + } else { + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + regs[BPF_REG_0].id = ++env->id_gen; + } } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { int id = acquire_reference_state(env, insn_idx); if (id < 0) -- cgit v1.2.3 From 57f01796f14fecf00d330fe39c8d2477ced9cd79 Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Thu, 1 Nov 2018 00:35:05 +0000 Subject: irq/matrix: Fix memory overallocation IRQ_MATRIX_SIZE is the number of longs needed for a bitmap, multiplied by the size of a long, yielding a byte count. But it is used to size an array of longs, which is way more memory than is needed. 
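To make the overallocation concrete, here is a small stand-alone sketch (illustrative only: it assumes a 64-bit build and takes IRQ_MATRIX_BITS to be 256, the x86 NR_VECTORS value; the local BITS_TO_LONGS() copy just keeps the example compilable outside the kernel):

  #include <stdio.h>

  #define IRQ_MATRIX_BITS   256   /* assumed example value (x86 NR_VECTORS) */
  #define BITS_PER_LONG     (8 * sizeof(unsigned long))
  #define BITS_TO_LONGS(n)  (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

  /* old definition: a byte count (4 longs * 8 bytes = 32) */
  #define IRQ_MATRIX_SIZE_OLD (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
  /* fixed definition: a count of longs (4) */
  #define IRQ_MATRIX_SIZE_NEW (BITS_TO_LONGS(IRQ_MATRIX_BITS))

  int main(void)
  {
          /* both used as array lengths of unsigned long, the way
           * IRQ_MATRIX_SIZE sizes the bitmaps in struct cpumap */
          unsigned long old_map[IRQ_MATRIX_SIZE_OLD];  /* 32 longs = 256 bytes */
          unsigned long new_map[IRQ_MATRIX_SIZE_NEW];  /*  4 longs =  32 bytes */

          printf("old: %zu bytes, new: %zu bytes\n",
                 sizeof(old_map), sizeof(new_map));
          return 0;
  }

With these assumed values each bitmap comes out eight times larger than necessary under the old macro; the one-line change in the patch below restores the intended size.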
Change IRQ_MATRIX_SIZE so it is just the number of longs needed and the arrays come out the correct size. Fixes: 2f75d9e1c905 ("genirq: Implement bitmap matrix allocator") Signed-off-by: Michael Kelley Signed-off-by: Thomas Gleixner Cc: KY Srinivasan Link: https://lkml.kernel.org/r/1541032428-10392-1-git-send-email-mikelley@microsoft.com --- kernel/irq/matrix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 6e6d467f3dec..1f0985adf193 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -8,7 +8,7 @@ #include #include -#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long)) +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) struct cpumap { unsigned int available; -- cgit v1.2.3 From 98f76206b33504b934209d16196477dfa519a807 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov Date: Fri, 12 Oct 2018 14:42:53 +0100 Subject: compat: Cleanup in_compat_syscall() callers Now that in_compat_syscall() is consistent on all architectures and does not longer report true on native i686, the workarounds (ifdeffery and helpers) can be removed. Signed-off-by: Dmitry Safonov Signed-off-by: Thomas Gleixner Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Ard Biesheuvel Cc: Andy Lutomirsky Cc: "David S. Miller" Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: John Stultz Cc: "Kirill A. Shutemov" Cc: Oleg Nesterov Cc: Steffen Klassert Cc: Stephen Boyd Cc: Steven Rostedt Cc: linux-efi@vger.kernel.org Cc: netdev@vger.kernel.org Link: https://lkml.kernel.org/r/20181012134253.23266-3-dima@arista.com --- drivers/firmware/efi/efivars.c | 16 ++++------------ kernel/time/time.c | 2 +- net/xfrm/xfrm_state.c | 2 -- net/xfrm/xfrm_user.c | 2 -- 4 files changed, 5 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9bd4e..8061667a6765 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, return 0; } -static inline bool is_compat(void) -{ - if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) - return true; - - return false; -} - static void copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) { @@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; - if (is_compat()) { + if (in_compat_syscall()) { struct compat_efi_variable *compat; if (count != sizeof(*compat)) @@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) &entry->var.DataSize, entry->var.Data)) return -EIO; - if (is_compat()) { + if (in_compat_syscall()) { compat = (struct compat_efi_variable *)buf; size = sizeof(*compat); @@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *new_entry; - bool need_compat = is_compat(); + bool need_compat = in_compat_syscall(); efi_char16_t *name; unsigned long size; u32 attributes; @@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (is_compat()) { + if (in_compat_syscall()) { if (count != sizeof(*compat)) return -EINVAL; diff --git a/kernel/time/time.c b/kernel/time/time.c index e3a7f7fd3abc..ad204cf6d001 100644 --- 
a/kernel/time/time.c +++ b/kernel/time/time.c @@ -842,7 +842,7 @@ int get_timespec64(struct timespec64 *ts, ts->tv_sec = kts.tv_sec; /* Zero out the padding for 32 bit systems or in compat mode */ - if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())) + if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall()) kts.tv_nsec &= 0xFFFFFFFFUL; ts->tv_nsec = kts.tv_nsec; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b669262682c9..dc4a9f1fb941 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2077,10 +2077,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return -EOPNOTSUPP; -#endif if (!optval && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ca7a207b81a9..c9a84e22f5d5 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2621,10 +2621,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, const struct xfrm_link *link; int type, err; -#ifdef CONFIG_COMPAT if (in_compat_syscall()) return -EOPNOTSUPP; -#endif type = nlh->nlmsg_type; if (type > XFRM_MSG_MAX) -- cgit v1.2.3 From b5f2954d30c77649bce9c27e7a0a94299d9cfdf8 Mon Sep 17 00:00:00 2001 From: Dennis Zhou Date: Thu, 1 Nov 2018 17:24:10 -0400 Subject: blkcg: revert blkcg cleanups series This reverts a series committed earlier due to null pointer exception bug report in [1]. It seems there are edge case interactions that I did not consider and will need some time to understand what causes the adverse interactions. The original series can be found in [2] with a follow up series in [3]. [1] https://www.spinics.net/lists/cgroups/msg20719.html [2] https://lore.kernel.org/lkml/20180911184137.35897-1-dennisszhou@gmail.com/ [3] https://lore.kernel.org/lkml/20181020185612.51587-1-dennis@kernel.org/ This reverts the following commits: d459d853c2ed, b2c3fa546705, 101246ec02b5, b3b9f24f5fcc, e2b0989954ae, f0fcb3ec89f3, c839e7a03f92, bdc2491708c4, 74b7c02a9bc1, 5bf9a1f3b4ef, a7b39b4e961c, 07b05bcc3213, 49f4c2dc2b50, 27e6fa996c53 Signed-off-by: Dennis Zhou Signed-off-by: Jens Axboe --- Documentation/admin-guide/cgroup-v2.rst | 8 +- block/bfq-cgroup.c | 4 +- block/bfq-iosched.c | 2 +- block/bio.c | 174 +++++++++----------------------- block/blk-cgroup.c | 123 +++++++--------------- block/blk-core.c | 1 - block/blk-iolatency.c | 26 ++++- block/blk-throttle.c | 13 ++- block/bounce.c | 4 +- block/cfq-iosched.c | 4 +- drivers/block/loop.c | 5 +- drivers/md/raid0.c | 2 +- fs/buffer.c | 10 +- fs/ext4/page-io.c | 2 +- include/linux/bio.h | 26 ++--- include/linux/blk-cgroup.h | 145 +++++++++----------------- include/linux/blk_types.h | 1 + include/linux/cgroup.h | 2 - include/linux/writeback.h | 5 +- kernel/cgroup/cgroup.c | 48 ++------- kernel/trace/blktrace.c | 4 +- mm/page_io.c | 2 +- 22 files changed, 208 insertions(+), 403 deletions(-) (limited to 'kernel') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index caf36105a1c7..184193bcb262 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1857,10 +1857,8 @@ following two functions. wbc_init_bio(@wbc, @bio) Should be called for each bio carrying writeback data and - associates the bio with the inode's owner cgroup and the - corresponding request queue. 
This must be called after - a queue (device) has been associated with the bio and - before submission. + associates the bio with the inode's owner cgroup. Can be + called anytime between bio allocation and submission. wbc_account_io(@wbc, @page, @bytes) Should be called for each data segment being written out. @@ -1879,7 +1877,7 @@ the configuration, the bio may be executed at a lower priority and if the writeback session is holding shared resources, e.g. a journal entry, may lead to priority inversion. There is no one easy solution for the problem. Filesystems can try to work around specific problem -cases by skipping wbc_init_bio() or using bio_associate_create_blkg() +cases by skipping wbc_init_bio() or using bio_associate_blkcg() directly. diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index d9a7916ff0ab..9fe5952d117d 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) uint64_t serial_nr; rcu_read_lock(); - serial_nr = __bio_blkcg(bio)->css.serial_nr; + serial_nr = bio_blkcg(bio)->css.serial_nr; /* * Check whether blkcg has changed. The condition may trigger @@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr)) goto out; - bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio)); + bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); /* * Update blkg_path for bfq_log_* functions. We cache this * path, and update it here, for the following diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 6075100f03a5..3a27d31fcda6 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4384,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, rcu_read_lock(); - bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio)); + bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); if (!bfqg) { bfqq = &bfqd->oom_bfqq; goto out; diff --git a/block/bio.c b/block/bio.c index bbfeb4ee2892..4a5a036268fb 100644 --- a/block/bio.c +++ b/block/bio.c @@ -609,9 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - bio_clone_blkg_association(bio, bio_src); - - blkcg_bio_issue_init(bio); + bio_clone_blkcg_association(bio, bio_src); } EXPORT_SYMBOL(__bio_clone_fast); @@ -1956,151 +1954,69 @@ EXPORT_SYMBOL(bioset_init_from_src); #ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_blkg - associate a bio with the a blkg - * @bio: target bio - * @blkg: the blkg to associate - * - * This tries to associate @bio with the specified blkg. Association failure - * is handled by walking up the blkg tree. Therefore, the blkg associated can - * be anything between @blkg and the root_blkg. This situation only happens - * when a cgroup is dying and then the remaining bios will spill to the closest - * alive blkg. - * - * A reference will be taken on the @blkg and will be released when @bio is - * freed. - */ -int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) -{ - if (unlikely(bio->bi_blkg)) - return -EBUSY; - bio->bi_blkg = blkg_tryget_closest(blkg); - return 0; -} - -/** - * __bio_associate_blkg_from_css - internal blkg association function - * - * This in the core association function that all association paths rely on. - * A blkg reference is taken which is released upon freeing of the bio. 
- */ -static int __bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - struct request_queue *q = bio->bi_disk->queue; - struct blkcg_gq *blkg; - int ret; - - rcu_read_lock(); - - if (!css || !css->parent) - blkg = q->root_blkg; - else - blkg = blkg_lookup_create(css_to_blkcg(css), q); - - ret = bio_associate_blkg(bio, blkg); - - rcu_read_unlock(); - return ret; -} - -/** - * bio_associate_blkg_from_css - associate a bio with a specified css - * @bio: target bio - * @css: target css - * - * Associate @bio with the blkg found by combining the css's blkg and the - * request_queue of the @bio. This falls back to the queue's root_blkg if - * the association fails with the css. - */ -int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ - if (unlikely(bio->bi_blkg)) - return -EBUSY; - return __bio_associate_blkg_from_css(bio, css); -} -EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); - #ifdef CONFIG_MEMCG /** - * bio_associate_blkg_from_page - associate a bio with the page's blkg + * bio_associate_blkcg_from_page - associate a bio with the page's blkcg * @bio: target bio * @page: the page to lookup the blkcg from * - * Associate @bio with the blkg from @page's owning memcg and the respective - * request_queue. If cgroup_e_css returns NULL, fall back to the queue's - * root_blkg. - * - * Note: this must be called after bio has an associated device. + * Associate @bio with the blkcg from @page's owning memcg. This works like + * every other associate function wrt references. */ -int bio_associate_blkg_from_page(struct bio *bio, struct page *page) +int bio_associate_blkcg_from_page(struct bio *bio, struct page *page) { - struct cgroup_subsys_state *css; - int ret; + struct cgroup_subsys_state *blkcg_css; - if (unlikely(bio->bi_blkg)) + if (unlikely(bio->bi_css)) return -EBUSY; if (!page->mem_cgroup) return 0; - - rcu_read_lock(); - - css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys); - - ret = __bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); - return ret; + blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup, + &io_cgrp_subsys); + bio->bi_css = blkcg_css; + return 0; } #endif /* CONFIG_MEMCG */ /** - * bio_associate_create_blkg - associate a bio with a blkg from q - * @q: request_queue where bio is going + * bio_associate_blkcg - associate a bio with the specified blkcg * @bio: target bio + * @blkcg_css: css of the blkcg to associate + * + * Associate @bio with the blkcg specified by @blkcg_css. Block layer will + * treat @bio as if it were issued by a task which belongs to the blkcg. * - * Associate @bio with the blkg found from the bio's css and the request_queue. - * If one is not found, bio_lookup_blkg creates the blkg. This falls back to - * the queue's root_blkg if association fails. + * This function takes an extra reference of @blkcg_css which will be put + * when @bio is released. The caller must own @bio and is responsible for + * synchronizing calls to this function. 
*/ -int bio_associate_create_blkg(struct request_queue *q, struct bio *bio) +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) { - struct cgroup_subsys_state *css; - int ret = 0; - - /* someone has already associated this bio with a blkg */ - if (bio->bi_blkg) - return ret; - - rcu_read_lock(); - - css = blkcg_css(); - - ret = __bio_associate_blkg_from_css(bio, css); - - rcu_read_unlock(); - return ret; + if (unlikely(bio->bi_css)) + return -EBUSY; + css_get(blkcg_css); + bio->bi_css = blkcg_css; + return 0; } +EXPORT_SYMBOL_GPL(bio_associate_blkcg); /** - * bio_reassociate_blkg - reassociate a bio with a blkg from q - * @q: request_queue where bio is going + * bio_associate_blkg - associate a bio with the specified blkg * @bio: target bio + * @blkg: the blkg to associate * - * When submitting a bio, multiple recursive calls to make_request() may occur. - * This causes the initial associate done in blkcg_bio_issue_check() to be - * incorrect and reference the prior request_queue. This performs reassociation - * when this situation happens. + * Associate @bio with the blkg specified by @blkg. This is the queue specific + * blkcg information associated with the @bio, a reference will be taken on the + * @blkg and will be freed when the bio is freed. */ -int bio_reassociate_blkg(struct request_queue *q, struct bio *bio) +int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { - if (bio->bi_blkg) { - blkg_put(bio->bi_blkg); - bio->bi_blkg = NULL; - } - - return bio_associate_create_blkg(q, bio); + if (unlikely(bio->bi_blkg)) + return -EBUSY; + if (!blkg_try_get(blkg)) + return -ENODEV; + bio->bi_blkg = blkg; + return 0; } /** @@ -2113,6 +2029,10 @@ void bio_disassociate_task(struct bio *bio) put_io_context(bio->bi_ioc); bio->bi_ioc = NULL; } + if (bio->bi_css) { + css_put(bio->bi_css); + bio->bi_css = NULL; + } if (bio->bi_blkg) { blkg_put(bio->bi_blkg); bio->bi_blkg = NULL; @@ -2120,16 +2040,16 @@ void bio_disassociate_task(struct bio *bio) } /** - * bio_clone_blkg_association - clone blkg association from src to dst bio + * bio_clone_blkcg_association - clone blkcg association from src to dst bio * @dst: destination bio * @src: source bio */ -void bio_clone_blkg_association(struct bio *dst, struct bio *src) +void bio_clone_blkcg_association(struct bio *dst, struct bio *src) { - if (src->bi_blkg) - bio_associate_blkg(dst, src->bi_blkg); + if (src->bi_css) + WARN_ON(bio_associate_blkcg(dst, src->bi_css)); } -EXPORT_SYMBOL_GPL(bio_clone_blkg_association); +EXPORT_SYMBOL_GPL(bio_clone_blkcg_association); #endif /* CONFIG_BLK_CGROUP */ static void __init biovec_init_slabs(void) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 992da5592c6e..c630e02836a8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -84,37 +84,6 @@ static void blkg_free(struct blkcg_gq *blkg) kfree(blkg); } -static void __blkg_release(struct rcu_head *rcu) -{ - struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); - - percpu_ref_exit(&blkg->refcnt); - - /* release the blkcg and parent blkg refs this blkg has been holding */ - css_put(&blkg->blkcg->css); - if (blkg->parent) - blkg_put(blkg->parent); - - wb_congested_put(blkg->wb_congested); - - blkg_free(blkg); -} - -/* - * A group is RCU protected, but having an rcu lock does not mean that one - * can access all the fields of blkg and assume these are valid. For - * example, don't try to follow throtl_data and request queue links. 
- * - * Having a reference to blkg under an rcu allows accesses to only values - * local to groups like group stats and group rate limits. - */ -static void blkg_release(struct percpu_ref *ref) -{ - struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt); - - call_rcu(&blkg->rcu_head, __blkg_release); -} - /** * blkg_alloc - allocate a blkg * @blkcg: block cgroup the new blkg is associated with @@ -141,6 +110,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, blkg->q = q; INIT_LIST_HEAD(&blkg->q_node); blkg->blkcg = blkcg; + atomic_set(&blkg->refcnt, 1); /* root blkg uses @q->root_rl, init rl only for !root blkgs */ if (blkcg != &blkcg_root) { @@ -247,11 +217,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, blkg_get(blkg->parent); } - ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0, - GFP_NOWAIT | __GFP_NOWARN); - if (ret) - goto err_cancel_ref; - /* invoke per-policy init */ for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; @@ -284,8 +249,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, blkg_put(blkg); return ERR_PTR(ret); -err_cancel_ref: - percpu_ref_exit(&blkg->refcnt); err_put_congested: wb_congested_put(wb_congested); err_put_css: @@ -296,7 +259,7 @@ err_free_blkg: } /** - * __blkg_lookup_create - lookup blkg, try to create one if not there + * blkg_lookup_create - lookup blkg, try to create one if not there * @blkcg: blkcg of interest * @q: request_queue of interest * @@ -305,11 +268,12 @@ err_free_blkg: * that all non-root blkg's have access to the parent blkg. This function * should be called under RCU read lock and @q->queue_lock. * - * Returns the blkg or the closest blkg if blkg_create fails as it walks - * down from root. + * Returns pointer to the looked up or created blkg on success, ERR_PTR() + * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not + * dead and bypassing, returns ERR_PTR(-EBUSY). */ -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q) { struct blkcg_gq *blkg; @@ -321,7 +285,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, * we shouldn't allow anything to go through for a bypassing queue. */ if (unlikely(blk_queue_bypass(q))) - return q->root_blkg; + return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY); blkg = __blkg_lookup(blkcg, q, true); if (blkg) @@ -329,58 +293,23 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, /* * Create blkgs walking down from blkcg_root to @blkcg, so that all - * non-root blkgs have access to their parents. Returns the closest - * blkg to the intended blkg should blkg_create() fail. + * non-root blkgs have access to their parents. */ while (true) { struct blkcg *pos = blkcg; struct blkcg *parent = blkcg_parent(blkcg); - struct blkcg_gq *ret_blkg = q->root_blkg; - - while (parent) { - blkg = __blkg_lookup(parent, q, false); - if (blkg) { - /* remember closest blkg */ - ret_blkg = blkg; - break; - } + + while (parent && !__blkg_lookup(parent, q, false)) { pos = parent; parent = blkcg_parent(parent); } blkg = blkg_create(pos, q, NULL); - if (IS_ERR(blkg)) - return ret_blkg; - if (pos == blkcg) + if (pos == blkcg || IS_ERR(blkg)) return blkg; } } -/** - * blkg_lookup_create - find or create a blkg - * @blkcg: target block cgroup - * @q: target request_queue - * - * This looks up or creates the blkg representing the unique pair - * of the blkcg and the request_queue. 
- */ -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q) -{ - struct blkcg_gq *blkg = blkg_lookup(blkcg, q); - unsigned long flags; - - if (unlikely(!blkg)) { - spin_lock_irqsave(q->queue_lock, flags); - - blkg = __blkg_lookup_create(blkcg, q); - - spin_unlock_irqrestore(q->queue_lock, flags); - } - - return blkg; -} - static void blkg_destroy(struct blkcg_gq *blkg) { struct blkcg *blkcg = blkg->blkcg; @@ -424,7 +353,7 @@ static void blkg_destroy(struct blkcg_gq *blkg) * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. */ - percpu_ref_kill(&blkg->refcnt); + blkg_put(blkg); } /** @@ -451,6 +380,29 @@ static void blkg_destroy_all(struct request_queue *q) q->root_rl.blkg = NULL; } +/* + * A group is RCU protected, but having an rcu lock does not mean that one + * can access all the fields of blkg and assume these are valid. For + * example, don't try to follow throtl_data and request queue links. + * + * Having a reference to blkg under an rcu allows accesses to only values + * local to groups like group stats and group rate limits. + */ +void __blkg_release_rcu(struct rcu_head *rcu_head) +{ + struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head); + + /* release the blkcg and parent blkg refs this blkg has been holding */ + css_put(&blkg->blkcg->css); + if (blkg->parent) + blkg_put(blkg->parent); + + wb_congested_put(blkg->wb_congested); + + blkg_free(blkg); +} +EXPORT_SYMBOL_GPL(__blkg_release_rcu); + /* * The next function used by blk_queue_for_each_rl(). It's a bit tricky * because the root blkg uses @q->root_rl instead of its own rl. @@ -1796,7 +1748,8 @@ void blkcg_maybe_throttle_current(void) blkg = blkg_lookup(blkcg, q); if (!blkg) goto out; - if (!blkg_tryget(blkg)) + blkg = blkg_try_get(blkg); + if (!blkg) goto out; rcu_read_unlock(); diff --git a/block/blk-core.c b/block/blk-core.c index 26a5dac80ed9..ce12515f9b9b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2435,7 +2435,6 @@ blk_qc_t generic_make_request(struct bio *bio) if (q) blk_queue_exit(q); q = bio->bi_disk->queue; - bio_reassociate_blkg(q, bio); flags = 0; if (bio->bi_opf & REQ_NOWAIT) flags = BLK_MQ_REQ_NOWAIT; diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 35c48d7b8f78..bb240a0c1309 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -480,12 +480,34 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) { struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); - struct blkcg_gq *blkg = bio->bi_blkg; + struct blkcg *blkcg; + struct blkcg_gq *blkg; + struct request_queue *q = rqos->q; bool issue_as_root = bio_issue_as_root_blkg(bio); if (!blk_iolatency_enabled(blkiolat)) return; + rcu_read_lock(); + blkcg = bio_blkcg(bio); + bio_associate_blkcg(bio, &blkcg->css); + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + if (!lock) + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + if (!lock) + spin_unlock_irq(q->queue_lock); + } + if (!blkg) + goto out; + + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); + bio_associate_blkg(bio, blkg); +out: + rcu_read_unlock(); while (blkg && blkg->parent) { struct iolatency_grp *iolat = blkg_to_lat(blkg); if (!iolat) { @@ -706,7 +728,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) * We could be exiting, don't access the pd unless we have a * ref on the blkg. 
*/ - if (!blkg_tryget(blkg)) + if (!blkg_try_get(blkg)) continue; iolat = blkg_to_lat(blkg); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 4bda70e8db48..db1a3a2ae006 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2115,11 +2115,21 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) } #endif +static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) +{ +#ifdef CONFIG_BLK_DEV_THROTTLING_LOW + /* fallback to root_blkg if we fail to get a blkg ref */ + if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) + bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg); + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +#endif +} + bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, struct bio *bio) { struct throtl_qnode *qn = NULL; - struct throtl_grp *tg = blkg_to_tg(blkg); + struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg); struct throtl_service_queue *sq; bool rw = bio_data_dir(bio); bool throttled = false; @@ -2138,6 +2148,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, if (unlikely(blk_queue_bypass(q))) goto out_unlock; + blk_throtl_assoc_bio(tg, bio); blk_throtl_update_idletime(tg); sq = &tg->service_queue; diff --git a/block/bounce.c b/block/bounce.c index ec0d99995f5f..418677dcec60 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -276,9 +276,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask, } } - bio_clone_blkg_association(bio, bio_src); - - blkcg_bio_issue_init(bio); + bio_clone_blkcg_association(bio, bio_src); return bio; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6a3d87dd3c1a..ed41aa978c4a 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3759,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) uint64_t serial_nr; rcu_read_lock(); - serial_nr = __bio_blkcg(bio)->css.serial_nr; + serial_nr = bio_blkcg(bio)->css.serial_nr; rcu_read_unlock(); /* @@ -3824,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct cfq_group *cfqg; rcu_read_lock(); - cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio)); + cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio)); if (!cfqg) { cfqq = &cfqd->oom_cfqq; goto out; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index abad6d15f956..ea9debf59b22 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -77,7 +77,6 @@ #include #include #include -#include #include "loop.h" @@ -1761,8 +1760,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) { - cmd->css = &bio_blkcg(rq->bio)->css; + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; css_get(cmd->css); } else #endif diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f3fb5bb8c82a..ac1cffd2a09b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) !discard_bio) continue; bio_chain(discard_bio, bio); - bio_clone_blkg_association(discard_bio, bio); + bio_clone_blkcg_association(discard_bio, bio); if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), diff --git a/fs/buffer.c b/fs/buffer.c index 109f55196866..6f1ae3ac9789 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3060,6 +3060,11 @@ static int submit_bh_wbc(int op, int op_flags, 
struct buffer_head *bh, */ bio = bio_alloc(GFP_NOIO, 1); + if (wbc) { + wbc_init_bio(wbc, bio); + wbc_account_io(wbc, bh->b_page, bh->b_size); + } + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_write_hint = write_hint; @@ -3079,11 +3084,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, op_flags |= REQ_PRIO; bio_set_op_attrs(bio, op, op_flags); - if (wbc) { - wbc_init_bio(wbc, bio); - wbc_account_io(wbc, bh->b_page, bh->b_size); - } - submit_bio(bio); return 0; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 2aa62d58d8dd..db7590178dfc 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io, bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); if (!bio) return -ENOMEM; + wbc_init_bio(io->io_wbc, bio); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_end_io = ext4_end_bio; bio->bi_private = ext4_get_io_end(io->io_end); io->io_bio = bio; io->io_next_block = bh->b_blocknr; - wbc_init_bio(io->io_wbc, bio); return 0; } diff --git a/include/linux/bio.h b/include/linux/bio.h index b47c7f716731..056fb627edb3 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -503,31 +503,23 @@ do { \ disk_devt((bio)->bi_disk) #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -int bio_associate_blkg_from_page(struct bio *bio, struct page *page); +int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); #else -static inline int bio_associate_blkg_from_page(struct bio *bio, - struct page *page) { return 0; } +static inline int bio_associate_blkcg_from_page(struct bio *bio, + struct page *page) { return 0; } #endif #ifdef CONFIG_BLK_CGROUP +int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); -int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css); -int bio_associate_create_blkg(struct request_queue *q, struct bio *bio); -int bio_reassociate_blkg(struct request_queue *q, struct bio *bio); void bio_disassociate_task(struct bio *bio); -void bio_clone_blkg_association(struct bio *dst, struct bio *src); +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline int bio_associate_blkg_from_css(struct bio *bio, - struct cgroup_subsys_state *css) -{ return 0; } -static inline int bio_associate_create_blkg(struct request_queue *q, - struct bio *bio) { return 0; } -static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio) -{ return 0; } +static inline int bio_associate_blkcg(struct bio *bio, + struct cgroup_subsys_state *blkcg_css) { return 0; } static inline void bio_disassociate_task(struct bio *bio) { } -static inline void bio_clone_blkg_association(struct bio *dst, - struct bio *src) { } +static inline void bio_clone_blkcg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 1e76ceebeb5d..6d766a19f2bb 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -126,7 +126,7 @@ struct blkcg_gq { struct request_list rl; /* reference count */ - struct percpu_ref refcnt; + atomic_t refcnt; /* is this blkg online? 
protected by both blkcg and q locks */ bool online; @@ -184,8 +184,6 @@ extern struct cgroup_subsys_state * const blkcg_root_css; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); int blkcg_init_queue(struct request_queue *q); @@ -232,59 +230,22 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); void blkg_conf_finish(struct blkg_conf_ctx *ctx); -/** - * blkcg_css - find the current css - * - * Find the css associated with either the kthread or the current task. - * This may return a dying css, so it is up to the caller to use tryget logic - * to confirm it is alive and well. - */ -static inline struct cgroup_subsys_state *blkcg_css(void) -{ - struct cgroup_subsys_state *css; - - css = kthread_blkcg(); - if (css) - return css; - return task_css(current, io_cgrp_id); -} static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; } -/** - * __bio_blkcg - internal version of bio_blkcg for bfq and cfq - * - * DO NOT USE. - * There is a flaw using this version of the function. In particular, this was - * used in a broken paradigm where association was called on the given css. It - * is possible though that the returned css from task_css() is in the process - * of dying due to migration of the current task. So it is improper to assume - * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will - * take additional work to handle more gracefully. - */ -static inline struct blkcg *__bio_blkcg(struct bio *bio) -{ - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return css_to_blkcg(blkcg_css()); -} - -/** - * bio_blkcg - grab the blkcg associated with a bio - * @bio: target bio - * - * This returns the blkcg associated with a bio, NULL if not associated. - * Callers are expected to either handle NULL or know association has been - * done prior to calling this. - */ static inline struct blkcg *bio_blkcg(struct bio *bio) { - if (bio && bio->bi_blkg) - return bio->bi_blkg->blkcg; - return NULL; + struct cgroup_subsys_state *css; + + if (bio && bio->bi_css) + return css_to_blkcg(bio->bi_css); + css = kthread_blkcg(); + if (css) + return css_to_blkcg(css); + return css_to_blkcg(task_css(current, io_cgrp_id)); } static inline bool blk_cgroup_congested(void) @@ -490,35 +451,26 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) */ static inline void blkg_get(struct blkcg_gq *blkg) { - percpu_ref_get(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); } /** - * blkg_tryget - try and get a blkg reference + * blkg_try_get - try and get a blkg reference * @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. 
*/ -static inline bool blkg_tryget(struct blkcg_gq *blkg) +static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) { - return percpu_ref_tryget(&blkg->refcnt); + if (atomic_inc_not_zero(&blkg->refcnt)) + return blkg; + return NULL; } -/** - * blkg_tryget_closest - try and get a blkg ref on the closet blkg - * @blkg: blkg to get - * - * This walks up the blkg tree to find the closest non-dying blkg and returns - * the blkg that it did association with as it may not be the passed in blkg. - */ -static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) -{ - while (!percpu_ref_tryget(&blkg->refcnt)) - blkg = blkg->parent; - return blkg; -} +void __blkg_release_rcu(struct rcu_head *rcu); /** * blkg_put - put a blkg reference @@ -526,7 +478,9 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) */ static inline void blkg_put(struct blkcg_gq *blkg) { - percpu_ref_put(&blkg->refcnt); + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) + call_rcu(&blkg->rcu_head, __blkg_release_rcu); } /** @@ -579,36 +533,25 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, rcu_read_lock(); - if (bio && bio->bi_blkg) { - blkcg = bio->bi_blkg->blkcg; - if (blkcg == &blkcg_root) - goto rl_use_root; - - blkg_get(bio->bi_blkg); - rcu_read_unlock(); - return &bio->bi_blkg->rl; - } + blkcg = bio_blkcg(bio); - blkcg = css_to_blkcg(blkcg_css()); + /* bypass blkg lookup and use @q->root_rl directly for root */ if (blkcg == &blkcg_root) - goto rl_use_root; + goto root_rl; + /* + * Try to use blkg->rl. blkg lookup may fail under memory pressure + * or if either the blkcg or queue is going away. Fall back to + * root_rl in such cases. + */ blkg = blkg_lookup(blkcg, q); if (unlikely(!blkg)) - blkg = __blkg_lookup_create(blkcg, q); - - if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg)) - goto rl_use_root; + goto root_rl; + blkg_get(blkg); rcu_read_unlock(); return &blkg->rl; - - /* - * Each blkg has its own request_list, however, the root blkcg - * uses the request_queue's root_rl. This is to avoid most - * overhead for the root blkcg. 
- */ -rl_use_root: +root_rl: rcu_read_unlock(); return &q->root_rl; } @@ -854,26 +797,32 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif - -static inline void blkcg_bio_issue_init(struct bio *bio) -{ - bio_issue_init(&bio->bi_issue, bio_sectors(bio)); -} - static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { + struct blkcg *blkcg; struct blkcg_gq *blkg; bool throtl = false; rcu_read_lock(); + blkcg = bio_blkcg(bio); + + /* associate blkcg if bio hasn't attached one */ + bio_associate_blkcg(bio, &blkcg->css); - bio_associate_create_blkg(q, bio); - blkg = bio->bi_blkg; + blkg = blkg_lookup(blkcg, q); + if (unlikely(!blkg)) { + spin_lock_irq(q->queue_lock); + blkg = blkg_lookup_create(blkcg, q); + if (IS_ERR(blkg)) + blkg = NULL; + spin_unlock_irq(q->queue_lock); + } throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { + blkg = blkg ?: q->root_blkg; /* * If the bio is flagged with BIO_QUEUE_ENTERED it means this * is a split bio and we would have already accounted for the @@ -885,8 +834,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } - blkcg_bio_issue_init(bio); - rcu_read_unlock(); return !throtl; } @@ -983,7 +930,6 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } -static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, @@ -999,7 +945,6 @@ static inline void blk_put_rl(struct request_list *rl) { } static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } -static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 093a818c5b68..1dcf652ba0aa 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -178,6 +178,7 @@ struct bio { * release. Read comment on top of bio_associate_current(). */ struct io_context *bi_ioc; + struct cgroup_subsys_state *bi_css; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #endif diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b8bcbdeb2eac..32c553556bbd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -93,8 +93,6 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); -struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, - struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 738a0c24874f..fdfd04e348f6 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -246,8 +246,7 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, * * @bio is a part of the writeback in progress controlled by @wbc. Perform * writeback specific initialization. 
This is used to apply the cgroup - * writeback context. Must be called after the bio has been associated with - * a device. + * writeback context. */ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { @@ -258,7 +257,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) * regular writeback instead of writing things out itself. */ if (wbc->wb) - bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); + bio_associate_blkcg(bio, wbc->wb->blkcg_css); } #else /* CONFIG_CGROUP_WRITEBACK */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 4c1cf0969a80..4a3dae2a8283 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -492,7 +492,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, } /** - * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss + * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem * @cgrp: the cgroup of interest * @ss: the subsystem of interest (%NULL returns @cgrp->self) * @@ -501,8 +501,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp, * enabled. If @ss is associated with the hierarchy @cgrp is on, this * function is guaranteed to return non-NULL css. */ -static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, - struct cgroup_subsys *ss) +static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, + struct cgroup_subsys *ss) { lockdep_assert_held(&cgroup_mutex); @@ -522,35 +522,6 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp, return cgroup_css(cgrp, ss); } -/** - * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem - * @cgrp: the cgroup of interest - * @ss: the subsystem of interest - * - * Find and get the effective css of @cgrp for @ss. The effective css is - * defined as the matching css of the nearest ancestor including self which - * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, - * the root css is returned, so this function always returns a valid css. - * - * The returned css is not guaranteed to be online, and therefore it is the - * callers responsiblity to tryget a reference for it. - */ -struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, - struct cgroup_subsys *ss) -{ - struct cgroup_subsys_state *css; - - do { - css = cgroup_css(cgrp, ss); - - if (css) - return css; - cgrp = cgroup_parent(cgrp); - } while (cgrp); - - return init_css_set.subsys[ss->id]; -} - /** * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem * @cgrp: the cgroup of interest @@ -633,11 +604,10 @@ EXPORT_SYMBOL_GPL(of_css); * * Should be called under cgroup_[tree_]mutex. */ -#define for_each_e_css(css, ssid, cgrp) \ - for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ - if (!((css) = cgroup_e_css_by_mask(cgrp, \ - cgroup_subsys[(ssid)]))) \ - ; \ +#define for_each_e_css(css, ssid, cgrp) \ + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ + if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \ + ; \ else /** @@ -1036,7 +1006,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * @ss is in this hierarchy, so we want the * effective css from @cgrp. 
*/ - template[i] = cgroup_e_css_by_mask(cgrp, ss); + template[i] = cgroup_e_css(cgrp, ss); } else { /* * @ss is not in this hierarchy, so we don't want @@ -3053,7 +3023,7 @@ static int cgroup_apply_control(struct cgroup *cgrp) return ret; /* - * At this point, cgroup_e_css_by_mask() results reflect the new csses + * At this point, cgroup_e_css() results reflect the new csses * making the following cgroup_update_dfl_csses() properly update * css associations of all tasks in the subtree. */ diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index fac0ddf8a8e2..2868d85f1fb1 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) return NULL; - if (!bio->bi_blkg) + if (!bio->bi_css) return NULL; - return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup); + return cgroup_get_kernfs_id(bio->bi_css->cgroup); } #else static union kernfs_node_id * diff --git a/mm/page_io.c b/mm/page_io.c index 573d3663d846..aafd19ec1db4 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, goto out; } bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc); - bio_associate_blkg_from_page(bio, page); + bio_associate_blkcg_from_page(bio, page); count_swpout_vm_event(page); set_page_writeback(page); unlock_page(page); -- cgit v1.2.3 From df0734702a7cbba49d6765bd5ba069340bf9c5db Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 2 Nov 2018 10:16:15 -0700 Subject: bpf: show real jited prog address in /proc/kallsyms Currently, /proc/kallsyms shows page address of jited bpf program. The main reason here is to not expose randomized start address. However, This is not ideal for detailed profiling (find hot instructions from stack traces). This patch replaces the page address with real prog start address. This change is OK because these addresses are still protected by sysctl kptr_restrict (see kallsyms_show_value()), and only programs loaded by root are added to kallsyms (see bpf_prog_kallsyms_add()). Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 6377225b2082..1a796e0799ec 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr) int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *sym) { - unsigned long symbol_start, symbol_end; struct bpf_prog_aux *aux; unsigned int it = 0; int ret = -ERANGE; @@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, if (it++ != symnum) continue; - bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); bpf_get_prog_name(aux->prog, sym); - *value = symbol_start; + *value = (unsigned long)aux->prog->bpf_func; *type = BPF_SYM_ELF_TYPE; ret = 0; -- cgit v1.2.3 From de57e99ceb65d0d7775cc14a8ba5931d7de1d708 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 2 Nov 2018 10:16:16 -0700 Subject: bpf: show real jited address in bpf_prog_info->jited_ksyms Currently, jited_ksyms in bpf_prog_info shows page addresses of jited bpf program. The main reason here is to not expose randomized start address. However, this is not ideal for detailed profiling (find hot instructions from stack traces). 
This patch replaces the page address with real prog start address. This change is OK because bpf_prog_get_info_by_fd() is only available to root. Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/syscall.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ccb93277aae2..34a9eef5992c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2172,7 +2172,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, user_ksyms = u64_to_user_ptr(info.jited_ksyms); for (i = 0; i < ulen; i++) { ksym_addr = (ulong) prog->aux->func[i]->bpf_func; - ksym_addr &= PAGE_MASK; if (put_user((u64) ksym_addr, &user_ksyms[i])) return -EFAULT; } -- cgit v1.2.3 From ff1889fc531f582f902175c0acc80321af540b24 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 2 Nov 2018 10:16:17 -0700 Subject: bpf: show main program address and length in bpf_prog_info Currently, when there is no subprog (prog->aux->func_cnt == 0), bpf_prog_info does not return any jited_ksyms or jited_func_lens. This patch adds main program address (prog->bpf_func) and main program length (prog->jited_len) to bpf_prog_info. Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann --- kernel/bpf/syscall.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 34a9eef5992c..9418174c276c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2158,11 +2158,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, } ulen = info.nr_jited_ksyms; - info.nr_jited_ksyms = prog->aux->func_cnt; + info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; if (info.nr_jited_ksyms && ulen) { if (bpf_dump_raw_ok()) { + unsigned long ksym_addr; u64 __user *user_ksyms; - ulong ksym_addr; u32 i; /* copy the address of the kernel symbol @@ -2170,9 +2170,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, */ ulen = min_t(u32, info.nr_jited_ksyms, ulen); user_ksyms = u64_to_user_ptr(info.jited_ksyms); - for (i = 0; i < ulen; i++) { - ksym_addr = (ulong) prog->aux->func[i]->bpf_func; - if (put_user((u64) ksym_addr, &user_ksyms[i])) + if (prog->aux->func_cnt) { + for (i = 0; i < ulen; i++) { + ksym_addr = (unsigned long) + prog->aux->func[i]->bpf_func; + if (put_user((u64) ksym_addr, + &user_ksyms[i])) + return -EFAULT; + } + } else { + ksym_addr = (unsigned long) prog->bpf_func; + if (put_user((u64) ksym_addr, &user_ksyms[0])) return -EFAULT; } } else { @@ -2181,7 +2189,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, } ulen = info.nr_jited_func_lens; - info.nr_jited_func_lens = prog->aux->func_cnt; + info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; if (info.nr_jited_func_lens && ulen) { if (bpf_dump_raw_ok()) { u32 __user *user_lens; @@ -2190,9 +2198,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, /* copy the JITed image lengths for each function */ ulen = min_t(u32, info.nr_jited_func_lens, ulen); user_lens = u64_to_user_ptr(info.jited_func_lens); - for (i = 0; i < ulen; i++) { - func_len = prog->aux->func[i]->jited_len; - if (put_user(func_len, &user_lens[i])) + if (prog->aux->func_cnt) { + for (i = 0; i < ulen; i++) { + func_len = + prog->aux->func[i]->jited_len; + if (put_user(func_len, &user_lens[i])) + return -EFAULT; + } + } else { + func_len = prog->jited_len; + if (put_user(func_len, &user_lens[0])) return -EFAULT; } } else { -- cgit v1.2.3 From 28c2fae726bf5003cd209b0d5910a642af98316f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 2 Nov 2018 11:35:46 +0100 Subject: bpf: fix bpf_prog_get_info_by_fd to return 0 func_lens for unpriv While dbecd7388476 ("bpf: get kernel symbol addresses via syscall") zeroed info.nr_jited_ksyms in bpf_prog_get_info_by_fd() for queries from unprivileged users, commit 815581c11cc2 ("bpf: get JITed image lengths of functions via syscall") forgot about doing so and therefore returns the #elems of the user set up buffer, which is incorrect. It also needs to indicate an info.nr_jited_func_lens of zero. Fixes: 815581c11cc2 ("bpf: get JITed image lengths of functions via syscall") Signed-off-by: Daniel Borkmann Cc: Sandipan Das Cc: Song Liu Signed-off-by: Alexei Starovoitov --- kernel/bpf/syscall.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9418174c276c..cf5040fd5434 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, info.jited_prog_len = 0; info.xlated_prog_len = 0; info.nr_jited_ksyms = 0; + info.nr_jited_func_lens = 0; goto done; } -- cgit v1.2.3 From 3383b36040522505546cb112f0a543a5998edfb6 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Fri, 2 Nov 2018 15:48:35 -0700 Subject: kernel/kexec_file.c: remove some duplicated includes We include kexec.h and slab.h twice in kexec_file.c. It's unnecessary, hence just remove them. Link: http://lkml.kernel.org/r/1537498098-19171-1-git-send-email-zhongjiang@huawei.com Signed-off-by: zhong jiang Reviewed-by: Bhupesh Sharma Reviewed-by: Andrew Morton Acked-by: Baoquan He Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec_file.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index c6a3b6851372..35cf0ad29718 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -25,8 +25,6 @@ #include #include #include -#include -#include #include #include #include "kexec_internal.h" -- cgit v1.2.3 From 6f0483d1f91b612186abeaebf3ce43bf805eb9f7 Mon Sep 17 00:00:00 2001 From: Michael Schupikov Date: Fri, 2 Nov 2018 15:48:38 -0700 Subject: kernel/sysctl.c: remove duplicated include Remove one include of . No functional changes. 
Link: http://lkml.kernel.org/r/20181004134223.17735-1-michael@schupikov.de Signed-off-by: Michael Schupikov Reviewed-by: Richard Weinberger Acked-by: Luis Chamberlain Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3ae223f7b5df..5fc724e4e454 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -66,7 +66,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 993f0b0510dad98b4e6e39506834dab0d13fd539 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Nov 2018 14:22:25 +0100 Subject: sched/topology: Fix off by one bug With the addition of the NUMA identity level, we increased @level by one and will run off the end of the array in the distance sort loop. Fixed: 051f3ca02e46 ("sched/topology: Introduce NUMA identity node sched domain") Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 9d74371e4aad..8d7f15ba5916 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1337,7 +1337,7 @@ void sched_init_numa(void) int level = 0; int i, j, k; - sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); + sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL); if (!sched_domains_numa_distance) return; -- cgit v1.2.3 From 40fa3780bac2b654edf23f6b13f4e2dd550aea10 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Tue, 23 Oct 2018 14:37:31 +0100 Subject: sched/core: Take the hotplug lock in sched_init_smp() When running on linux-next (8c60c36d0b8c ("Add linux-next specific files for 20181019")) + CONFIG_PROVE_LOCKING=y on a big.LITTLE system (e.g. Juno or HiKey960), we get the following report: [ 0.748225] Call trace: [ 0.750685] lockdep_assert_cpus_held+0x30/0x40 [ 0.755236] static_key_enable_cpuslocked+0x20/0xc8 [ 0.760137] build_sched_domains+0x1034/0x1108 [ 0.764601] sched_init_domains+0x68/0x90 [ 0.768628] sched_init_smp+0x30/0x80 [ 0.772309] kernel_init_freeable+0x278/0x51c [ 0.776685] kernel_init+0x10/0x108 [ 0.780190] ret_from_fork+0x10/0x18 The static_key in question is 'sched_asym_cpucapacity' introduced by commit: df054e8445a4 ("sched/topology: Add static_key for asymmetric CPU capacity optimizations") In this particular case, we enable it because smp_prepare_cpus() will end up fetching the capacity-dmips-mhz entry from the devicetree, so we already have some asymmetry detected when entering sched_init_smp(). This didn't get detected in tip/sched/core because we were missing: commit cb538267ea1e ("jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations") Calls to build_sched_domains() post sched_init_smp() will hold the hotplug lock, it just so happens that this very first call is a special case. As stated by a comment in sched_init_smp(), "There's no userspace yet to cause hotplug operations" so this is a harmless warning. However, to both respect the semantics of underlying callees and make lockdep happy, take the hotplug lock in sched_init_smp(). This also satisfies the comment atop sched_init_domains() that says "Callers must hold the hotplug lock". 
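As a side note for readers, the locking pattern this change establishes can be summarized by the following sketch. It is not the patch itself (the hunk below is authoritative), and the example_* names are hypothetical stand-ins for sched_init_smp()/sched_init_domains():

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_domains_mutex);	/* stand-in for sched_domains_mutex */

static void example_init_domains(void)
{
	/*
	 * Callees such as static_key_enable_cpuslocked() assert that the
	 * hotplug lock is held, regardless of whether hotplug can actually
	 * race at this point.
	 */
	lockdep_assert_cpus_held();

	/* ... build the scheduling domains here ... */
}

void __init example_init_smp(void)
{
	/*
	 * No userspace exists yet, so there is no contention; the hotplug
	 * lock is taken only to respect the callees' locking contract and
	 * keep lockdep quiet.
	 */
	cpus_read_lock();
	mutex_lock(&example_domains_mutex);
	example_init_domains();
	mutex_unlock(&example_domains_mutex);
	cpus_read_unlock();
}

The actual change below does exactly this around sched_init_domains().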
Reported-by: Sudeep Holla Tested-by: Sudeep Holla Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Cc: Dietmar.Eggemann@arm.com Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: morten.rasmussen@arm.com Cc: quentin.perret@arm.com Link: http://lkml.kernel.org/r/1540301851-3048-1-git-send-email-valentin.schneider@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fd2fce8a001b..02a20ef196a6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5859,11 +5859,14 @@ void __init sched_init_smp(void) /* * There's no userspace yet to cause hotplug operations; hence all the * CPU masks are stable and all blatant races in the below code cannot - * happen. + * happen. The hotplug lock is nevertheless taken to satisfy lockdep, + * but there won't be any contention on it. */ + cpus_read_lock(); mutex_lock(&sched_domains_mutex); sched_init_domains(cpu_active_mask); mutex_unlock(&sched_domains_mutex); + cpus_read_unlock(); /* Move init over to a non-isolated CPU */ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) -- cgit v1.2.3 From e1ff516a56ad56c476b47795d3811eef79d25fbe Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 5 Nov 2018 08:50:13 +0800 Subject: sched/fair: Fix a comment in task_numa_fault() Duplicated 'case it'. Signed-off-by: Yi Wang Reviewed-by: Xi Xu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: zhong.weidong@zte.com.cn Link: http://lkml.kernel.org/r/1541379013-11352-1-git-send-email-wang.yi59@zte.com.cn Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ee271bb661cc..3648d0300fdf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) local = 1; /* - * Retry task to preferred node migration periodically, in case it - * case it previously failed, or the scheduler moved us. + * Retry to migrate task to preferred node periodically, in case it + * previously failed, or the scheduler moved us. */ if (time_after(jiffies, p->numa_migrate_retry)) { task_numa_placement(p); -- cgit v1.2.3 From f75d651587f719a813ebbbfeee570e6570731d55 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 4 Nov 2018 18:40:14 -0800 Subject: resource/docs: Fix new kernel-doc warnings The first group of warnings is caused by a "/**" kernel-doc notation marker but the function comments are not in kernel-doc format. Also add another error return value here. 
../kernel/resource.c:337: warning: Function parameter or member 'start' not described in 'find_next_iomem_res' ../kernel/resource.c:337: warning: Function parameter or member 'end' not described in 'find_next_iomem_res' ../kernel/resource.c:337: warning: Function parameter or member 'flags' not described in 'find_next_iomem_res' ../kernel/resource.c:337: warning: Function parameter or member 'desc' not described in 'find_next_iomem_res' ../kernel/resource.c:337: warning: Function parameter or member 'first_lvl' not described in 'find_next_iomem_res' ../kernel/resource.c:337: warning: Function parameter or member 'res' not described in 'find_next_iomem_res' Add the missing function parameter documentation for the other warnings: ../kernel/resource.c:409: warning: Function parameter or member 'arg' not described in 'walk_iomem_res_desc' ../kernel/resource.c:409: warning: Function parameter or member 'func' not described in 'walk_iomem_res_desc' Signed-off-by: Randy Dunlap Cc: Andrew Morton Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: b69c2e20f6e4 ("resource: Clean it up a bit") Link: http://lkml.kernel.org/r/dda2e4d8-bedd-3167-20fe-8c7d2d35b354@infradead.org Signed-off-by: Ingo Molnar --- kernel/resource.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index b3a3a1fc499e..17bcb189d530 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -318,14 +318,14 @@ int release_resource(struct resource *old) EXPORT_SYMBOL(release_resource); -/** +/* * Finds the lowest iomem resource that covers part of [start..end]. The * caller must specify start, end, flags, and desc (which may be * IORES_DESC_NONE). * * If a resource is found, returns 0 and *res is overwritten with the part * of the resource that's within [start..end]; if none is found, returns - -1. + -1. Returns -EINVAL for other invalid parameters. * * This function walks the whole tree and not just first level children * unless @first_lvl is true. @@ -390,7 +390,9 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, } /** - * Walks through iomem resources and calls func() with matching resource + * walk_iomem_res_desc - walk through iomem resources + * + * Walks through iomem resources and calls @func() with matching resource * ranges. This walks through whole tree and not just first level children. * All the memory ranges which overlap start,end and also match flags and * desc are valid candidates. @@ -399,6 +401,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, * @flags: I/O resource flags * @start: start addr * @end: end addr + * @arg: function argument for the callback @func + * @func: callback function that is called for each qualifying resource area * * NOTE: For a new descriptor search, define a new IORES_DESC in * and set it in 'desc' of a target resource entry. -- cgit v1.2.3 From ee474b81fe5aa5dc0faae920bf66240fbf55f891 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 1 Nov 2018 23:29:28 +0900 Subject: tracing/kprobes: Fix strpbrk() argument order Fix strpbrk()'s argument order: the string of acceptable characters must be passed as the 2nd argument. Note that the wrong argument order can cause a kernel panic when the code later restores the backup character to code->data. 
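For readers unfamiliar with strpbrk(), here is a minimal user-space sketch of the argument order; the sample string is made up for illustration and is not taken from the probe parser:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *data = "my_symbol+0x10";	/* stands in for code->data */
	char *ok, *bad;

	/* Correct order: scan @data for the first '+' or '-'. */
	ok = strpbrk(data, "+-");

	/*
	 * Swapped order: scans the literal "+-" for characters of @data,
	 * so any match points into that read-only literal, not into @data.
	 */
	bad = strpbrk("+-", data);

	printf("correct: %s\n", ok ? ok : "(none)");	/* prints "+0x10" */
	printf("swapped: %s\n", bad ? bad : "(none)");	/* points into "+-" */
	return 0;
}

With the swapped arguments the returned pointer refers to the read-only "+-" literal, so restoring the backup character through it writes to read-only memory instead of into code->data, which is presumably the panic the changelog refers to.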
Link: http://lkml.kernel.org/r/154108256792.2604.1816052586385217811.stgit@devbox Fixes: a6682814f371 ("tracing/kprobes: Allow kprobe-events to record module symbol") Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/trace_probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 3ef15a6683c0..bd30e9398d2a 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg) if (code[1].op != FETCH_OP_IMM) return -EINVAL; - tmp = strpbrk("+-", code->data); + tmp = strpbrk(code->data, "+-"); if (tmp) c = *tmp; ret = traceprobe_split_symbol_offset(code->data, -- cgit v1.2.3 From f26621e60b35369bca9228bc936dc723b3e421af Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 5 Nov 2018 10:33:07 +0100 Subject: resource/docs: Complete kernel-doc style function documentation Add the missing kernel-doc style function parameters documentation. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: linux-tip-commits@vger.kernel.org Cc: rdunlap@infradead.org Fixes: b69c2e20f6e4 ("resource: Clean it up a bit") Link: http://lkml.kernel.org/r/20181105093307.GA12445@zn.tnic Signed-off-by: Ingo Molnar --- kernel/resource.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 17bcb189d530..b0fbf685c77a 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -318,17 +318,24 @@ int release_resource(struct resource *old) EXPORT_SYMBOL(release_resource); -/* - * Finds the lowest iomem resource that covers part of [start..end]. The - * caller must specify start, end, flags, and desc (which may be +/** + * Finds the lowest iomem resource that covers part of [@start..@end]. The + * caller must specify @start, @end, @flags, and @desc (which may be * IORES_DESC_NONE). * - * If a resource is found, returns 0 and *res is overwritten with the part - * of the resource that's within [start..end]; if none is found, returns - * -1. Returns -EINVAL for other invalid parameters. + * If a resource is found, returns 0 and @*res is overwritten with the part + * of the resource that's within [@start..@end]; if none is found, returns + * -1 or -EINVAL for other invalid parameters. * * This function walks the whole tree and not just first level children * unless @first_lvl is true. + * + * @start: start address of the resource searched for + * @end: end address of same resource + * @flags: flags which the resource must have + * @desc: descriptor the resource must have + * @first_lvl: walk only the first level children, if set + * @res: return ptr, if resource found */ static int find_next_iomem_res(resource_size_t start, resource_size_t end, unsigned long flags, unsigned long desc, @@ -390,9 +397,7 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, } /** - * walk_iomem_res_desc - walk through iomem resources - * - * Walks through iomem resources and calls @func() with matching resource + * Walks through iomem resources and calls func() with matching resource * ranges. This walks through whole tree and not just first level children. * All the memory ranges which overlap start,end and also match flags and * desc are valid candidates. 
-- cgit v1.2.3 From d2f007dbe7e4c9583eea6eb04d60001e85c6f1bd Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 5 Nov 2018 20:55:09 +0100 Subject: userns: also map extents in the reverse map to kernel IDs The current logic first clones the extent array and sorts both copies, then maps the lower IDs of the forward mapping into the lower namespace, but doesn't map the lower IDs of the reverse mapping. This means that code in a nested user namespace with >5 extents will see incorrect IDs. It also breaks some access checks, like inode_owner_or_capable() and privileged_wrt_inode_uidgid(), so a process can incorrectly appear to be capable relative to an inode. To fix it, we have to make sure that the "lower_first" members of extents in both arrays are translated; and we have to make sure that the reverse map is sorted *after* the translation (since otherwise the translation can break the sorting). This is CVE-2018-18955. Fixes: 6397fac4915a ("userns: bump idmap limits to 340") Cc: stable@vger.kernel.org Signed-off-by: Jann Horn Tested-by: Eric W. Biederman Reviewed-by: Eric W. Biederman Signed-off-by: Eric W. Biederman --- kernel/user_namespace.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index e5222b5fb4fe..923414a246e9 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf, if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) goto out; - ret = sort_idmaps(&new_map); - if (ret < 0) - goto out; - ret = -EPERM; /* Map the lower ids from the parent user namespace to the * kernel global id space. @@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf, e->lower_first = lower_first; } + /* + * If we want to use binary search for lookup, this clones the extent + * array and sorts both copies. + */ + ret = sort_idmaps(&new_map); + if (ret < 0) + goto out; + /* Install the map */ if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) { memcpy(map->extent, new_map.extent, -- cgit v1.2.3 From e6a2d72c10405b30ddba5af2e44a9d3d925a56d3 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Wed, 7 Nov 2018 12:10:32 +0100 Subject: posix-cpu-timers: Remove useless call to check_dl_overrun() check_dl_overrun() is used to send a SIGXCPU to users that asked to be informed when a SCHED_DEADLINE runtime overruns occur. The function is called by check_thread_timers() already, so the call in check_process_timers() is redundant/wrong (even though harmless). Remove it. 
Fixes: 34be39305a77 ("sched/deadline: Implement "runtime overrun signal" support") Signed-off-by: Juri Lelli Signed-off-by: Thomas Gleixner Reviewed-by: Daniel Bristot de Oliveira Reviewed-by: Steven Rostedt (VMware) Cc: linux-rt-users@vger.kernel.org Cc: mtk.manpages@gmail.com Cc: Mathieu Poirier Cc: Peter Zijlstra Cc: Luca Abeni Cc: Claudio Scordino Link: https://lkml.kernel.org/r/20181107111032.32291-1-juri.lelli@redhat.com --- kernel/time/posix-cpu-timers.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index ce32cf741b25..8f0644af40be 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk, struct task_cputime cputime; unsigned long soft; - if (dl_task(tsk)) - check_dl_overrun(tsk); - /* * If cputimer is not running, then there are no active * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU). -- cgit v1.2.3