Diffstat (limited to 'include/asm-generic')
 -rw-r--r--  include/asm-generic/bug.h              |  80
 -rw-r--r--  include/asm-generic/rqspinlock.h       |  60
 -rw-r--r--  include/asm-generic/thread_info_tif.h  |   3
 -rw-r--r--  include/asm-generic/vmlinux.lds.h      |  79
 4 files changed, 140 insertions(+), 82 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 387720933973..09e8eccee8ed 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -13,10 +13,19 @@
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
+#define BUGFLAG_ARGS (1 << 4)
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
+#ifndef WARN_CONDITION_STR
+#ifdef CONFIG_DEBUG_BUGVERBOSE_DETAILED
+# define WARN_CONDITION_STR(cond_str) "[" cond_str "] "
+#else
+# define WARN_CONDITION_STR(cond_str)
+#endif
+#endif /* WARN_CONDITION_STR */
+
#ifndef __ASSEMBLY__
#include <linux/panic.h>
#include <linux/printk.h>
@@ -29,19 +38,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#ifdef CONFIG_BUG
-#ifdef CONFIG_GENERIC_BUG
-struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- unsigned long bug_addr;
+#define BUG_REL(type, name) type name
#else
- signed int bug_addr_disp;
+#define BUG_REL(type, name) signed int name##_disp
#endif
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- const char *file;
-#else
- signed int file_disp;
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+ BUG_REL(unsigned long, bug_addr);
+#ifdef HAVE_ARCH_BUG_FORMAT
+ BUG_REL(const char *, format);
#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ BUG_REL(const char *, file);
unsigned short line;
#endif
unsigned short flags;
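
[Annotation: a sketch of how the new BUG_REL() helper expands under each
configuration, assuming CONFIG_DEBUG_BUGVERBOSE=y and no HAVE_ARCH_BUG_FORMAT.
The _abs/_rel suffixes are illustrative only; the real struct is always
named bug_entry.]

/* CONFIG_GENERIC_BUG_RELATIVE_POINTERS=n: the field keeps its given
 * type and name, i.e. an absolute address/pointer. */
struct bug_entry_abs {
        unsigned long bug_addr;         /* BUG_REL(unsigned long, bug_addr) */
        const char *file;               /* BUG_REL(const char *, file) */
        unsigned short line;
        unsigned short flags;
};

/* CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y: the type argument is ignored
 * and the field becomes a 32-bit self-relative offset named <name>_disp. */
struct bug_entry_rel {
        signed int bug_addr_disp;       /* BUG_REL(unsigned long, bug_addr) */
        signed int file_disp;           /* BUG_REL(const char *, file) */
        unsigned short line;
        unsigned short flags;
};
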
@@ -92,28 +102,50 @@ void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#ifndef __WARN_FLAGS
-#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#ifdef __WARN_FLAGS
+#define __WARN() __WARN_FLAGS("", BUGFLAG_TAINT(TAINT_WARN))
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+#endif /* __WARN_FLAGS */
+
+#if defined(__WARN_FLAGS) && !defined(__WARN_printf)
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ __warn_printk(arg); \
+ __WARN_FLAGS("", BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
instrumentation_end(); \
} while (0)
-#else
-#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#endif
+
+#ifndef __WARN_printf
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- __warn_printk(arg); \
- __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
instrumentation_end(); \
} while (0)
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_FLAGS(BUGFLAG_ONCE | \
- BUGFLAG_TAINT(TAINT_WARN)); \
- unlikely(__ret_warn_on); \
-})
+#endif
+
+#ifndef __WARN
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
#endif
/* used internally by panic.c */
@@ -148,8 +180,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
+#ifndef WARN_ONCE
#define WARN_ONCE(condition, format...) \
DO_ONCE_LITE_IF(condition, WARN, 1, format)
+#endif
#define WARN_TAINT_ONCE(condition, taint, format...) \
DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
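
[Annotation: a short usage sketch of the refactored WARN_ON(); the function
and values are hypothetical. The condition is now stringified and passed to
__WARN_FLAGS(), so architectures that honor WARN_CONDITION_STR() can embed
it in the report.]

static int demo_check(int len)
{
        /* Passes the string "len > 10" down as cond_str. With
         * CONFIG_DEBUG_BUGVERBOSE_DETAILED=y, WARN_CONDITION_STR()
         * renders it as "[len > 10] " in the warning text; with it
         * disabled the macro expands to nothing and the output is
         * unchanged. WARN_ON() still evaluates to the condition, so
         * it can gate an error return as before. */
        if (WARN_ON(len > 10))
                return -EINVAL;
        return 0;
}
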
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 6d4244d643df..0f2dcbbfee2f 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -129,8 +129,8 @@ dec:
* <error> for lock B
* release_held_lock_entry
*
- * try_cmpxchg_acquire for lock A
* grab_held_lock_entry
+ * try_cmpxchg_acquire for lock A
*
* Lack of any ordering means reordering may occur such that dec, inc
* are done before entry is overwritten. This permits a remote lock
@@ -139,13 +139,8 @@ dec:
* CPU holds a lock it is attempting to acquire, leading to false ABBA
* diagnosis).
*
- * In case of unlock, we will always do a release on the lock word after
- * releasing the entry, ensuring that other CPUs cannot hold the lock
- * (and make conclusions about deadlocks) until the entry has been
- * cleared on the local CPU, preventing any anomalies. Reordering is
- * still possible there, but a remote CPU cannot observe a lock in our
- * table which it is already holding, since visibility entails our
- * release store for the said lock has not retired.
+ * The case of unlock is treated differently due to NMI reentrancy, see
+ * comments in res_spin_unlock.
*
* In theory we don't have a problem if the dec and WRITE_ONCE above get
* reordered with each other, we either notice an empty NULL entry on
@@ -175,10 +170,22 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock)
{
int val = 0;
- if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) {
- grab_held_lock_entry(lock);
+ /*
+ * Grab the deadlock detection entry before doing the cmpxchg, so that
+ * reentrancy due to NMIs between the succeeding cmpxchg and creation of
+ * held lock entry can correctly detect an acquisition attempt in the
+ * interrupted context.
+ *
+ * cmpxchg lock A
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * grab_held_lock_entry(A)
+ */
+ grab_held_lock_entry(lock);
+
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return 0;
- }
return resilient_queued_spin_lock_slowpath(lock, val);
}
@@ -192,28 +199,25 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
- if (unlikely(rqh->cnt > RES_NR_HELD))
- goto unlock;
- WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
-unlock:
/*
- * Release barrier, ensures correct ordering. See release_held_lock_entry
- * for details. Perform release store instead of queued_spin_unlock,
- * since we use this function for test-and-set fallback as well. When we
- * have CONFIG_QUEUED_SPINLOCKS=n, we clear the full 4-byte lockword.
+ * Release barrier, ensures correct ordering. Perform release store
+ * instead of queued_spin_unlock, since we use this function for the TAS
+ * fallback as well. When we have CONFIG_QUEUED_SPINLOCKS=n, we clear
+ * the full 4-byte lockword.
*
- * Like release_held_lock_entry, we can do the release before the dec.
- * We simply care about not seeing the 'lock' in our table from a remote
- * CPU once the lock has been released, which doesn't rely on the dec.
+ * Perform the smp_store_release before clearing the lock entry so that
+ * NMIs landing in the unlock path can correctly detect AA issues. The
+ * opposite order shown below may lead to missed AA checks:
*
- * Unlike smp_wmb(), release is not a two way fence, hence it is
- * possible for a inc to move up and reorder with our clearing of the
- * entry. This isn't a problem however, as for a misdiagnosis of ABBA,
- * the remote CPU needs to hold this lock, which won't be released until
- * the store below is done, which would ensure the entry is overwritten
- * to NULL, etc.
+ * WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL)
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * smp_store_release(A->locked, 0)
*/
smp_store_release(&lock->locked, 0);
+ if (likely(rqh->cnt <= RES_NR_HELD))
+ WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
this_cpu_dec(rqspinlock_held_locks.cnt);
}
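
[Annotation: a minimal caller-side sketch for context; the names are
hypothetical, and the IRQ/preemption handling normally supplied by wrapper
macros is omitted. Unlike an ordinary spinlock, res_spin_lock() can fail,
e.g. when deadlock detection or the timeout fires, so the caller must check
the return value.]

static rqspinlock_t demo_lock;

static int demo_update(void)
{
        int ret;

        /* Held-lock entry is grabbed before the cmpxchg, per above. */
        ret = res_spin_lock(&demo_lock);
        if (ret)        /* AA/ABBA deadlock or timeout detected */
                return ret;

        /* ... critical section ... */

        /* Release store first, then the entry is cleared, per above. */
        res_spin_unlock(&demo_lock);
        return 0;
}
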
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
index ee3793e9b1a4..da1610a78f92 100644
--- a/include/asm-generic/thread_info_tif.h
+++ b/include/asm-generic/thread_info_tif.h
@@ -45,4 +45,7 @@
# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
#endif
+#define TIF_RSEQ 11 // Run RSEQ fast path
+#define _TIF_RSEQ BIT(TIF_RSEQ)
+
#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
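
[Annotation: a hedged sketch of how generic code would consume the new bit.
The functions below are hypothetical; set_tsk_thread_flag() and
test_thread_flag() are the standard thread_info helpers.]

/* Arm the RSEQ exit-to-user fast path for a task. */
static void demo_arm_rseq(struct task_struct *t)
{
        set_tsk_thread_flag(t, TIF_RSEQ);
}

/* On the exit-to-user path, only do the work when armed. */
static void demo_exit_to_user(void)
{
        if (test_thread_flag(TIF_RSEQ)) {
                /* ... run the RSEQ fast path ... */
        }
}
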
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e04d56a5332e..a464ff6c1a61 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -87,39 +87,56 @@
#define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)
/*
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
- * generates .data.identifier sections, which need to be pulled in with
- * .data. We don't want to pull in .data..other sections, which Linux
- * has defined. Same for text and bss.
+ * Support -ffunction-sections by matching .text and .text.*,
+ * but exclude '.text..*', .text.startup[.*], and .text.exit[.*].
*
- * With LTO_CLANG, the linker also splits sections by default, so we need
- * these macros to combine the sections during the final link.
+ * .text.startup and .text.startup.* are matched later by INIT_TEXT, and
+ * .text.exit and .text.exit.* are matched later by EXIT_TEXT, so they must be
+ * explicitly excluded here.
*
- * With AUTOFDO_CLANG and PROPELLER_CLANG, by default, the linker splits
- * text sections and regroups functions into subsections.
+ * Other .text.* sections that are typically grouped separately, such as
+ * .text.unlikely or .text.hot, must be matched explicitly before using
+ * TEXT_MAIN.
*
- * RODATA_MAIN is not used because existing code already defines .rodata.x
- * sections to be brought in with rodata.
+ * NOTE: builds *with* and *without* -ffunction-sections are both supported by
+ * this single macro. Even with -ffunction-sections, there may be some objects
+ * NOT compiled with the flag due to the use of a specific Makefile override
+ * like cflags-y or AUTOFDO_PROFILE_foo.o. So this single catchall rule is
+ * needed to support mixed object builds.
+ *
+ * One implication is that functions named startup(), exit(), split(),
+ * unlikely(), hot(), and unknown() are not allowed in the kernel due to the
+ * ambiguity of their section names with -ffunction-sections. For example,
+ * .text.startup could be __attribute__((constructor)) code in a *non*
+ * ffunction-sections object, which should be placed in .init.text; or it could
+ * be an actual function named startup() in an ffunction-sections object, which
+ * should be placed in .text. The build will detect and complain about any such
+ * ambiguously named functions.
+ */
+#define TEXT_MAIN \
+ .text \
+ .text.[_0-9A-Za-df-rt-z]* \
+ .text.s[_0-9A-Za-su-z]* .text.s .text.s.* \
+ .text.st[_0-9A-Zb-z]* .text.st .text.st.* \
+ .text.sta[_0-9A-Za-qs-z]* .text.sta .text.sta.* \
+ .text.star[_0-9A-Za-su-z]* .text.star .text.star.* \
+ .text.start[_0-9A-Za-tv-z]* .text.start .text.start.* \
+ .text.startu[_0-9A-Za-oq-z]* .text.startu .text.startu.* \
+ .text.startup[_0-9A-Za-z]* \
+ .text.e[_0-9A-Za-wy-z]* .text.e .text.e.* \
+ .text.ex[_0-9A-Za-hj-z]* .text.ex .text.ex.* \
+ .text.exi[_0-9A-Za-su-z]* .text.exi .text.exi.* \
+ .text.exit[_0-9A-Za-z]*
+
+/*
+ * Support -fdata-sections by matching .data, .data.*, and others,
+ * but exclude '.data..*'.
*/
-#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \
-defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
-#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#else
-#define TEXT_MAIN .text
-#endif
-#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
-#else
-#define DATA_MAIN .data .data.rel .data.rel.local
-#define SDATA_MAIN .sdata
-#define RODATA_MAIN .rodata
-#define BSS_MAIN .bss
-#define SBSS_MAIN .sbss
-#endif
/*
* GCC 4.5 and later have a 32 bytes section alignment for structures.
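
[Annotation: to make the naming hazard concrete, a hypothetical translation
unit; nothing below is from the patch.]

/* Compiled with -ffunction-sections, this lands in a section literally
 * named .text.startup ... */
static void startup(void)
{
        /* ordinary code that belongs in .text */
}

/* ... which is indistinguishable from the section GCC emits for
 * constructor code in an object built without -ffunction-sections: */
__attribute__((constructor))
static void demo_ctor(void)
{
        /* init code that belongs in .init.text via INIT_TEXT */
}

The character-class globs in TEXT_MAIN match every other .text.* name while
excluding these prefixes, and the build flags the genuinely ambiguous
function names.
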
@@ -581,9 +598,8 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
* during second ld run in second ld pass when generating System.map
*
* TEXT_MAIN here will match symbols with a fixed pattern (for example,
- * .text.hot or .text.unlikely) if dead code elimination or
- * function-section is enabled. Match these symbols first before
- * TEXT_MAIN to ensure they are grouped together.
+ * .text.hot or .text.unlikely). Match those before TEXT_MAIN to ensure
+ * they get grouped together.
*
* Also placing .text.hot section at the beginning of a page, this
* would help the TLB performance.
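
[Annotation: a hedged linker-script sketch of that ordering; this is not the
exact upstream rule, and the section lists are abbreviated.]

        .text : {
                *(.text.hot .text.hot.*)            /* hot code first; a page-aligned start helps the TLB */
                *(.text.unlikely .text.unlikely.*)  /* grouped before the catchall */
                *(TEXT_MAIN)                        /* the catchall defined above */
        }
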
@@ -729,16 +745,16 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define INIT_TEXT \
*(.init.text .init.text.*) \
- *(.text.startup)
+ *(.text.startup .text.startup.*)
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
- *(.dtors .dtors.*) \
+ *(.dtors .dtors.*)
#define EXIT_TEXT \
*(.exit.text) \
- *(.text.exit) \
+ *(.text.exit .text.exit.*)
#define EXIT_CALL \
*(.exitcall.exit)
@@ -955,7 +971,8 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define RUNTIME_CONST_VARIABLES \
RUNTIME_CONST(shift, d_hash_shift) \
- RUNTIME_CONST(ptr, dentry_hashtable)
+ RUNTIME_CONST(ptr, dentry_hashtable) \
+ RUNTIME_CONST(ptr, __dentry_cache)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \