Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/__ffs.h | 2
-rw-r--r--  include/asm-generic/bitops/__fls.h | 2
-rw-r--r--  include/asm-generic/bitops/builtin-__ffs.h | 2
-rw-r--r--  include/asm-generic/bitops/builtin-__fls.h | 2
-rw-r--r--  include/asm-generic/bitops/builtin-fls.h | 2
-rw-r--r--  include/asm-generic/bitops/ffs.h | 2
-rw-r--r--  include/asm-generic/bitops/fls.h | 2
-rw-r--r--  include/asm-generic/bitops/fls64.h | 4
-rw-r--r--  include/asm-generic/thread_info_tif.h | 48
-rw-r--r--  include/asm-generic/vdso/vsyscall.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 2
-rw-r--r--  include/clocksource/arm_arch_timer.h | 5
-rw-r--r--  include/crypto/sha2.h | 28
-rw-r--r--  include/dt-bindings/interrupt-controller/aspeed-scu-ic.h | 14
-rw-r--r--  include/linux/audit.h | 25
-rw-r--r--  include/linux/bitops.h | 2
-rw-r--r--  include/linux/cc_platform.h | 8
-rw-r--r--  include/linux/cdx/bitfield.h | 90
-rw-r--r--  include/linux/cdx/edac_cdx_pcol.h | 28
-rw-r--r--  include/linux/cdx/mcdi.h | 199
-rw-r--r--  include/linux/cfi.h | 6
-rw-r--r--  include/linux/cfi_types.h | 8
-rw-r--r--  include/linux/cgroup-defs.h | 43
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/compiler_types.h | 23
-rw-r--r--  include/linux/hrtimer.h | 14
-rw-r--r--  include/linux/hrtimer_defs.h | 2
-rw-r--r--  include/linux/irq.h | 6
-rw-r--r--  include/linux/jiffies.h | 2
-rw-r--r--  include/linux/lsm_hooks.h | 3
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/msi.h | 6
-rw-r--r--  include/linux/pagewalk.h | 3
-rw-r--r--  include/linux/perf_event.h | 4
-rw-r--r--  include/linux/platform_data/dmtimer-omap.h | 4
-rw-r--r--  include/linux/preempt.h | 11
-rw-r--r--  include/linux/psp-platform-access.h | 2
-rw-r--r--  include/linux/rcupdate.h | 2
-rw-r--r--  include/linux/resctrl.h | 148
-rw-r--r--  include/linux/resctrl_types.h | 18
-rw-r--r--  include/linux/rseq.h | 11
-rw-r--r--  include/linux/sched.h | 118
-rw-r--r--  include/linux/sched/ext.h | 6
-rw-r--r--  include/linux/sched/signal.h | 4
-rw-r--r--  include/linux/sched/topology.h | 29
-rw-r--r--  include/linux/security.h | 6
-rw-r--r--  include/linux/stddef.h | 24
-rw-r--r--  include/linux/string_choices.h | 6
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/topology.h | 2
-rw-r--r--  include/linux/uprobes.h | 20
-rw-r--r--  include/linux/workqueue.h | 32
-rw-r--r--  include/uapi/linux/audit.h | 2
-rw-r--r--  include/uapi/linux/psp-sfs.h | 87
-rw-r--r--  include/vdso/datapage.h | 9
-rw-r--r--  include/vdso/gettime.h | 1
-rw-r--r--  include/vdso/jiffies.h | 2
-rw-r--r--  include/xen/grant_table.h | 4
-rw-r--r--  include/xen/mem-reservation.h | 4
-rw-r--r--  include/xen/xen-ops.h | 7
-rw-r--r--  include/xen/xen.h | 9
62 files changed, 1032 insertions, 141 deletions
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
index 2d08c750c8a7..3a899c626fdc 100644
--- a/include/asm-generic/bitops/__ffs.h
+++ b/include/asm-generic/bitops/__ffs.h
@@ -10,7 +10,7 @@
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int generic___ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___ffs(unsigned long word)
{
unsigned int num = 0;
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
index e974ec932ec1..35f33780ca6c 100644
--- a/include/asm-generic/bitops/__fls.h
+++ b/include/asm-generic/bitops/__fls.h
@@ -10,7 +10,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int generic___fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word)
{
unsigned int num = BITS_PER_LONG - 1;
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
index cf4b3d33bf96..d3c3f567045d 100644
--- a/include/asm-generic/bitops/builtin-__ffs.h
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -8,7 +8,7 @@
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int __ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
index 6d72fc8a5259..7770c4f1bfcd 100644
--- a/include/asm-generic/bitops/builtin-__fls.h
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -8,7 +8,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int __fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
index c8455cc28841..be707da8c7cd 100644
--- a/include/asm-generic/bitops/builtin-fls.h
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -9,7 +9,7 @@
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline __attribute_const__ int fls(unsigned int x)
{
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index 4c43f242daeb..5ff2b7fbda6d 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -10,7 +10,7 @@
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from ffz (man ffs).
*/
-static inline int generic_ffs(int x)
+static inline __attribute_const__ int generic_ffs(int x)
{
int r = 1;
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index 26f3ce1dd6e4..8eed3437edb9 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -10,7 +10,7 @@
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int generic_fls(unsigned int x)
+static __always_inline __attribute_const__ int generic_fls(unsigned int x)
{
int r = 32;
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 866f2b2304ff..b5f58dd261a3 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -16,7 +16,7 @@
* at position 64.
*/
#if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
@@ -24,7 +24,7 @@ static __always_inline int fls64(__u64 x)
return fls(x);
}
#elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
if (x == 0)
return 0;
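
The hunks above tag the generic bit-search helpers with __attribute_const__, which tells the compiler that the return value depends only on the arguments, so repeated calls with the same input may be merged. A minimal userspace sketch of the effect, using the raw GCC attribute that the kernel macro expands to (my_fls() is illustrative, not a kernel function):

#include <stdio.h>

__attribute__((const)) static unsigned int my_fls(unsigned int x)
{
	/* fls() semantics: fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32 */
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int v = 0x90;

	/* Being const, both calls may be folded into one evaluation. */
	printf("%u %u\n", my_fls(v), my_fls(v));	/* prints "8 8" */
	return 0;
}
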
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
new file mode 100644
index 000000000000..ee3793e9b1a4
--- /dev/null
+++ b/include/asm-generic/thread_info_tif.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_THREAD_INFO_TIF_H_
+#define _ASM_GENERIC_THREAD_INFO_TIF_H_
+
+#include <vdso/bits.h>
+
+/* Bits 16-31 are reserved for architecture specific purposes */
+
+#define TIF_NOTIFY_RESUME 0 // callback before returning to user
+#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
+
+#define TIF_SIGPENDING 1 // signal pending
+#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
+
+#define TIF_NOTIFY_SIGNAL 2 // signal notifications exist
+#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
+
+#define TIF_MEMDIE 3 // is terminating due to OOM killer
+#define _TIF_MEMDIE BIT(TIF_MEMDIE)
+
+#define TIF_NEED_RESCHED 4 // rescheduling necessary
+#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
+
+#ifdef HAVE_TIF_NEED_RESCHED_LAZY
+# define TIF_NEED_RESCHED_LAZY 5 // Lazy rescheduling needed
+# define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
+#endif
+
+#ifdef HAVE_TIF_POLLING_NRFLAG
+# define TIF_POLLING_NRFLAG 6 // idle is polling for TIF_NEED_RESCHED
+# define _TIF_POLLING_NRFLAG BIT(TIF_POLLING_NRFLAG)
+#endif
+
+#define TIF_USER_RETURN_NOTIFY 7 // notify kernel of userspace return
+#define _TIF_USER_RETURN_NOTIFY BIT(TIF_USER_RETURN_NOTIFY)
+
+#define TIF_UPROBE 8 // breakpointed or singlestepping
+#define _TIF_UPROBE BIT(TIF_UPROBE)
+
+#define TIF_PATCH_PENDING 9 // pending live patching update
+#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+
+#ifdef HAVE_TIF_RESTORE_SIGMASK
+# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal()
+# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
+#endif
+
+#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
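
A hypothetical sketch of how an architecture header might adopt this new generic file; the HAVE_* opt-ins are the ones defined above, but the arch-private flag below is made up for illustration:

/* arch/<arch>/include/asm/thread_info.h (illustrative) */
#define HAVE_TIF_NEED_RESCHED_LAZY	/* opt in to the optional bits */
#define HAVE_TIF_RESTORE_SIGMASK
#include <asm-generic/thread_info_tif.h>

#define TIF_ARCH_SPECIFIC	16	// arch bits live in 16-31 (made-up example)
#define _TIF_ARCH_SPECIFIC	BIT(TIF_ARCH_SPECIFIC)
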
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index 7fc0b560007d..5c6d9799f4e7 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -4,8 +4,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
-
#ifndef __arch_get_vdso_u_time_data
static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
@@ -20,8 +18,6 @@ static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(vo
}
#endif
-#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
-
#ifndef __arch_update_vdso_clock
static __always_inline void __arch_update_vdso_clock(struct vdso_clock *vc)
{
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ae2d2359b79e..a65a87366c48 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -157,7 +157,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define PATCHABLE_DISCARDS *(__patchable_function_entries)
#endif
-#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+#ifndef CONFIG_ARCH_SUPPORTS_CFI
/*
* Simply points to ftrace_stub, but with the proper protocol.
* Defined by the linker script in linux/vmlinux.lds.h
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index ce6521ad04d1..2eda895f19f5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -9,9 +9,6 @@
#include <linux/timecounter.h>
#include <linux/types.h>
-#define ARCH_TIMER_TYPE_CP15 BIT(0)
-#define ARCH_TIMER_TYPE_MEM BIT(1)
-
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
@@ -51,8 +48,6 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
-#define ARCH_TIMER_MEM_PHYS_ACCESS 2
-#define ARCH_TIMER_MEM_VIRT_ACCESS 3
#define ARCH_TIMER_MEM_MAX_FRAMES 8
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index 15e461e568cc..e5dafb935cc8 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -376,6 +376,34 @@ void sha256_final(struct sha256_ctx *ctx, u8 out[SHA256_DIGEST_SIZE]);
void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]);
/**
+ * sha256_finup_2x() - Compute two SHA-256 digests from a common initial
+ * context. On some CPUs, this is faster than sequentially
+ * computing each digest.
+ * @ctx: an optional initial context, which may have already processed data. If
+ * NULL, a default initial context is used (equivalent to sha256_init()).
+ * @data1: data for the first message
+ * @data2: data for the second message
+ * @len: the length of each of @data1 and @data2, in bytes
+ * @out1: (output) the first SHA-256 message digest
+ * @out2: (output) the second SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1,
+ const u8 *data2, size_t len, u8 out1[SHA256_DIGEST_SIZE],
+ u8 out2[SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x_is_optimized() - Check if sha256_finup_2x() is using a real
+ * interleaved implementation, as opposed to a
+ * sequential fallback
+ * Return: true if optimized
+ *
+ * Context: Any context.
+ */
+bool sha256_finup_2x_is_optimized(void);
+
+/**
* struct hmac_sha256_key - Prepared key for HMAC-SHA256
* @key: private
*/
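
A sketch of a caller of the new interface, assuming two equal-length message tails that share a common prefix (the prefix/tail buffers and lengths below are placeholders):

struct sha256_ctx ctx;
u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];

sha256_init(&ctx);
sha256_update(&ctx, prefix, prefix_len);	/* data common to both messages */
/* tail1 and tail2 must have the same length */
sha256_finup_2x(&ctx, tail1, tail2, tail_len, d1, d2);
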
diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
index f315d5a7f5ee..7dd04424afcc 100644
--- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
@@ -20,4 +20,18 @@
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0
+
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0
+
#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index a394614ccd0b..536f8ee8da81 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -37,6 +37,8 @@ struct audit_watch;
struct audit_tree;
struct sk_buff;
struct kern_ipc_perm;
+struct lsm_id;
+struct lsm_prop;
struct audit_krule {
u32 pflags;
@@ -147,6 +149,10 @@ extern unsigned compat_signal_class[];
#define AUDIT_TTY_ENABLE BIT(0)
#define AUDIT_TTY_LOG_PASSWD BIT(1)
+/* bit values for audit_cfg_lsm */
+#define AUDIT_CFG_LSM_SECCTX_SUBJECT BIT(0)
+#define AUDIT_CFG_LSM_SECCTX_OBJECT BIT(1)
+
struct filename;
#define AUDIT_OFF 0
@@ -185,6 +191,8 @@ extern void audit_log_path_denied(int type,
const char *operation);
extern void audit_log_lost(const char *message);
+extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
+extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
@@ -210,6 +218,8 @@ extern u32 audit_enabled;
extern int audit_signal_info(int sig, struct task_struct *t);
+extern void audit_cfg_lsm(const struct lsm_id *lsmid, int flags);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -245,6 +255,16 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline void audit_log_path_denied(int type, const char *operation)
{ }
+static inline int audit_log_subj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
+static inline int audit_log_obj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
@@ -269,6 +289,9 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
return 0;
}
+static inline void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
+{ }
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -527,7 +550,7 @@ static inline void audit_log_kern_module(const char *name)
static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
- if (!audit_dummy_context())
+ if (audit_enabled)
__audit_fanotify(response, friar);
}
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 9be2d50da09a..ea7898cc5903 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -267,7 +267,7 @@ static inline int parity8(u8 val)
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
-static inline unsigned int __ffs64(u64 word)
+static inline __attribute_const__ unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index 0bf7d33a1048..7fcec025c5e0 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -96,6 +96,14 @@ enum cc_attr {
* enabled to run SEV-SNP guests.
*/
CC_ATTR_HOST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_SNP_SECURE_AVIC: Secure AVIC mode is active.
+ *
+ * The host kernel is running with the necessary features enabled
+ * to run SEV-SNP guests with full Secure AVIC capabilities.
+ */
+ CC_ATTR_SNP_SECURE_AVIC,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
diff --git a/include/linux/cdx/bitfield.h b/include/linux/cdx/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/include/linux/cdx/bitfield.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
+#endif /* CDX_BITFIELD_H */
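
A sketch of the new header in use; EXAMPLE_FIELD is a made-up field definition in the LBN/WIDTH style the macros expect:

#define EXAMPLE_FIELD_LBN	0
#define EXAMPLE_FIELD_WIDTH	8

struct cdx_dword dword;
unsigned int val;

CDX_POPULATE_DWORD_1(dword, EXAMPLE_FIELD, 0xab);	/* pack bits [0,8) */
val = CDX_DWORD_FIELD(dword, EXAMPLE_FIELD);		/* val == 0xab */
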
diff --git a/include/linux/cdx/edac_cdx_pcol.h b/include/linux/cdx/edac_cdx_pcol.h
new file mode 100644
index 000000000000..749db33bb482
--- /dev/null
+++ b/include/linux/cdx/edac_cdx_pcol.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+#include <linux/cdx/mcdi.h>
+
+#define MC_CMD_EDAC_GET_DDR_CONFIG_OUT_WORD_LENGTH_LEN 4
+/* Number of registers for the DDR controller */
+#define MC_CMD_GET_DDR_CONFIG_OFST 4
+#define MC_CMD_GET_DDR_CONFIG_LEN 4
+
+/***********************************/
+/* MC_CMD_EDAC_GET_DDR_CONFIG
+ * Provides detailed configuration for the DDR controller of the given index.
+ */
+#define MC_CMD_EDAC_GET_DDR_CONFIG 0x3
+
+/* MC_CMD_EDAC_GET_DDR_CONFIG_IN msgrequest */
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_OFST 0
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN 4
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/include/linux/cdx/mcdi.h b/include/linux/cdx/mcdi.h
new file mode 100644
index 000000000000..74075305cba4
--- /dev/null
+++ b/include/linux/cdx/mcdi.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "linux/cdx/bitfield.h"
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT (60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
+ * as MC have too many outstanding commands. Command will be retried once
+ * another command returns.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
+ * with CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 Remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated NIC
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
+#endif /* CDX_MCDI_H */
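
A sketch of an MCDI round trip using the constants from edac_cdx_pcol.h above; the buffer sizes are illustrative and error handling is omitted:

MCDI_DECLARE_BUF(inbuf, MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_DDR_CONFIG_LEN);
size_t outlen;
int ret;

/* select DDR controller 0 */
MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, 0);
ret = cdx_mcdi_rpc(cdx, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
		   outbuf, sizeof(outbuf), &outlen);
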
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index 52a98886a455..1fd22ea6eba4 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <asm/cfi.h>
-#ifdef CONFIG_CFI_CLANG
+#ifdef CONFIG_CFI
extern bool cfi_warn;
enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
@@ -52,7 +52,7 @@ static inline u32 cfi_get_func_hash(void *func)
extern u32 cfi_bpf_hash;
extern u32 cfi_bpf_subprog_hash;
-#else /* CONFIG_CFI_CLANG */
+#else /* CONFIG_CFI */
static inline int cfi_get_offset(void) { return 0; }
static inline u32 cfi_get_func_hash(void *func) { return 0; }
@@ -60,7 +60,7 @@ static inline u32 cfi_get_func_hash(void *func) { return 0; }
#define cfi_bpf_hash 0U
#define cfi_bpf_subprog_hash 0U
-#endif /* CONFIG_CFI_CLANG */
+#endif /* CONFIG_CFI */
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
bool is_cfi_trap(unsigned long addr);
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
index 685f7181780f..a86af9bc8bdc 100644
--- a/include/linux/cfi_types.h
+++ b/include/linux/cfi_types.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
#include <linux/linkage.h>
-#ifdef CONFIG_CFI_CLANG
+#ifdef CONFIG_CFI
/*
* Use the __kcfi_typeid_<function> type identifier symbol to
* annotate indirectly called assembly functions. The compiler emits
@@ -29,12 +29,12 @@
#define SYM_TYPED_START(name, linkage, align...) \
SYM_TYPED_ENTRY(name, linkage, align)
-#else /* CONFIG_CFI_CLANG */
+#else /* CONFIG_CFI */
#define SYM_TYPED_START(name, linkage, align...) \
SYM_START(name, linkage, align)
-#endif /* CONFIG_CFI_CLANG */
+#endif /* CONFIG_CFI */
#ifndef SYM_TYPED_FUNC_START
#define SYM_TYPED_FUNC_START(name) \
@@ -43,7 +43,7 @@
#else /* __ASSEMBLY__ */
-#ifdef CONFIG_CFI_CLANG
+#ifdef CONFIG_CFI
#define DEFINE_CFI_TYPE(name, func) \
/* \
* Force a reference to the function so the compiler generates \
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6b93a64115fe..93318fce31f3 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -91,6 +91,12 @@ enum {
* cgroup_threadgroup_rwsem. This makes hot path operations such as
* forks and exits into the slow path and more expensive.
*
+ * Alleviate the contention between fork, exec, exit operations and
+ * writing to cgroup.procs by taking a per threadgroup rwsem instead of
+ * the global cgroup_threadgroup_rwsem. Fork and other operations
+ * from threads in different thread groups no longer contend with
+ * writing to cgroup.procs.
+ *
* The static usage pattern of creating a cgroup, enabling controllers,
* and then seeding it with CLONE_INTO_CGROUP doesn't require write
* locking cgroup_threadgroup_rwsem and thus doesn't benefit from
@@ -140,6 +146,17 @@ enum {
__CFTYPE_ADDED = (1 << 18),
};
+enum cgroup_attach_lock_mode {
+ /* Default */
+ CGRP_ATTACH_LOCK_GLOBAL,
+
+ /* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
+ CGRP_ATTACH_LOCK_NONE,
+
+ /* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
+ CGRP_ATTACH_LOCK_PER_THREADGROUP,
+};
+
/*
* cgroup_file is the handle for a file instance created in a cgroup which
* is used, for example, to generate file changed notifications. This can
@@ -433,6 +450,23 @@ struct cgroup_freezer_state {
* frozen, SIGSTOPped, and PTRACEd.
*/
int nr_frozen_tasks;
+
+ /* Freeze time data consistency protection */
+ seqcount_t freeze_seq;
+
+ /*
+ * Most recent time the cgroup was requested to freeze.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 freeze_start_nsec;
+
+ /*
+ * Total duration the cgroup has spent freezing.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 frozen_nsec;
};
struct cgroup {
@@ -746,7 +780,6 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
@@ -822,6 +855,7 @@ struct cgroup_subsys {
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+extern bool cgroup_enable_per_threadgroup_rwsem;
struct cgroup_of_peak {
unsigned long value;
@@ -833,11 +867,14 @@ struct cgroup_of_peak {
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
- * using a percpu_rw_semaphore.
+ * using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
+ * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
+ if (cgroup_enable_per_threadgroup_rwsem)
+ down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
@@ -848,6 +885,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
+ if (cgroup_enable_per_threadgroup_rwsem)
+ up_read(&tsk->signal->cgroup_threadgroup_rwsem);
percpu_up_read(&cgroup_threadgroup_rwsem);
}
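
The begin/end pair now takes the per-threadgroup semaphore after the global percpu one, so callers are unchanged. A sketch of the usage contract:

cgroup_threadgroup_change_begin(tsk);
/* tsk's thread group is stable: cgroup.procs writers are excluded */
cgroup_threadgroup_change_end(tsk);
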
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index bab98357960d..7e292bdff2c1 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -355,6 +355,11 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return css->flags & CSS_DYING;
}
+static inline bool css_is_online(struct cgroup_subsys_state *css)
+{
+ return css->flags & CSS_ONLINE;
+}
+
static inline bool css_is_self(struct cgroup_subsys_state *css)
{
if (css == &css->cgroup->self) {
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 64ff73c533e5..5b45ea7dff3e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -248,7 +248,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif /* __KERNEL__ */
-#if defined(CONFIG_CFI_CLANG) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#if defined(CONFIG_CFI) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
* Force a reference to the external symbol so the compiler generates
* __kcfi_typid.
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 16755431fc11..2f3e80bf9f35 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -330,6 +330,29 @@ struct ftrace_likely_data {
#endif
/*
+ * The assume attribute is used to indicate that a certain condition is
+ * assumed to be true. If this condition is violated at runtime, the behavior
+ * is undefined. Compilers may or may not use this indication to generate
+ * optimized code.
+ *
+ * Note that the clang documentation states that optimizers may react
+ * differently to this attribute, and this may even have a negative
+ * performance impact. Therefore this attribute should be used with care.
+ *
+ * Optional: only supported since gcc >= 13
+ * Optional: only supported since clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#index-assume-statement-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id13
+ *
+ */
+#ifdef CONFIG_CC_HAS_ASSUME
+# define __assume(expr) __attribute__((__assume__(expr)))
+#else
+# define __assume(expr)
+#endif
+
+/*
* Optional: only supported since gcc >= 15
* Optional: only supported since clang >= 18
*
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1ef867bb8c44..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -154,14 +154,11 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
return ktime_to_ns(timer->node.expires);
}
-static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
-{
- return ktime_sub(timer->node.expires, timer->base->get_time());
-}
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
- return timer->base->get_time();
+ return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
@@ -200,8 +197,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
- return __hrtimer_expires_remaining_adjusted(timer,
- timer->base->get_time());
+ return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}
#ifdef CONFIG_TIMERFD
@@ -363,7 +359,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
- return hrtimer_forward(timer, timer->base->get_time(), interval);
+ return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
/* Precise sleep: */
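
A sketch of a periodic callback built on the reworked helper, which now reads the clock via hrtimer_cb_get_time() instead of the removed ->get_time member (example_tick is illustrative):

static enum hrtimer_restart example_tick(struct hrtimer *t)
{
	/* push the expiry forward by whole 100ms periods past now */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}
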
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 84a5045f80f3..aa49ffa130e5 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -41,7 +41,6 @@
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -51,7 +50,6 @@ struct hrtimer_clock_base {
seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
- ktime_t (*get_time)(void);
ktime_t offset;
} __hrtimer_clock_base_align;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1d6b606a81ef..c67e76fbcc07 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -669,6 +669,8 @@ extern int irq_chip_set_parent_state(struct irq_data *data,
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
@@ -976,10 +978,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY
-void irq_init_desc(unsigned int irq);
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 91b20788273d..0d1927da8055 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -61,7 +61,7 @@
extern void register_refined_jiffies(long clock_tick_rate);
-/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+/* TICK_USEC is the time between ticks in usec */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 090d1d3e19fe..79ec5a2bdcca 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -116,6 +116,9 @@ struct lsm_blob_sizes {
int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
int lbs_tun_dev;
int lbs_bdev;
+ int lbs_bpf_map;
+ int lbs_bpf_prog;
+ int lbs_bpf_token;
};
/*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index a643fae8a349..7f625c35128b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1107,6 +1107,11 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ /* the ABI-related flags from the ELF header. Used for core dump */
+ unsigned long saved_e_flags;
+#endif
+
struct percpu_counter rss_stat[NR_MM_COUNTERS];
struct linux_binfmt *binfmt;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index e5e86a8529fb..d415dd15a0a9 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -431,8 +431,6 @@ struct msi_domain_info;
* function.
* @domain_free_irqs: Optional function to override the default free
* function.
- * @msi_post_free: Optional function which is invoked after freeing
- * all interrupts.
* @msi_translate: Optional translate callback to support the odd wire to
* MSI bridges, e.g. MBIGEN
*
@@ -473,8 +471,6 @@ struct msi_domain_ops {
struct device *dev, int nvec);
void (*domain_free_irqs)(struct irq_domain *domain,
struct device *dev);
- void (*msi_post_free)(struct irq_domain *domain,
- struct device *dev);
int (*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
irq_hw_number_t *hwirq, unsigned int *type);
};
@@ -568,6 +564,8 @@ enum {
MSI_FLAG_PARENT_PM_DEV = (1 << 8),
/* Support for parent mask/unmask */
MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
+ /* Support for parent startup/shutdown */
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT = (1 << 10),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 682472c15495..88e18615dd72 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -134,6 +134,9 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
int walk_kernel_page_table_range(unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
pgd_t *pgd, void *private);
+int walk_kernel_page_table_range_lockless(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ec9d96025683..fd1d91017b99 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -859,7 +859,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
- atomic_t mmap_count;
+ refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
@@ -1719,7 +1719,7 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index 95d852aef130..726d89143842 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -36,9 +36,13 @@ struct omap_dm_timer_ops {
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger, int autoreload);
int (*get_pwm_status)(struct omap_dm_timer *timer);
+ int (*set_cap)(struct omap_dm_timer *timer,
+ int autoreload, bool config_period);
+ int (*get_cap_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ unsigned int (*read_cap)(struct omap_dm_timer *timer, bool is_period);
int (*write_counter)(struct omap_dm_timer *timer,
unsigned int value);
unsigned int (*read_status)(struct omap_dm_timer *timer);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 1fad1c8a4c76..102202185d7a 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -372,7 +372,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
/*
* Migrate-Disable and why it is undesired.
*
- * When a preempted task becomes elegible to run under the ideal model (IOW it
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
@@ -387,7 +387,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* - a lower priority tasks; which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
- * in a migrate_disable() section, reducing it's available bandwidth.
+ * in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
@@ -399,7 +399,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
- * all these primitives employ migirate_disable() to restore this implicit
+ * all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
@@ -407,7 +407,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
- * a schedulability theory that does not depend on abritrary migration.
+ * a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
@@ -424,8 +424,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* work-conserving schedulers.
*
*/
-extern void migrate_disable(void);
-extern void migrate_enable(void);
/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
@@ -471,7 +469,6 @@ static __always_inline void preempt_enable_nested(void)
DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
-DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
#ifdef CONFIG_PREEMPT_DYNAMIC
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
index 1504fb012c05..540abf7de048 100644
--- a/include/linux/psp-platform-access.h
+++ b/include/linux/psp-platform-access.h
@@ -7,6 +7,8 @@
enum psp_platform_access_msg {
PSP_CMD_NONE = 0x0,
+ PSP_SFS_GET_FW_VERSIONS,
+ PSP_SFS_UPDATE,
PSP_CMD_HSTI_QUERY = 0x14,
PSP_I2C_REQ_BUS_CMD = 0x64,
PSP_DYNAMIC_BOOST_GET_NONCE,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 120536f4c6eb..8f346c847ee5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -24,7 +24,7 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 6fb4894b8cfd..a7d92718b653 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -157,27 +157,42 @@ struct rdt_ctrl_domain {
};
/**
+ * struct mbm_cntr_cfg - Assignable counter configuration.
+ * @evtid: MBM event to which the counter is assigned. Only valid
+ * if @rdtgroup is not NULL.
+ * @rdtgrp: resctrl group assigned to the counter. NULL if the
+ * counter is free.
+ */
+struct mbm_cntr_cfg {
+ enum resctrl_event_id evtid;
+ struct rdtgroup *rdtgrp;
+};
+
+/**
* struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
* @hdr: common header for different domain types
* @ci_id: cache info id for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_states: Per-event pointer to the MBM event's saved state.
+ * An MBM event's state is an array of struct mbm_state
+ * indexed by RMID on x86 or combined CLOSID, RMID on Arm.
* @mbm_over: worker to periodically read MBM h/w counters
* @cqm_limbo: worker to periodically read CQM h/w counters
* @mbm_work_cpu: worker CPU for MBM h/w counters
* @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @cntr_cfg: array of assignable counters' configuration (indexed
+ * by counter ID)
*/
struct rdt_mon_domain {
struct rdt_domain_hdr hdr;
unsigned int ci_id;
unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
+ struct mbm_state *mbm_states[QOS_NUM_L3_MBM_EVENTS];
struct delayed_work mbm_over;
struct delayed_work cqm_limbo;
int mbm_work_cpu;
int cqm_work_cpu;
+ struct mbm_cntr_cfg *cntr_cfg;
};
/**
@@ -256,39 +271,52 @@ enum resctrl_schema_fmt {
};
/**
+ * struct resctrl_mon - Monitoring related data of a resctrl resource.
+ * @num_rmid: Number of RMIDs available.
+ * @mbm_cfg_mask: Memory transactions that can be tracked when bandwidth
+ * monitoring events can be configured.
+ * @num_mbm_cntrs: Number of assignable counters.
+ * @mbm_cntr_assignable:Is system capable of supporting counter assignment?
+ * @mbm_assign_on_mkdir:True if counters should automatically be assigned to MBM
+ * events of monitor groups created via mkdir.
+ */
+struct resctrl_mon {
+ int num_rmid;
+ unsigned int mbm_cfg_mask;
+ int num_mbm_cntrs;
+ bool mbm_cntr_assignable;
+ bool mbm_assign_on_mkdir;
+};
+
+/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
- * @num_rmid: Number of RMIDs available
* @ctrl_scope: Scope of this resource for control functions
* @mon_scope: Scope of this resource for monitor functions
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
+ * @mon: Monitoring related data.
* @ctrl_domains: RCU list of all control domains for this resource
* @mon_domains: RCU list of all monitor domains for this resource
* @name: Name to use in "schemata" file.
* @schema_fmt: Which format string and parser is used for this schema.
- * @evt_list: List of monitoring events
- * @mbm_cfg_mask: Bandwidth sources that can be tracked when bandwidth
- * monitoring events can be configured.
* @cdp_capable: Is the CDP feature available on this resource
*/
struct rdt_resource {
int rid;
bool alloc_capable;
bool mon_capable;
- int num_rmid;
enum resctrl_scope ctrl_scope;
enum resctrl_scope mon_scope;
struct resctrl_cache cache;
struct resctrl_membw membw;
+ struct resctrl_mon mon;
struct list_head ctrl_domains;
struct list_head mon_domains;
char *name;
enum resctrl_schema_fmt schema_fmt;
- struct list_head evt_list;
- unsigned int mbm_cfg_mask;
bool cdp_capable;
};
@@ -372,8 +400,29 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+
+bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
+
bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
+{
+ return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
+
+/* Iterate over all memory bandwidth events */
+#define for_each_mbm_event_id(eventid) \
+ for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID; \
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)
+
+/* Iterate over memory bandwidth arrays in domain structures */
+#define for_each_mbm_idx(idx) \
+ for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
+
/**
* resctrl_arch_mon_event_config_write() - Write the config for an event.
* @config_info: struct resctrl_mon_config_info describing the resource, domain
@@ -416,6 +465,26 @@ static inline u32 resctrl_get_config_index(u32 closid,
bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+/**
+ * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
+ * mode is enabled.
+ * @r: Pointer to the resource structure.
+ *
+ * Return:
+ * true if the assignment mode is enabled, false otherwise.
+ */
+bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
+ * @r: Pointer to the resource structure.
+ * @enable: Set to true to enable, false to disable the assignment mode.
+ *
+ * Return:
+ * 0 on success, < 0 on error.
+ */
+int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
+
/*
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
@@ -528,6 +597,63 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *
*/
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
+/**
+ * resctrl_arch_config_cntr() - Configure the counter with its new RMID
+ * and event details.
+ * @r: Resource structure.
+ * @d: The domain in which counter with ID @cntr_id should be configured.
+ * @evtid: Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
+ * or QOS_L3_MBM_LOCAL_EVENT_ID).
+ * @rmid: RMID.
+ * @closid: CLOSID.
+ * @cntr_id: Counter ID to configure.
+ * @assign: True to assign the counter or update an existing assignment,
+ * false to unassign the counter.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ enum resctrl_event_id evtid, u32 rmid, u32 closid,
+ u32 cntr_id, bool assign);
+
+/**
+ * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
+ * assigned to the RMID, event pair for this resource
+ * and domain.
+ * @r: Resource that the counter should be read from.
+ * @d: Domain that the counter should be read from.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to read.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ * @val: Result of the counter read in bytes.
+ *
+ * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
+ * Called from a non-migratable process context via smp_call_on_cpu() unless all
+ * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
+ * @r: The domain's resource.
+ * @d: The counter ID's domain.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to reset.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid);
+
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index a25fb9c4070d..acfe07860b34 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -34,11 +34,18 @@
/* Max event bits supported */
#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
-/*
- * Event IDs, the values match those used to program IA32_QM_EVTSEL before
- * reading IA32_QM_CTR on RDT systems.
- */
+/* Number of memory transactions that an MBM event can be configured with */
+#define NUM_MBM_TRANSACTIONS 7
+
+/* Event IDs */
enum resctrl_event_id {
+ /* Must match value of first event below */
+ QOS_FIRST_EVENT = 0x01,
+
+ /*
+ * These values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
QOS_L3_OCCUP_EVENT_ID = 0x01,
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
@@ -47,4 +54,7 @@ enum resctrl_event_id {
QOS_NUM_EVENTS,
};
+#define QOS_NUM_L3_MBM_EVENTS (QOS_L3_MBM_LOCAL_EVENT_ID - QOS_L3_MBM_TOTAL_EVENT_ID + 1)
+#define MBM_STATE_IDX(evt) ((evt) - QOS_L3_MBM_TOTAL_EVENT_ID)
+
#endif /* __LINUX_RESCTRL_TYPES_H */
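
A sketch of the new iterator and index helper working together; "d" stands for a struct rdt_mon_domain from resctrl.h:

enum resctrl_event_id evt;

for_each_mbm_event_id(evt) {
	struct mbm_state *ms = d->mbm_states[MBM_STATE_IDX(evt)];

	if (!resctrl_is_mon_event_enabled(evt))
		continue;
	/* ms points at the per-RMID saved state for this MBM event */
}
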
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index a96fd345aa38..69553e7c14c1 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -7,6 +7,12 @@
#include <linux/preempt.h>
#include <linux/sched.h>
+#ifdef CONFIG_MEMBARRIER
+# define RSEQ_EVENT_GUARD irq
+#else
+# define RSEQ_EVENT_GUARD preempt
+#endif
+
/*
* Map the event mask on the user-space ABI enum rseq_cs_flags
* for direct mask checks.
@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
+ scoped_guard(RSEQ_EVENT_GUARD)
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
rseq_handle_notify_resume(ksig, regs);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e4ce0a76831e..cbb7340c5866 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -49,6 +49,9 @@
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#ifndef COMPILE_OFFSETS
+#include <generated/rq-offsets.h>
+#endif
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -881,6 +884,11 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
+#ifdef CONFIG_CFS_BANDWIDTH
+ struct callback_head sched_throttle_work;
+ struct list_head throttle_node;
+ bool throttled;
+#endif
#endif
@@ -2310,4 +2318,114 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+#ifndef MODULE
+#ifndef COMPILE_OFFSETS
+
+extern void ___migrate_enable(void);
+
+struct rq;
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+/*
+ * The "struct rq" is not available here, so we can't access the
+ * "runqueues" with this_cpu_ptr(), as the compilation will fail in
+ * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr():
+ * typeof((ptr) + 0)
+ *
+ * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
+ */
+#ifdef CONFIG_SMP
+#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
+#else
+#define this_rq_raw() PERCPU_PTR(&runqueues)
+#endif
+#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
+
+static inline void __migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
+#endif
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
+
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ guard(preempt)();
+ if (unlikely(p->cpus_ptr != &p->cpus_mask))
+ ___migrate_enable();
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+ * regular cpus_mask, otherwise things that race (e.g.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq_pinned()--;
+}
+
+static inline void __migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+		 * Warn about overflow halfway through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
+ p->migration_disabled++;
+ return;
+ }
+
+ guard(preempt)();
+ this_rq_pinned()++;
+ p->migration_disabled = 1;
+}
+#else /* COMPILE_OFFSETS */
+static inline void __migrate_disable(void) { }
+static inline void __migrate_enable(void) { }
+#endif /* !COMPILE_OFFSETS */
+
+/*
+ * To avoid exporting the runqueues variable, migrate_enable() and
+ * migrate_disable() are also defined and exported in kernel/sched/core.c,
+ * and modules use those definitions. The macro
+ * "INSTANTIATE_EXPORTED_MIGRATE_DISABLE" is defined in kernel/sched/core.c.
+ */
+#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
+static inline void migrate_disable(void)
+{
+ __migrate_disable();
+}
+
+static inline void migrate_enable(void)
+{
+ __migrate_enable();
+}
+#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+
+#else /* MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* MODULE */
+
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
#endif
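
The DEFINE_LOCK_GUARD_0() at the end makes migrate-disabled sections usable with the cleanup.h guard infrastructure. A brief usage sketch; the per-CPU counter is illustrative:

static DEFINE_PER_CPU(unsigned long, example_counter);

static void touch_percpu_state(void)
{
	/* Stay on this CPU until the end of the scope. */
	guard(migrate)();

	this_cpu_inc(example_counter);
}
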
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 7047101dbf58..d82b7a9b0658 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -108,7 +108,11 @@ enum scx_kf_mask {
SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
/* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
- /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ /*
+	 * ops.dispatch() may temporarily release the rq lock, so ENQUEUE and
+	 * SELECT_CPU may be nested inside it. ops.dequeue() (in REST) may also
+	 * be nested inside DISPATCH.
+ */
SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1ef1edbaaf79..7d6449982822 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -226,6 +226,10 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
+#ifdef CONFIG_CGROUPS
+ struct rw_semaphore cgroup_threadgroup_rwsem;
+#endif
+
/*
* Thread is the potential origin of an oom condition; kill first on
* oom
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 5263746b63e8..bbcfdf12aa6e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -30,33 +30,24 @@ struct sd_flag_debug {
};
extern const struct sd_flag_debug sd_flag_debug[];
+struct sched_domain_topology_level;
+
#ifdef CONFIG_SCHED_SMT
-static inline int cpu_smt_flags(void)
-{
- return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
-}
+extern int cpu_smt_flags(void);
+extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_CLUSTER
-static inline int cpu_cluster_flags(void)
-{
- return SD_CLUSTER | SD_SHARE_LLC;
-}
+extern int cpu_cluster_flags(void);
+extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_MC
-static inline int cpu_core_flags(void)
-{
- return SD_SHARE_LLC;
-}
+extern int cpu_core_flags(void);
+extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
-#ifdef CONFIG_NUMA
-static inline int cpu_numa_flags(void)
-{
- return SD_NUMA;
-}
-#endif
+extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
extern int arch_asym_cpu_priority(int cpu);
@@ -172,7 +163,7 @@ bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);
struct sd_data {
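
With the extra topology-level argument, a mask helper can consult the level it is attached to instead of being a pure function of the CPU. An arch topology table pairing the new tl_*_mask() helpers with the flag callbacks would look roughly like the sketch below; the initializer layout is an assumption based on the typedefs above:

static struct sched_domain_topology_level example_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ .mask = tl_smt_mask, .sd_flags = cpu_smt_flags },
#endif
#ifdef CONFIG_SCHED_MC
	{ .mask = tl_mc_mask, .sd_flags = cpu_core_flags },
#endif
	{ .mask = tl_pkg_mask },
	{ },	/* terminator */
};
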
diff --git a/include/linux/security.h b/include/linux/security.h
index 9a1d4a6c8673..bd33f194c94a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -567,7 +567,8 @@ int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
-int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp);
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+ int lsmid);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(struct lsm_context *cp);
void security_inode_invalidate_secctx(struct inode *inode);
@@ -1551,7 +1552,8 @@ static inline int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
}
static inline int security_lsmprop_to_secctx(struct lsm_prop *prop,
- struct lsm_context *cp)
+ struct lsm_context *cp,
+ int lsmid)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index dab49e2ec8c0..80b6bfb944f0 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -94,7 +94,8 @@ enum {
__DECLARE_FLEX_ARRAY(TYPE, NAME)
/**
- * TRAILING_OVERLAP() - Overlap a flexible-array member with trailing members.
+ * __TRAILING_OVERLAP() - Overlap a flexible-array member with trailing
+ * members.
*
* Creates a union between a flexible-array member (FAM) in a struct and a set
* of additional members that would otherwise follow it.
@@ -102,15 +103,30 @@ enum {
* @TYPE: Flexible structure type name, including "struct" keyword.
* @NAME: Name for a variable to define.
* @FAM: The flexible-array member within @TYPE
+ * @ATTRS: Any struct attributes (usually empty)
* @MEMBERS: Trailing overlapping members.
*/
-#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS) \
+#define __TRAILING_OVERLAP(TYPE, NAME, FAM, ATTRS, MEMBERS) \
union { \
TYPE NAME; \
struct { \
- unsigned char __offset_to_##FAM[offsetof(TYPE, FAM)]; \
+			unsigned char __offset_to_##FAM[offsetof(TYPE, FAM)]; \
MEMBERS \
- }; \
+ } ATTRS; \
}
+/**
+ * TRAILING_OVERLAP() - Overlap a flexible-array member with trailing members.
+ *
+ * Creates a union between a flexible-array member (FAM) in a struct and a set
+ * of additional members that would otherwise follow it.
+ *
+ * @TYPE: Flexible structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @FAM: The flexible-array member within @TYPE
+ * @MEMBERS: Trailing overlapping members.
+ */
+#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS) \
+ __TRAILING_OVERLAP(TYPE, NAME, FAM, /* no attrs */, MEMBERS)
+
#endif
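
For illustration, a minimal sketch of how TRAILING_OVERLAP() is used, with hypothetical structures: the fixed storage in the containing struct overlays the flexible-array member of the embedded header.

struct hdr {
	int len;
	unsigned char data[];	/* flexible-array member */
};

struct frame {
	TRAILING_OVERLAP(struct hdr, h, data,
		unsigned char buf[64];	/* overlaps h.data */
	);
};
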
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index f3ba4f52ff26..6c4077be7742 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -17,6 +17,12 @@
#include <linux/types.h>
+static inline const char *str_assert_deassert(bool v)
+{
+ return v ? "assert" : "deassert";
+}
+#define str_deassert_assert(v) str_assert_deassert(!(v))
+
static inline const char *str_enable_disable(bool v)
{
return v ? "enable" : "disable";
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 77f45e5d4413..66c06fcdfe19 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1005,6 +1005,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
asmlinkage long sys_uretprobe(void);
+asmlinkage long sys_uprobe(void);
+
/* pciconfig: alpha, arm, arm64, ia64, sparc */
asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
unsigned long off, unsigned long len,
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 33b7fda97d39..6575af39fd10 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -260,7 +260,7 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
#endif
-static inline const struct cpumask *cpu_cpu_mask(int cpu)
+static inline const struct cpumask *cpu_node_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 915303a82d84..ee3d36eda45d 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -17,6 +17,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
+#include <linux/mutex.h>
struct uprobe;
struct vm_area_struct;
@@ -185,8 +186,14 @@ struct xol_area;
struct uprobes_state {
struct xol_area *xol_area;
+#ifdef CONFIG_X86_64
+ struct hlist_head head_tramps;
+#endif
};
+typedef int (*uprobe_write_verify_t)(struct page *page, unsigned long vaddr,
+ uprobe_opcode_t *insn, int nbytes, void *data);
+
extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
@@ -194,7 +201,11 @@ extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
-extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t);
+extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t,
+ bool is_register);
+extern int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long opcode_vaddr,
+ uprobe_opcode_t *insn, int nbytes, uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr,
+ void *data);
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
@@ -224,8 +235,13 @@ extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
extern void uprobe_handle_trampoline(struct pt_regs *regs);
-extern void *arch_uprobe_trampoline(unsigned long *psize);
+extern void *arch_uretprobe_trampoline(unsigned long *psize);
extern unsigned long uprobe_get_trampoline_vaddr(void);
+extern void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len);
+extern void arch_uprobe_clear_state(struct mm_struct *mm);
+extern void arch_uprobe_init_state(struct mm_struct *mm);
+extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
+extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
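
uprobe_write() generalizes uprobe_write_opcode() by letting the caller supply a uprobe_write_verify_t that checks the bytes about to be overwritten. A sketch of one such callback; it assumes the per-arch UPROBE_SWBP_INSN breakpoint definition:

/* Accept the write only if the target currently holds a breakpoint. */
static int verify_is_swbp(struct page *page, unsigned long vaddr,
			  uprobe_opcode_t *insn, int nbytes, void *data)
{
	uprobe_opcode_t cur;

	uprobe_copy_from_page(page, vaddr, &cur, sizeof(cur));
	return cur == UPROBE_SWBP_INSN ? 0 : -EINVAL;
}
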
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 45d5dd470ff6..dabc351cc127 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -410,7 +410,7 @@ enum wq_flags {
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
/* BH wq only allows the following flags */
- __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI,
+ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};
enum wq_consts {
@@ -434,10 +434,10 @@ enum wq_consts {
* short queue flush time. Don't queue works which can run for too
* long.
*
- * system_highpri_wq is similar to system_wq but for work items which
+ * system_highpri_wq is similar to system_percpu_wq but for work items which
* require WQ_HIGHPRI.
*
- * system_long_wq is similar to system_wq but may host long running
+ * system_long_wq is similar to system_percpu_wq but may host long running
* works. Queue flushing might take relatively long.
*
* system_dfl_wq is unbound workqueue. Workers are not bound to
@@ -445,13 +445,13 @@ enum wq_consts {
* executed immediately as long as max_active limit is not reached and
* resources are available.
*
- * system_freezable_wq is equivalent to system_wq except that it's
+ * system_freezable_wq is equivalent to system_percpu_wq except that it's
* freezable.
*
* *_power_efficient_wq are inclined towards saving power and converted
* into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
* they are same as their non-power-efficient counterparts - e.g.
- * system_power_efficient_wq is identical to system_wq if
+ * system_power_efficient_wq is identical to system_percpu_wq if
* 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
*
* system_bh[_highpri]_wq are convenience interface to softirq. BH work items
@@ -502,7 +502,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
* that the sum of per-node max_active's may be larger than @max_active.
*
- * For detailed information on %WQ_* flags, please refer to
+ * For detailed information on %WQ_\* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
@@ -570,7 +570,7 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
- alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
WQ_MEM_RECLAIM, 1, (name))
@@ -708,7 +708,7 @@ static inline bool mod_delayed_work(struct workqueue_struct *wq,
*/
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
- return queue_work_on(cpu, system_wq, work);
+ return queue_work_on(cpu, system_percpu_wq, work);
}
/**
@@ -727,7 +727,7 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
*/
static inline bool schedule_work(struct work_struct *work)
{
- return queue_work(system_wq, work);
+ return queue_work(system_percpu_wq, work);
}
/**
@@ -770,21 +770,21 @@ extern void __warn_flushing_systemwide_wq(void)
#define flush_scheduled_work() \
({ \
__warn_flushing_systemwide_wq(); \
- __flush_workqueue(system_wq); \
+ __flush_workqueue(system_percpu_wq); \
})
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
\
- if ((__builtin_constant_p(_wq == system_wq) && \
- _wq == system_wq) || \
+ if ((__builtin_constant_p(_wq == system_percpu_wq) && \
+ _wq == system_percpu_wq) || \
(__builtin_constant_p(_wq == system_highpri_wq) && \
_wq == system_highpri_wq) || \
(__builtin_constant_p(_wq == system_long_wq) && \
_wq == system_long_wq) || \
- (__builtin_constant_p(_wq == system_unbound_wq) && \
- _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_dfl_wq) && \
+ _wq == system_dfl_wq) || \
(__builtin_constant_p(_wq == system_freezable_wq) && \
_wq == system_freezable_wq) || \
(__builtin_constant_p(_wq == system_power_efficient_wq) && \
@@ -807,7 +807,7 @@ extern void __warn_flushing_systemwide_wq(void)
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+ return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}
/**
@@ -821,7 +821,7 @@ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
static inline bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work(system_wq, dwork, delay);
+ return queue_delayed_work(system_percpu_wq, dwork, delay);
}
#ifndef CONFIG_SMP
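
With system_wq retired in favor of an explicit choice, callers now name the per-CPU or the unbound default queue deliberately; a brief sketch (work item names are illustrative):

static void example_fn(struct work_struct *work)
{
	/* work body */
}
static DECLARE_WORK(example_work, example_fn);

static void kick_example(void)
{
	/* per-CPU default queue, the old system_wq behaviour */
	queue_work(system_percpu_wq, &example_work);
	/* use system_dfl_wq instead when the work need not be CPU-local */
}
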
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 9a4ecc9f6dc5..14a1c1fe013a 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -148,6 +148,8 @@
#define AUDIT_IPE_POLICY_LOAD 1422 /* IPE policy load */
#define AUDIT_LANDLOCK_ACCESS 1423 /* Landlock denial */
#define AUDIT_LANDLOCK_DOMAIN 1424 /* Landlock domain status */
+#define AUDIT_MAC_TASK_CONTEXTS 1425 /* Multiple LSM task contexts */
+#define AUDIT_MAC_OBJ_CONTEXTS 1426 /* Multiple LSM object contexts */
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
#define AUDIT_LAST_KERN_ANOM_MSG 1799
diff --git a/include/uapi/linux/psp-sfs.h b/include/uapi/linux/psp-sfs.h
new file mode 100644
index 000000000000..94e51670383c
--- /dev/null
+++ b/include/uapi/linux/psp-sfs.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD Seamless Firmware Servicing (SFS)
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __PSP_SFS_USER_H__
+#define __PSP_SFS_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * SFS: AMD Seamless Firmware Servicing (SFS) interface
+ */
+
+#define PAYLOAD_NAME_SIZE 64
+#define TEE_EXT_CMD_BUFFER_SIZE 4096
+
+/**
+ * struct sfs_user_get_fw_versions - get current level of base firmware (output).
+ * @blob: current level of base firmware for ASP and patch levels (input/output).
+ * @sfs_status: 32-bit SFS status value (output).
+ * @sfs_extended_status: 32-bit SFS extended status value (output).
+ */
+struct sfs_user_get_fw_versions {
+ __u8 blob[TEE_EXT_CMD_BUFFER_SIZE];
+ __u32 sfs_status;
+ __u32 sfs_extended_status;
+} __packed;
+
+/**
+ * struct sfs_user_update_package - update SFS package (input).
+ * @payload_name: name of SFS package to load, verify and execute (input).
+ * @sfs_status: 32-bit SFS status value (output).
+ * @sfs_extended_status: 32-bit SFS extended status value (output).
+ */
+struct sfs_user_update_package {
+ char payload_name[PAYLOAD_NAME_SIZE];
+ __u32 sfs_status;
+ __u32 sfs_extended_status;
+} __packed;
+
+/**
+ * Seamless Firmware Servicing (SFS) IOC
+ *
+ * possible return codes for all SFS IOCTLs:
+ * 0: success
+ * -EINVAL: invalid input or parameter
+ * -E2BIG: excess data passed
+ * -EFAULT: failed to copy to/from userspace
+ * -EBUSY: mailbox in recovery or in use
+ * -ENODEV: driver not bound with PSP device
+ * -EACCES: request isn't authorized
+ * -ETIMEDOUT: request timed out
+ * -EAGAIN: invalid request for state machine
+ * -ENOENT: not implemented
+ * -ENFILE: overflow
+ * -EPERM: invalid signature
+ * -EIO: PSP I/O error
+ */
+#define SFS_IOC_TYPE 'S'
+
+/**
+ * SFSIOCFWVERS - return a blob containing firmware versions.
+ * The ASP reports the current level of base firmware for the
+ * ASP and the other microprocessors, as well as the current
+ * patch level(s).
+ */
+#define SFSIOCFWVERS _IOWR(SFS_IOC_TYPE, 0x1, struct sfs_user_get_fw_versions)
+
+/**
+ * SFSIOCUPDATEPKG - update an SFS package/payload.
+ * The ASP loads, verifies and executes the SFS package.
+ * By default the SFS package/payload is loaded from
+ * /lib/firmware/amd. An alternative firmware loading path
+ * can be specified with the kernel parameter
+ * firmware_class.path, or customized at runtime via the
+ * sysfs file:
+ * /sys/module/firmware_class/parameters/path.
+ */
+#define SFSIOCUPDATEPKG _IOWR(SFS_IOC_TYPE, 0x2, struct sfs_user_update_package)
+
+#endif /* __PSP_SFS_USER_H__ */
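
From userspace the interface is a plain ioctl(); a minimal sketch, assuming the driver exposes a /dev/sfs character device (the node name is not part of this header):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sfs.h>

int main(void)
{
	struct sfs_user_get_fw_versions vers = { 0 };
	int fd = open("/dev/sfs", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SFSIOCFWVERS, &vers) < 0) {
		close(fd);
		return 1;
	}
	printf("sfs_status=0x%x extended=0x%x\n",
	       vers.sfs_status, vers.sfs_extended_status);
	close(fd);
	return 0;
}
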
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 02533038640e..23c39b96190f 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -31,7 +31,7 @@ struct arch_vdso_time_data {};
#if defined(CONFIG_ARCH_HAS_VDSO_ARCH_DATA)
#include <asm/vdso/arch_data.h>
-#elif defined(CONFIG_GENERIC_VDSO_DATA_STORE)
+#else
struct vdso_arch_data {
/* Needed for the generic code, never actually used at runtime */
char __unused;
@@ -164,7 +164,6 @@ struct vdso_rng_data {
* With the hidden visibility, the compiler simply generates a PC-relative
* relocation, and this is what we need.
*/
-#ifdef CONFIG_GENERIC_VDSO_DATA_STORE
extern struct vdso_time_data vdso_u_time_data __attribute__((visibility("hidden")));
extern struct vdso_rng_data vdso_u_rng_data __attribute__((visibility("hidden")));
extern struct vdso_arch_data vdso_u_arch_data __attribute__((visibility("hidden")));
@@ -185,8 +184,6 @@ enum vdso_pages {
VDSO_NR_PAGES
};
-#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */
-
/*
* The generic vDSO implementation requires that gettimeofday.h
* provides:
@@ -196,11 +193,7 @@ enum vdso_pages {
* - clock_gettime_fallback(): fallback for clock_gettime.
* - clock_getres_fallback(): fallback for clock_getres.
*/
-#ifdef ENABLE_COMPAT_VDSO
-#include <asm/vdso/compat_gettimeofday.h>
-#else
#include <asm/vdso/gettimeofday.h>
-#endif /* ENABLE_COMPAT_VDSO */
#else /* !__ASSEMBLY__ */
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
index c50d152e7b3e..9ac161866653 100644
--- a/include/vdso/gettime.h
+++ b/include/vdso/gettime.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
struct __kernel_timespec;
+struct __kernel_old_timeval;
struct timezone;
#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h
index 2f9d596c8b29..8ca04a141412 100644
--- a/include/vdso/jiffies.h
+++ b/include/vdso/jiffies.h
@@ -5,7 +5,7 @@
#include <asm/param.h> /* for HZ */
#include <vdso/time64.h>
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+/* TICK_NSEC is the time between ticks in nsec */
#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
#endif /* __VDSO_JIFFIES_H */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index e279be353e3f..69ac6d80a006 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -164,7 +164,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
{
if (flags & GNTMAP_contains_pte)
map->host_addr = addr;
- else if (xen_feature(XENFEAT_auto_translated_physmap))
+ else if (!xen_pv_domain())
map->host_addr = __pa(addr);
else
map->host_addr = addr;
@@ -181,7 +181,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
{
if (flags & GNTMAP_contains_pte)
unmap->host_addr = addr;
- else if (xen_feature(XENFEAT_auto_translated_physmap))
+ else if (!xen_pv_domain())
unmap->host_addr = __pa(addr);
else
unmap->host_addr = addr;
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index a2ab516fcd2c..3cbe3df0dfd4 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -39,7 +39,7 @@ static inline void xenmem_reservation_va_mapping_update(unsigned long count,
xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
- if (!xen_feature(XENFEAT_auto_translated_physmap))
+ if (xen_pv_domain())
__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
@@ -48,7 +48,7 @@ static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
- if (!xen_feature(XENFEAT_auto_translated_physmap))
+ if (xen_pv_domain())
__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 9e2a769b0d96..496e6013c689 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -6,6 +6,7 @@
#include <linux/notifier.h>
#include <linux/efi.h>
#include <linux/virtio_anchor.h>
+#include <xen/xen.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -116,7 +117,7 @@ static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned int domid,
struct page **pages)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ if (!xen_pv_domain())
return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
prot, domid, pages);
@@ -150,7 +151,7 @@ static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
int nr, int *err_ptr,
pgprot_t prot, unsigned int domid)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ if (!xen_pv_domain())
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
@@ -175,7 +176,7 @@ static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
pgprot_t prot, unsigned int domid,
struct page **pages)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ if (!xen_pv_domain())
return -EOPNOTSUPP;
return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
diff --git a/include/xen/xen.h b/include/xen/xen.h
index a1e5b3f18d69..61854e3f2837 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -22,8 +22,15 @@ extern bool xen_pvh;
#define xen_pvh 0
#endif
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h>
+
+#define xen_pv_domain() (cpu_feature_enabled(X86_FEATURE_XENPV))
+#else
+#define xen_pv_domain() 0
+#endif
+
#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)