Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/audit.h                       |  25
-rw-r--r--  include/linux/bpf.h                         |  73
-rw-r--r--  include/linux/bpf_verifier.h                |  65
-rw-r--r--  include/linux/btf.h                         |   2
-rw-r--r--  include/linux/cc_platform.h                 |   8
-rw-r--r--  include/linux/cdx/bitfield.h                |  90
-rw-r--r--  include/linux/cdx/edac_cdx_pcol.h           |  28
-rw-r--r--  include/linux/cdx/mcdi.h                    | 199
-rw-r--r--  include/linux/cgroup-defs.h                 |  43
-rw-r--r--  include/linux/cgroup.h                      |   6
-rw-r--r--  include/linux/filter.h                      |  17
-rw-r--r--  include/linux/hrtimer.h                     |  14
-rw-r--r--  include/linux/hrtimer_defs.h                |   2
-rw-r--r--  include/linux/irq.h                         |   6
-rw-r--r--  include/linux/jiffies.h                     |   2
-rw-r--r--  include/linux/lsm_hooks.h                   |   3
-rw-r--r--  include/linux/msi.h                         |   2
-rw-r--r--  include/linux/perf_event.h                  |   4
-rw-r--r--  include/linux/platform_data/dmtimer-omap.h  |   4
-rw-r--r--  include/linux/preempt.h                     |  11
-rw-r--r--  include/linux/psp-platform-access.h         |   2
-rw-r--r--  include/linux/rcupdate.h                    |  16
-rw-r--r--  include/linux/resctrl.h                     | 148
-rw-r--r--  include/linux/resctrl_types.h               |  18
-rw-r--r--  include/linux/rseq.h                        |  11
-rw-r--r--  include/linux/sched.h                       | 118
-rw-r--r--  include/linux/sched/ext.h                   |   6
-rw-r--r--  include/linux/sched/signal.h                |   4
-rw-r--r--  include/linux/sched/topology.h              |  29
-rw-r--r--  include/linux/security.h                    |   6
-rw-r--r--  include/linux/syscalls.h                    |   2
-rw-r--r--  include/linux/tnum.h                        |   6
-rw-r--r--  include/linux/topology.h                    |   2
-rw-r--r--  include/linux/uprobes.h                     |  20
-rw-r--r--  include/linux/verification.h                |   1
-rw-r--r--  include/linux/workqueue.h                   |  32
36 files changed, 894 insertions(+), 131 deletions(-)
diff --git a/include/linux/audit.h b/include/linux/audit.h
index a394614ccd0b..536f8ee8da81 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -37,6 +37,8 @@ struct audit_watch;
struct audit_tree;
struct sk_buff;
struct kern_ipc_perm;
+struct lsm_id;
+struct lsm_prop;
struct audit_krule {
u32 pflags;
@@ -147,6 +149,10 @@ extern unsigned compat_signal_class[];
#define AUDIT_TTY_ENABLE BIT(0)
#define AUDIT_TTY_LOG_PASSWD BIT(1)
+/* bit values for audit_cfg_lsm */
+#define AUDIT_CFG_LSM_SECCTX_SUBJECT BIT(0)
+#define AUDIT_CFG_LSM_SECCTX_OBJECT BIT(1)
+
struct filename;
#define AUDIT_OFF 0
@@ -185,6 +191,8 @@ extern void audit_log_path_denied(int type,
const char *operation);
extern void audit_log_lost(const char *message);
+extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
+extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
@@ -210,6 +218,8 @@ extern u32 audit_enabled;
extern int audit_signal_info(int sig, struct task_struct *t);
+extern void audit_cfg_lsm(const struct lsm_id *lsmid, int flags);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -245,6 +255,16 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline void audit_log_path_denied(int type, const char *operation)
{ }
+static inline int audit_log_subj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
+static inline int audit_log_obj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
@@ -269,6 +289,9 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
return 0;
}
+static inline void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
+{ }
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -527,7 +550,7 @@ static inline void audit_log_kern_module(const char *name)
static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
- if (!audit_dummy_context())
+ if (audit_enabled)
__audit_fanotify(response, friar);
}
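
Usage sketch (illustrative, not part of the patch): with CONFIG_AUDIT enabled, a
caller holding an audit_buffer can emit the current task's subject context via
the new helper. security_current_getlsmprop_subj() is assumed here as the way
to populate the lsm_prop; the !CONFIG_AUDIT stubs above simply return 0.

	struct lsm_prop prop;
	int err;

	security_current_getlsmprop_subj(&prop);	/* assumed source of prop */
	err = audit_log_subj_ctx(ab, &prop);		/* 0 on success; stub also returns 0 */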
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cc700925b802..a98c83346134 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -7,6 +7,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>
+#include <crypto/sha2.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
@@ -109,6 +110,7 @@ struct bpf_map_ops {
long (*map_pop_elem)(struct bpf_map *map, void *value);
long (*map_peek_elem)(struct bpf_map *map, void *value);
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+ int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -206,6 +208,7 @@ enum btf_field_type {
BPF_WORKQUEUE = (1 << 10),
BPF_UPTR = (1 << 11),
BPF_RES_SPIN_LOCK = (1 << 12),
+ BPF_TASK_WORK = (1 << 13),
};
enum bpf_cgroup_storage_type {
@@ -259,6 +262,7 @@ struct btf_record {
int timer_off;
int wq_off;
int refcount_off;
+ int task_work_off;
struct btf_field fields[];
};
@@ -285,9 +289,11 @@ struct bpf_map_owner {
bool xdp_has_frags;
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
const struct btf_type *attach_func_proto;
+ enum bpf_attach_type expected_attach_type;
};
struct bpf_map {
+ u8 sha[SHA256_DIGEST_SIZE];
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
@@ -328,6 +334,7 @@ struct bpf_map {
atomic64_t sleepable_refcnt;
s64 __percpu *elem_count;
u64 cookie; /* write-once */
+ char *excl_prog_sha;
};
static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -358,6 +365,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_rb_node";
case BPF_REFCOUNT:
return "bpf_refcount";
+ case BPF_TASK_WORK:
+ return "bpf_task_work";
default:
WARN_ON_ONCE(1);
return "unknown";
@@ -396,6 +405,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_rb_node);
case BPF_REFCOUNT:
return sizeof(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return sizeof(struct bpf_task_work);
default:
WARN_ON_ONCE(1);
return 0;
@@ -428,6 +439,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_rb_node);
case BPF_REFCOUNT:
return __alignof__(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return __alignof__(struct bpf_task_work);
default:
WARN_ON_ONCE(1);
return 0;
@@ -459,6 +472,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
case BPF_UPTR:
+ case BPF_TASK_WORK:
break;
default:
WARN_ON_ONCE(1);
@@ -595,6 +609,7 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
+void bpf_task_work_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
@@ -767,12 +782,15 @@ enum bpf_type_flag {
*/
MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
+ /* DYNPTR points to skb_metadata_end()-skb_metadata_len() */
+ DYNPTR_TYPE_SKB_META = BIT(19 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
- | DYNPTR_TYPE_XDP)
+ | DYNPTR_TYPE_XDP | DYNPTR_TYPE_SKB_META)
/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
@@ -1110,7 +1128,7 @@ struct bpf_prog_offload {
*/
#define MAX_BPF_FUNC_REG_ARGS 5
-/* The argument is a structure. */
+/* The argument is a structure or a union. */
#define BTF_FMODEL_STRUCT_ARG BIT(0)
/* The argument is signed. */
@@ -1358,6 +1376,8 @@ enum bpf_dynptr_type {
BPF_DYNPTR_TYPE_SKB,
/* Underlying data is a xdp_buff */
BPF_DYNPTR_TYPE_XDP,
+ /* Points to skb_metadata_end()-skb_metadata_len() */
+ BPF_DYNPTR_TYPE_SKB_META,
};
int bpf_dynptr_check_size(u32 size);
@@ -1619,6 +1639,7 @@ struct bpf_prog_aux {
bool priv_stack_requested;
bool changes_pkt_data;
bool might_sleep;
+ bool kprobe_write_ctx;
u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
struct bpf_arena *arena;
@@ -1628,6 +1649,7 @@ struct bpf_prog_aux {
/* function name for valid attach_btf_id */
const char *attach_func_name;
struct bpf_prog **func;
+ struct bpf_prog_aux *main_prog_aux;
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab;
@@ -1711,7 +1733,10 @@ struct bpf_prog {
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
+ union {
+ u8 digest[SHA256_DIGEST_SIZE];
+ u8 tag[BPF_TAG_SIZE];
+ };
struct bpf_prog_stats __percpu *stats;
int __percpu *active;
unsigned int (*bpf_func)(const void *ctx,
@@ -1985,6 +2010,7 @@ static inline void bpf_module_put(const void *data, struct module *owner)
module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
+u32 bpf_struct_ops_id(const void *kdata);
#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
@@ -2411,6 +2437,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
+void bpf_obj_free_task_work(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
@@ -2697,7 +2724,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
@@ -2874,6 +2901,7 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -3161,6 +3189,11 @@ static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
}
+
+static inline void bpf_prog_report_arena_violation(bool write, unsigned long addr,
+ unsigned long fault_ip)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
static __always_inline int
@@ -3403,6 +3436,38 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+#if defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL)
+
+struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags);
+struct bpf_key *bpf_lookup_system_key(u64 id);
+void bpf_key_put(struct bpf_key *bkey);
+int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring);
+
+#else
+static inline struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
+{
+ return NULL;
+}
+
+static inline struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+ return NULL;
+}
+
+static inline void bpf_key_put(struct bpf_key *bkey)
+{
+}
+
+static inline int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
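
Usage sketch (illustrative only) of how the relocated key-lookup and PKCS#7
helpers compose; data and sig are assumed to be dynptrs already wrapping the
blob and its detached signature:

	struct bpf_key *kr = bpf_lookup_user_key(serial, 0);
	int err;

	if (!kr)
		return -ENOKEY;
	err = bpf_verify_pkcs7_signature(&data, &sig, kr);	/* 0 iff the signature checks out */
	bpf_key_put(kr);	/* drop the reference taken by the lookup */
	return err;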
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 94defa405c85..4c497e839526 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -26,28 +26,6 @@
/* Patch buffer size */
#define INSN_BUF_SIZE 32
-/* Liveness marks, used for registers and spilled-regs (in stack slots).
- * Read marks propagate upwards until they find a write mark; they record that
- * "one of this state's descendants read this reg" (and therefore the reg is
- * relevant for states_equal() checks).
- * Write marks collect downwards and do not propagate; they record that "the
- * straight-line code that reached this state (from its parent) wrote this reg"
- * (and therefore that reads propagated from this state or its descendants
- * should not propagate to its parent).
- * A state with a write mark can receive read marks; it just won't propagate
- * them to its parent, since the write mark is a property, not of the state,
- * but of the link between it and its parent. See mark_reg_read() and
- * mark_stack_slot_read() in kernel/bpf/verifier.c.
- */
-enum bpf_reg_liveness {
- REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
- REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
- REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
- REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
- REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
- REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
-};
-
#define ITER_PREFIX "bpf_iter_"
enum bpf_iter_state {
@@ -212,8 +190,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* parentage chain for liveness checking */
- struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
@@ -226,7 +202,6 @@ struct bpf_reg_state {
* patching which only happens after main verification finished.
*/
s32 subreg_def;
- enum bpf_reg_liveness live;
/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
bool precise;
};
@@ -445,6 +420,7 @@ struct bpf_verifier_state {
bool speculative;
bool in_sleepable;
+ bool cleaned;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
@@ -665,6 +641,7 @@ struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
/* offsets in range [stack_depth .. fastcall_stack_off)
@@ -744,6 +721,8 @@ struct bpf_scc_info {
struct bpf_scc_visit visits[];
};
+struct bpf_liveness;
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -794,7 +773,10 @@ struct bpf_verifier_env {
struct {
int *insn_state;
int *insn_stack;
- /* vector of instruction indexes sorted in post-order */
+ /*
+ * vector of instruction indexes sorted in post-order, grouped by subprogram,
+ * see bpf_subprog_info->postorder_start.
+ */
int *insn_postorder;
int cur_stack;
/* current position in the insn_postorder vector */
@@ -842,6 +824,7 @@ struct bpf_verifier_env {
struct bpf_insn insn_buf[INSN_BUF_SIZE];
struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
struct bpf_scc_callchain callchain_buf;
+ struct bpf_liveness *liveness;
/* array of pointers to bpf_scc_info indexed by SCC id */
struct bpf_scc_info **scc_info;
u32 scc_cnt;
@@ -875,13 +858,15 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
#define verifier_bug_if(cond, env, fmt, args...) \
({ \
bool __cond = (cond); \
- if (unlikely(__cond)) { \
- BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
- bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
- } \
+ if (unlikely(__cond)) \
+ verifier_bug(env, fmt " (" #cond ")", ##args); \
(__cond); \
})
-#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)
+#define verifier_bug(env, fmt, args...) \
+ ({ \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
+ })
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
@@ -962,6 +947,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
case BPF_PROG_TYPE_STRUCT_OPS:
return prog->aux->jits_use_priv_stack;
case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_SYSCALL:
return false;
default:
return true;
@@ -1062,4 +1048,21 @@ void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifie
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno);
+struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
+int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]);
+void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
+bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
+
+int bpf_stack_liveness_init(struct bpf_verifier_env *env);
+void bpf_stack_liveness_free(struct bpf_verifier_env *env);
+int bpf_update_live_stack(struct bpf_verifier_env *env);
+int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
+void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
+int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
+int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
+int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
+void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
+
#endif /* _LINUX_BPF_VERIFIER_H */
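
Note the reworked macros above: verifier_bug() now warns and logs directly,
while verifier_bug_if() still evaluates to the condition so callers can branch
on it. A sketch of the intended call pattern (illustrative):

	if (verifier_bug_if(regno >= MAX_BPF_REG, env,
			    "unknown register %u", regno))
		return -EFAULT;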
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 9eda6b113f9b..f06976ffb63f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -86,7 +86,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used __retain noinline
+#define __bpf_kfunc __used __retain __noclone noinline
#define __bpf_kfunc_start_defs() \
__diag_push(); \
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index 0bf7d33a1048..7fcec025c5e0 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -96,6 +96,14 @@ enum cc_attr {
* enabled to run SEV-SNP guests.
*/
CC_ATTR_HOST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_SNP_SECURE_AVIC: Secure AVIC mode is active.
+ *
+ * The host kernel is running with the necessary features enabled
+ * to run SEV-SNP guests with full Secure AVIC capabilities.
+ */
+ CC_ATTR_SNP_SECURE_AVIC,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
diff --git a/include/linux/cdx/bitfield.h b/include/linux/cdx/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/include/linux/cdx/bitfield.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract the value of the named bit field from a 32-bit little-endian
+ * dword
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Creates the in-place (shifted and masked) value of the named bit
+ * field from @value.
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * ORs together the in-place values of up to seven named bit fields,
+ * each created from its corresponding value.
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
+#endif /* CDX_BITFIELD_H */
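
Worked example (illustrative, using a made-up field occupying bits [8,12) of
the dword):

	#define EXAMPLE_FIELD_LBN	8
	#define EXAMPLE_FIELD_WIDTH	4

	struct cdx_dword dw;

	CDX_POPULATE_DWORD_1(dw, EXAMPLE_FIELD, 0xa);
	/* dw.cdx_u32 == cpu_to_le32(0xa << 8) */
	WARN_ON(CDX_DWORD_FIELD(dw, EXAMPLE_FIELD) != 0xa);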
diff --git a/include/linux/cdx/edac_cdx_pcol.h b/include/linux/cdx/edac_cdx_pcol.h
new file mode 100644
index 000000000000..749db33bb482
--- /dev/null
+++ b/include/linux/cdx/edac_cdx_pcol.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+#include <linux/cdx/mcdi.h>
+
+#define MC_CMD_EDAC_GET_DDR_CONFIG_OUT_WORD_LENGTH_LEN 4
+/* Number of registers for the DDR controller */
+#define MC_CMD_GET_DDR_CONFIG_OFST 4
+#define MC_CMD_GET_DDR_CONFIG_LEN 4
+
+/***********************************/
+/* MC_CMD_EDAC_GET_DDR_CONFIG
+ * Provides detailed configuration for the DDR controller of the given index.
+ */
+#define MC_CMD_EDAC_GET_DDR_CONFIG 0x3
+
+/* MC_CMD_EDAC_GET_DDR_CONFIG_IN msgrequest */
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_OFST 0
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN 4
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/include/linux/cdx/mcdi.h b/include/linux/cdx/mcdi.h
new file mode 100644
index 000000000000..74075305cba4
--- /dev/null
+++ b/include/linux/cdx/mcdi.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "linux/cdx/bitfield.h"
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT (60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted but the MC rejected it for lack of
+ * resources, as the MC has too many outstanding commands. The command will
+ * be retried once another command returns.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI firmware interface, used to interact
+ * with the CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated NIC
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
+#endif /* CDX_MCDI_H */
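
Usage sketch (illustrative; the command constants come from edac_cdx_pcol.h
above, and cdx is assumed to be a struct cdx_mcdi already set up via
cdx_mcdi_init()):

	MCDI_DECLARE_BUF(inbuf, MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_DDR_CONFIG_OFST + MC_CMD_GET_DDR_CONFIG_LEN);
	size_t outlen;
	int rc;

	/* select DDR controller 0 */
	MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, 0);
	rc = cdx_mcdi_rpc(cdx, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (!rc && outlen >= MC_CMD_GET_DDR_CONFIG_OFST + MC_CMD_GET_DDR_CONFIG_LEN)
		rc = MCDI_DWORD(outbuf, GET_DDR_CONFIG);	/* first response dword */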
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6b93a64115fe..93318fce31f3 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -91,6 +91,12 @@ enum {
* cgroup_threadgroup_rwsem. This makes hot path operations such as
* forks and exits into the slow path and more expensive.
*
+ * Alleviate the contention between fork, exec, exit operations and
* writing to cgroup.procs by taking a per-threadgroup rwsem instead of
+ * the global cgroup_threadgroup_rwsem. Fork and other operations
+ * from threads in different thread groups no longer contend with
+ * writing to cgroup.procs.
+ *
* The static usage pattern of creating a cgroup, enabling controllers,
* and then seeding it with CLONE_INTO_CGROUP doesn't require write
* locking cgroup_threadgroup_rwsem and thus doesn't benefit from
@@ -140,6 +146,17 @@ enum {
__CFTYPE_ADDED = (1 << 18),
};
+enum cgroup_attach_lock_mode {
+ /* Default */
+ CGRP_ATTACH_LOCK_GLOBAL,
+
+ /* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
+ CGRP_ATTACH_LOCK_NONE,
+
+ /* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
+ CGRP_ATTACH_LOCK_PER_THREADGROUP,
+};
+
/*
* cgroup_file is the handle for a file instance created in a cgroup which
* is used, for example, to generate file changed notifications. This can
@@ -433,6 +450,23 @@ struct cgroup_freezer_state {
* frozen, SIGSTOPped, and PTRACEd.
*/
int nr_frozen_tasks;
+
+ /* Freeze time data consistency protection */
+ seqcount_t freeze_seq;
+
+ /*
+ * Most recent time the cgroup was requested to freeze.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 freeze_start_nsec;
+
+ /*
+ * Total duration the cgroup has spent freezing.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 frozen_nsec;
};
struct cgroup {
@@ -746,7 +780,6 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
@@ -822,6 +855,7 @@ struct cgroup_subsys {
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+extern bool cgroup_enable_per_threadgroup_rwsem;
struct cgroup_of_peak {
unsigned long value;
@@ -833,11 +867,14 @@ struct cgroup_of_peak {
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
- * using a percpu_rw_semaphore.
+ * using a global percpu_rw_semaphore and a per-threadgroup rw_semaphore when
+ * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
+ if (cgroup_enable_per_threadgroup_rwsem)
+ down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
@@ -848,6 +885,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
+ if (cgroup_enable_per_threadgroup_rwsem)
+ up_read(&tsk->signal->cgroup_threadgroup_rwsem);
percpu_up_read(&cgroup_threadgroup_rwsem);
}
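
Sketch of the paired usage around a threadgroup-changing operation (the
pattern fork/exec/exit already follow); with favordynmods on, both the global
percpu rwsem and tsk->signal->cgroup_threadgroup_rwsem end up read-held:

	cgroup_threadgroup_change_begin(current);
	/* ... mutate current's thread group ... */
	cgroup_threadgroup_change_end(current);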
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index bab98357960d..6ed477338b16 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -355,6 +355,11 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return css->flags & CSS_DYING;
}
+static inline bool css_is_online(struct cgroup_subsys_state *css)
+{
+ return css->flags & CSS_ONLINE;
+}
+
static inline bool css_is_self(struct cgroup_subsys_state *css)
{
if (css == &css->cgroup->self) {
@@ -651,6 +656,7 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *__cgroup_get_from_id(u64 id);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1e7fd3ee759e..f5c859b8131a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -78,6 +78,9 @@ struct ctl_table_header;
/* unused opcode to mark special atomic instruction */
#define BPF_PROBE_ATOMIC 0xe0
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -997,12 +1000,6 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
-{
- return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
-}
-
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -1296,7 +1293,7 @@ void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen,
proglen, pass, image, current->comm, task_pid_nr(current));
if (image)
@@ -1784,6 +1781,7 @@ int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
void *buf, unsigned long len, bool flush);
+void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset);
#else /* CONFIG_NET */
static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
void *to, u32 len)
@@ -1818,6 +1816,11 @@ static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, voi
unsigned long len, bool flush)
{
}
+
+static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_NET */
#endif /* __LINUX_FILTER_H__ */
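
Sketch (illustrative) of the new metadata accessor; exact bounds handling is
left to the caller, and the !CONFIG_NET stub above returns an ERR_PTR:

	void *meta = bpf_skb_meta_pointer(skb, off);

	if (IS_ERR_OR_NULL(meta))
		return -EINVAL;
	/* meta points 'off' bytes into the skb metadata area */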
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1ef867bb8c44..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -154,14 +154,11 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
return ktime_to_ns(timer->node.expires);
}
-static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
-{
- return ktime_sub(timer->node.expires, timer->base->get_time());
-}
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
- return timer->base->get_time();
+ return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
@@ -200,8 +197,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
- return __hrtimer_expires_remaining_adjusted(timer,
- timer->base->get_time());
+ return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}
#ifdef CONFIG_TIMERFD
@@ -363,7 +359,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
- return hrtimer_forward(timer, timer->base->get_time(), interval);
+ return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
/* Precise sleep: */
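
With get_time removed from the clock base, callbacks keep the familiar
pattern; a sketch of a 10ms periodic timer using the reworked
hrtimer_forward_now() (illustrative):

	static enum hrtimer_restart example_tick(struct hrtimer *t)
	{
		/* advance expiry relative to the timer's own clock base */
		hrtimer_forward_now(t, ms_to_ktime(10));
		return HRTIMER_RESTART;
	}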
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 84a5045f80f3..aa49ffa130e5 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -41,7 +41,6 @@
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -51,7 +50,6 @@ struct hrtimer_clock_base {
seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
- ktime_t (*get_time)(void);
ktime_t offset;
} __hrtimer_clock_base_align;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1d6b606a81ef..c67e76fbcc07 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -669,6 +669,8 @@ extern int irq_chip_set_parent_state(struct irq_data *data,
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
@@ -976,10 +978,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY
-void irq_init_desc(unsigned int irq);
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 91b20788273d..0d1927da8055 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -61,7 +61,7 @@
extern void register_refined_jiffies(long clock_tick_rate);
-/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+/* TICK_USEC is the time between ticks in usec */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 090d1d3e19fe..79ec5a2bdcca 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -116,6 +116,9 @@ struct lsm_blob_sizes {
int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
int lbs_tun_dev;
int lbs_bdev;
+ int lbs_bpf_map;
+ int lbs_bpf_prog;
+ int lbs_bpf_token;
};
/*
diff --git a/include/linux/msi.h b/include/linux/msi.h
index faac634ac230..d415dd15a0a9 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -564,6 +564,8 @@ enum {
MSI_FLAG_PARENT_PM_DEV = (1 << 8),
/* Support for parent mask/unmask */
MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
+ /* Support for parent startup/shutdown */
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT = (1 << 10),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ec9d96025683..fd1d91017b99 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -859,7 +859,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
- atomic_t mmap_count;
+ refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
@@ -1719,7 +1719,7 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index 95d852aef130..726d89143842 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -36,9 +36,13 @@ struct omap_dm_timer_ops {
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger, int autoreload);
int (*get_pwm_status)(struct omap_dm_timer *timer);
+ int (*set_cap)(struct omap_dm_timer *timer,
+ int autoreload, bool config_period);
+ int (*get_cap_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ unsigned int (*read_cap)(struct omap_dm_timer *timer, bool is_period);
int (*write_counter)(struct omap_dm_timer *timer,
unsigned int value);
unsigned int (*read_status)(struct omap_dm_timer *timer);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 1fad1c8a4c76..102202185d7a 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -372,7 +372,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
/*
* Migrate-Disable and why it is undesired.
*
- * When a preempted task becomes elegible to run under the ideal model (IOW it
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
@@ -387,7 +387,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* - a lower priority tasks; which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
- * in a migrate_disable() section, reducing it's available bandwidth.
+ * in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
@@ -399,7 +399,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
- * all these primitives employ migirate_disable() to restore this implicit
+ * all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
@@ -407,7 +407,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
- * a schedulability theory that does not depend on abritrary migration.
+ * a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
@@ -424,8 +424,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* work-conserving schedulers.
*
*/
-extern void migrate_disable(void);
-extern void migrate_enable(void);
/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
@@ -471,7 +469,6 @@ static __always_inline void preempt_enable_nested(void)
DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
-DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
#ifdef CONFIG_PREEMPT_DYNAMIC
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
index 1504fb012c05..540abf7de048 100644
--- a/include/linux/psp-platform-access.h
+++ b/include/linux/psp-platform-access.h
@@ -7,6 +7,8 @@
enum psp_platform_access_msg {
PSP_CMD_NONE = 0x0,
+ PSP_SFS_GET_FW_VERSIONS,
+ PSP_SFS_UPDATE,
PSP_CMD_HSTI_QUERY = 0x14,
PSP_I2C_REQ_BUS_CMD = 0x64,
PSP_DYNAMIC_BOOST_GET_NONCE,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 120536f4c6eb..f67f96711f0d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -24,7 +24,7 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
@@ -962,6 +962,20 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
preempt_enable_notrace();
}
+static __always_inline void rcu_read_lock_dont_migrate(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_disable();
+ rcu_read_lock();
+}
+
+static inline void rcu_read_unlock_migrate(void)
+{
+ rcu_read_unlock();
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_enable();
+}
+
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
* @p: The pointer to be initialized.
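
The new helpers pair an RCU read-side critical section with migrate
disable/enable only where PREEMPT_RCU makes that necessary. Sketch
(illustrative; gp is a hypothetical RCU-protected pointer):

	rcu_read_lock_dont_migrate();
	p = rcu_dereference(gp);
	/* use p; the task cannot migrate CPUs here under PREEMPT_RCU */
	rcu_read_unlock_migrate();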
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 6fb4894b8cfd..a7d92718b653 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -157,27 +157,42 @@ struct rdt_ctrl_domain {
};
/**
+ * struct mbm_cntr_cfg - Assignable counter configuration.
+ * @evtid: MBM event to which the counter is assigned. Only valid
+ * if @rdtgrp is not NULL.
+ * @rdtgrp: resctrl group assigned to the counter. NULL if the
+ * counter is free.
+ */
+struct mbm_cntr_cfg {
+ enum resctrl_event_id evtid;
+ struct rdtgroup *rdtgrp;
+};
+
+/**
* struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
* @hdr: common header for different domain types
* @ci_id: cache info id for this domain
* @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_states: Per-event pointer to the MBM event's saved state.
+ * An MBM event's state is an array of struct mbm_state
+ * indexed by RMID on x86 or combined CLOSID, RMID on Arm.
* @mbm_over: worker to periodically read MBM h/w counters
* @cqm_limbo: worker to periodically read CQM h/w counters
* @mbm_work_cpu: worker CPU for MBM h/w counters
* @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @cntr_cfg: array of assignable counters' configuration (indexed
+ * by counter ID)
*/
struct rdt_mon_domain {
struct rdt_domain_hdr hdr;
unsigned int ci_id;
unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
+ struct mbm_state *mbm_states[QOS_NUM_L3_MBM_EVENTS];
struct delayed_work mbm_over;
struct delayed_work cqm_limbo;
int mbm_work_cpu;
int cqm_work_cpu;
+ struct mbm_cntr_cfg *cntr_cfg;
};
/**
@@ -256,39 +271,52 @@ enum resctrl_schema_fmt {
};
/**
+ * struct resctrl_mon - Monitoring related data of a resctrl resource.
+ * @num_rmid: Number of RMIDs available.
+ * @mbm_cfg_mask: Memory transactions that can be tracked when bandwidth
+ * monitoring events can be configured.
+ * @num_mbm_cntrs: Number of assignable counters.
+ * @mbm_cntr_assignable: Is the system capable of supporting counter assignment?
+ * @mbm_assign_on_mkdir: True if counters should automatically be assigned to MBM
+ * events of monitor groups created via mkdir.
+ */
+struct resctrl_mon {
+ int num_rmid;
+ unsigned int mbm_cfg_mask;
+ int num_mbm_cntrs;
+ bool mbm_cntr_assignable;
+ bool mbm_assign_on_mkdir;
+};
+
+/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
- * @num_rmid: Number of RMIDs available
* @ctrl_scope: Scope of this resource for control functions
* @mon_scope: Scope of this resource for monitor functions
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
+ * @mon: Monitoring related data.
* @ctrl_domains: RCU list of all control domains for this resource
* @mon_domains: RCU list of all monitor domains for this resource
* @name: Name to use in "schemata" file.
* @schema_fmt: Which format string and parser is used for this schema.
- * @evt_list: List of monitoring events
- * @mbm_cfg_mask: Bandwidth sources that can be tracked when bandwidth
- * monitoring events can be configured.
* @cdp_capable: Is the CDP feature available on this resource
*/
struct rdt_resource {
int rid;
bool alloc_capable;
bool mon_capable;
- int num_rmid;
enum resctrl_scope ctrl_scope;
enum resctrl_scope mon_scope;
struct resctrl_cache cache;
struct resctrl_membw membw;
+ struct resctrl_mon mon;
struct list_head ctrl_domains;
struct list_head mon_domains;
char *name;
enum resctrl_schema_fmt schema_fmt;
- struct list_head evt_list;
- unsigned int mbm_cfg_mask;
bool cdp_capable;
};
@@ -372,8 +400,29 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+
+bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
+
bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
+{
+ return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
+
+/* Iterate over all memory bandwidth events */
+#define for_each_mbm_event_id(eventid) \
+ for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID; \
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)
+
+/* Iterate over memory bandwidth arrays in domain structures */
+#define for_each_mbm_idx(idx) \
+ for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
+
/**
* resctrl_arch_mon_event_config_write() - Write the config for an event.
* @config_info: struct resctrl_mon_config_info describing the resource, domain
@@ -416,6 +465,26 @@ static inline u32 resctrl_get_config_index(u32 closid,
bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+/**
+ * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
+ * mode is enabled.
+ * @r: Pointer to the resource structure.
+ *
+ * Return:
+ * true if the assignment mode is enabled, false otherwise.
+ */
+bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
+ * @r: Pointer to the resource structure.
+ * @enable: Set to true to enable, false to disable the assignment mode.
+ *
+ * Return:
+ * 0 on success, < 0 on error.
+ */
+int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
+
/*
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
@@ -528,6 +597,63 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *
*/
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
+/**
+ * resctrl_arch_config_cntr() - Configure the counter with its new RMID
+ * and event details.
+ * @r: Resource structure.
+ * @d: The domain in which counter with ID @cntr_id should be configured.
+ * @evtid: Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
+ * or QOS_L3_MBM_LOCAL_EVENT_ID).
+ * @rmid: RMID.
+ * @closid: CLOSID.
+ * @cntr_id: Counter ID to configure.
+ * @assign: True to assign the counter or update an existing assignment,
+ * false to unassign the counter.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ enum resctrl_event_id evtid, u32 rmid, u32 closid,
+ u32 cntr_id, bool assign);
+
+/**
+ * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
+ * assigned to the RMID, event pair for this resource
+ * and domain.
+ * @r: Resource that the counter should be read from.
+ * @d: Domain that the counter should be read from.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to read.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ * @val: Result of the counter read in bytes.
+ *
+ * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
+ * Called from a non-migratable process context via smp_call_on_cpu() unless all
+ * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
+ * @r: The domain's resource.
+ * @d: The counter ID's domain.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to reset.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid);
+
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index a25fb9c4070d..acfe07860b34 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -34,11 +34,18 @@
/* Max event bits supported */
#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
-/*
- * Event IDs, the values match those used to program IA32_QM_EVTSEL before
- * reading IA32_QM_CTR on RDT systems.
- */
+/* Number of memory transactions that an MBM event can be configured with */
+#define NUM_MBM_TRANSACTIONS 7
+
+/* Event IDs */
enum resctrl_event_id {
+ /* Must match value of first event below */
+ QOS_FIRST_EVENT = 0x01,
+
+ /*
+ * These values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
QOS_L3_OCCUP_EVENT_ID = 0x01,
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
@@ -47,4 +54,7 @@ enum resctrl_event_id {
QOS_NUM_EVENTS,
};
+#define QOS_NUM_L3_MBM_EVENTS (QOS_L3_MBM_LOCAL_EVENT_ID - QOS_L3_MBM_TOTAL_EVENT_ID + 1)
+#define MBM_STATE_IDX(evt) ((evt) - QOS_L3_MBM_TOTAL_EVENT_ID)
+
#endif /* __LINUX_RESCTRL_TYPES_H */
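
Sketch of how the new iterator and index macros combine with the mbm_states
array from the resctrl.h hunk above (d is a struct rdt_mon_domain; idx is an
RMID-derived index, per the kernel-doc):

	enum resctrl_event_id evt;

	for_each_mbm_event_id(evt) {
		struct mbm_state *ms = &d->mbm_states[MBM_STATE_IDX(evt)][idx];
		/* ms is this event's saved state for idx */
	}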
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index a96fd345aa38..69553e7c14c1 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -7,6 +7,12 @@
#include <linux/preempt.h>
#include <linux/sched.h>
+#ifdef CONFIG_MEMBARRIER
+# define RSEQ_EVENT_GUARD irq
+#else
+# define RSEQ_EVENT_GUARD preempt
+#endif
+
/*
* Map the event mask on the user-space ABI enum rseq_cs_flags
* for direct mask checks.
@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
+ scoped_guard(RSEQ_EVENT_GUARD)
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
rseq_handle_notify_resume(ksig, regs);
}
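
The RSEQ_EVENT_GUARD choice matters because, with CONFIG_MEMBARRIER, the event mask can also be updated from membarrier's IPI, so preemption protection alone is insufficient. Roughly, the scoped_guard() above expands to one of the following (a sketch, not the literal generated code):

/* CONFIG_MEMBARRIER=y: exclude the membarrier IPI as well. */
local_irq_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
local_irq_enable();

/* CONFIG_MEMBARRIER=n: equivalent to the old open-coded form. */
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();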
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e4ce0a76831e..cbb7340c5866 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -49,6 +49,9 @@
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#ifndef COMPILE_OFFSETS
+#include <generated/rq-offsets.h>
+#endif
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -881,6 +884,11 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
+#ifdef CONFIG_CFS_BANDWIDTH
+ struct callback_head sched_throttle_work;
+ struct list_head throttle_node;
+ bool throttled;
+#endif
#endif
@@ -2310,4 +2318,114 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+#ifndef MODULE
+#ifndef COMPILE_OFFSETS
+
+extern void ___migrate_enable(void);
+
+struct rq;
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+/*
+ * The "struct rq" is not available here, so we can't access the
+ * "runqueues" with this_cpu_ptr(), as the compilation will fail in
+ * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr():
+ * typeof((ptr) + 0)
+ *
+ * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
+ */
+#ifdef CONFIG_SMP
+#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
+#else
+#define this_rq_raw() PERCPU_PTR(&runqueues)
+#endif
+#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
+
+static inline void __migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
+#endif
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
+
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ guard(preempt)();
+ if (unlikely(p->cpus_ptr != &p->cpus_mask))
+ ___migrate_enable();
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+ * regular cpus_mask, otherwise things that race (e.g.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq_pinned()--;
+}
+
+static inline void __migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Warn about overflow half-way through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
+ p->migration_disabled++;
+ return;
+ }
+
+ guard(preempt)();
+ this_rq_pinned()++;
+ p->migration_disabled = 1;
+}
+#else /* !COMPILE_OFFSETS */
+static inline void __migrate_disable(void) { }
+static inline void __migrate_enable(void) { }
+#endif /* !COMPILE_OFFSETS */
+
+/*
+ * To avoid exporting the runqueues variable, migrate_enable() and
+ * migrate_disable() are also defined out-of-line and exported in
+ * kernel/sched/core.c for use by modules. kernel/sched/core.c defines
+ * "INSTANTIATE_EXPORTED_MIGRATE_DISABLE" to select those definitions.
+ */
+#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
+static inline void migrate_disable(void)
+{
+ __migrate_disable();
+}
+
+static inline void migrate_enable(void)
+{
+ __migrate_enable();
+}
+#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+
+#else /* MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* MODULE */
+
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
#endif
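
The guard defined at the end makes the usual pairing scope-based; a minimal usage sketch (the function is hypothetical):

#include <linux/sched.h>

/* Illustrative only: stay on this CPU for the whole scope. */
static void report_pinned_cpu(void)
{
        scoped_guard(migrate) {
                /*
                 * current cannot migrate inside this scope, so
                 * smp_processor_id() stays stable even though
                 * preemption remains enabled.
                 */
                pr_info("pinned to CPU%u\n", smp_processor_id());
        }
}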
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 7047101dbf58..d82b7a9b0658 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -108,7 +108,11 @@ enum scx_kf_mask {
SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
/* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
- /* ops.dequeue (in REST) may be nested inside DISPATCH */
+ /*
+ * ops.dispatch() may temporarily release the rq lock, so ENQUEUE and
+ * SELECT_CPU may be nested inside it. ops.dequeue() (in REST) may also
+ * be nested inside DISPATCH.
+ */
SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1ef1edbaaf79..7d6449982822 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -226,6 +226,10 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
+#ifdef CONFIG_CGROUPS
+ struct rw_semaphore cgroup_threadgroup_rwsem;
+#endif
+
/*
* Thread is the potential origin of an oom condition; kill first on
* oom
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 5263746b63e8..bbcfdf12aa6e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -30,33 +30,24 @@ struct sd_flag_debug {
};
extern const struct sd_flag_debug sd_flag_debug[];
+struct sched_domain_topology_level;
+
#ifdef CONFIG_SCHED_SMT
-static inline int cpu_smt_flags(void)
-{
- return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
-}
+extern int cpu_smt_flags(void);
+extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_CLUSTER
-static inline int cpu_cluster_flags(void)
-{
- return SD_CLUSTER | SD_SHARE_LLC;
-}
+extern int cpu_cluster_flags(void);
+extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_MC
-static inline int cpu_core_flags(void)
-{
- return SD_SHARE_LLC;
-}
+extern int cpu_core_flags(void);
+extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
-#ifdef CONFIG_NUMA
-static inline int cpu_numa_flags(void)
-{
- return SD_NUMA;
-}
-#endif
+extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
extern int arch_asym_cpu_priority(int cpu);
@@ -172,7 +163,7 @@ bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);
struct sd_data {
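
Under the new typedef, every per-level mask callback receives the topology level as well as the CPU. A hedged sketch of a callback in the new form (my_tl_mask() is hypothetical; tl_smt_mask() and friends above are the real instances):

/* Illustrative only: a sched_domain_mask_f under the new signature. */
static const struct cpumask *my_tl_mask(struct sched_domain_topology_level *tl,
                                        int cpu)
{
        /*
         * @tl gives the callback access to level-specific data; this
         * sketch ignores it and returns the CPU's node mask, as the
         * renamed cpu_node_mask() helper does.
         */
        return cpumask_of_node(cpu_to_node(cpu));
}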
diff --git a/include/linux/security.h b/include/linux/security.h
index 9a1d4a6c8673..bd33f194c94a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -567,7 +567,8 @@ int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
-int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp);
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+ int lsmid);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(struct lsm_context *cp);
void security_inode_invalidate_secctx(struct inode *inode);
@@ -1551,7 +1552,8 @@ static inline int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
}
static inline int security_lsmprop_to_secctx(struct lsm_prop *prop,
- struct lsm_context *cp)
+ struct lsm_context *cp,
+ int lsmid)
{
return -EOPNOTSUPP;
}
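
A hedged sketch of a call site under the new signature; the helper name is hypothetical, and the point is that @lsmid now selects which LSM renders the context:

#include <linux/security.h>

/* Illustrative only: render @prop as text via the LSM chosen by @lsmid. */
static int show_prop(struct lsm_prop *prop, int lsmid)
{
        struct lsm_context cp;
        int rc;

        rc = security_lsmprop_to_secctx(prop, &cp, lsmid);
        if (rc < 0)
                return rc;      /* e.g. -EOPNOTSUPP when !CONFIG_SECURITY */

        pr_info("context=%s\n", cp.context);
        security_release_secctx(&cp);
        return 0;
}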
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 77f45e5d4413..66c06fcdfe19 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1005,6 +1005,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
asmlinkage long sys_uretprobe(void);
+asmlinkage long sys_uprobe(void);
+
/* pciconfig: alpha, arm, arm64, ia64, sparc */
asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
unsigned long off, unsigned long len,
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 57ed3035cc30..c52b862dad45 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -51,9 +51,15 @@ struct tnum tnum_xor(struct tnum a, struct tnum b);
/* Multiply two tnums, return @a * @b */
struct tnum tnum_mul(struct tnum a, struct tnum b);
+/* Return true if the bits known in both @a and @b agree, i.e. they share a value */
+bool tnum_overlap(struct tnum a, struct tnum b);
+
/* Return a tnum representing numbers satisfying both @a and @b */
struct tnum tnum_intersect(struct tnum a, struct tnum b);
+/* Return a tnum representing numbers satisfying either @a or @b */
+struct tnum tnum_union(struct tnum a, struct tnum b);
+
/* Return @a with all but the lowest @size bytes cleared */
struct tnum tnum_cast(struct tnum a, u8 size);
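
For intuition, a worked illustration of the two new set-style operations (4-bit values for brevity; a tnum is a (value, mask) pair in which mask bits are unknown and value carries the known bits):

/*
 * a = {.value = 0b0100, .mask = 0b0010}   represents {0b0100, 0b0110}
 * b = {.value = 0b0110, .mask = 0b0001}   represents {0b0110, 0b0111}
 *
 * tnum_overlap(a, b)   -> true: bits 3..2 are known in both and agree (01)
 * tnum_intersect(a, b) -> {.value = 0b0110, .mask = 0b0000}, exactly {0b0110}
 * tnum_union(a, b)     -> {.value = 0b0100, .mask = 0b0011}, covering
 *                         {0b0100..0b0111}: a sound over-approximation of
 *                         the exact set union, since a tnum cannot encode
 *                         an arbitrary set.
 */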
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 33b7fda97d39..6575af39fd10 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -260,7 +260,7 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
#endif
-static inline const struct cpumask *cpu_cpu_mask(int cpu)
+static inline const struct cpumask *cpu_node_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 915303a82d84..ee3d36eda45d 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -17,6 +17,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
+#include <linux/mutex.h>
struct uprobe;
struct vm_area_struct;
@@ -185,8 +186,14 @@ struct xol_area;
struct uprobes_state {
struct xol_area *xol_area;
+#ifdef CONFIG_X86_64
+ struct hlist_head head_tramps;
+#endif
};
+typedef int (*uprobe_write_verify_t)(struct page *page, unsigned long vaddr,
+ uprobe_opcode_t *insn, int nbytes, void *data);
+
extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
@@ -194,7 +201,11 @@ extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
-extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t);
+extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t,
+ bool is_register);
+extern int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long opcode_vaddr,
+ uprobe_opcode_t *insn, int nbytes, uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr,
+ void *data);
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
@@ -224,8 +235,13 @@ extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
extern void uprobe_handle_trampoline(struct pt_regs *regs);
-extern void *arch_uprobe_trampoline(unsigned long *psize);
+extern void *arch_uretprobe_trampoline(unsigned long *psize);
extern unsigned long uprobe_get_trampoline_vaddr(void);
+extern void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len);
+extern void arch_uprobe_clear_state(struct mm_struct *mm);
+extern void arch_uprobe_init_state(struct mm_struct *mm);
+extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
+extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
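
A hedged sketch of what a uprobe_write_verify_t implementation might look like; the name, the expected-bytes convention passed through @data, and the 0-means-proceed return convention are all assumptions for illustration:

/* Illustrative only: refuse to patch unless the mapped bytes match @data. */
static int verify_expected_insn(struct page *page, unsigned long vaddr,
                                uprobe_opcode_t *insn, int nbytes, void *data)
{
        uprobe_opcode_t cur;

        /* Read what is currently mapped at @vaddr (helper exported above). */
        uprobe_copy_from_page(page, vaddr, &cur, sizeof(cur));

        return memcmp(&cur, data, sizeof(cur)) ? -EINVAL : 0;
}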
diff --git a/include/linux/verification.h b/include/linux/verification.h
index 4f3022d081c3..dec7f2beabfd 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -36,6 +36,7 @@ enum key_being_used_for {
VERIFYING_KEY_SIGNATURE,
VERIFYING_KEY_SELF_SIGNATURE,
VERIFYING_UNSPECIFIED_SIGNATURE,
+ VERIFYING_BPF_SIGNATURE,
NR__KEY_BEING_USED_FOR
};
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 45d5dd470ff6..dabc351cc127 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -410,7 +410,7 @@ enum wq_flags {
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
/* BH wq only allows the following flags */
- __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI,
+ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};
enum wq_consts {
@@ -434,10 +434,10 @@ enum wq_consts {
* short queue flush time. Don't queue works which can run for too
* long.
*
- * system_highpri_wq is similar to system_wq but for work items which
+ * system_highpri_wq is similar to system_percpu_wq but for work items which
* require WQ_HIGHPRI.
*
- * system_long_wq is similar to system_wq but may host long running
+ * system_long_wq is similar to system_percpu_wq but may host long running
* works. Queue flushing might take relatively long.
*
 * system_dfl_wq is an unbound workqueue. Workers are not bound to
@@ -445,13 +445,13 @@ enum wq_consts {
* executed immediately as long as max_active limit is not reached and
* resources are available.
*
- * system_freezable_wq is equivalent to system_wq except that it's
+ * system_freezable_wq is equivalent to system_percpu_wq except that it's
* freezable.
*
* *_power_efficient_wq are inclined towards saving power and converted
* into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
* they are same as their non-power-efficient counterparts - e.g.
- * system_power_efficient_wq is identical to system_wq if
+ * system_power_efficient_wq is identical to system_percpu_wq if
* 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
*
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
@@ -502,7 +502,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
* that the sum of per-node max_active's may be larger than @max_active.
*
- * For detailed information on %WQ_* flags, please refer to
+ * For detailed information on %WQ_\* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
@@ -570,7 +570,7 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
- alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
WQ_MEM_RECLAIM, 1, (name))
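
With WQ_PERCPU now spelled out even in the legacy wrapper, new callers are expected to choose placement explicitly rather than rely on a default; a minimal sketch (names hypothetical):

#include <linux/workqueue.h>

/* Illustrative only: explicitly per-CPU vs explicitly unbound. */
static struct workqueue_struct *my_percpu_wq;
static struct workqueue_struct *my_unbound_wq;

static int __init my_wqs_init(void)
{
        my_percpu_wq = alloc_workqueue("my_percpu", WQ_PERCPU, 0);
        my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
        if (!my_percpu_wq || !my_unbound_wq)
                return -ENOMEM;
        return 0;
}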
@@ -708,7 +708,7 @@ static inline bool mod_delayed_work(struct workqueue_struct *wq,
*/
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
- return queue_work_on(cpu, system_wq, work);
+ return queue_work_on(cpu, system_percpu_wq, work);
}
/**
@@ -727,7 +727,7 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
*/
static inline bool schedule_work(struct work_struct *work)
{
- return queue_work(system_wq, work);
+ return queue_work(system_percpu_wq, work);
}
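
Callers of the helper are unaffected by the rename; the work item still lands on the per-CPU system workqueue. A usage sketch (work item and handler hypothetical):

/* Illustrative only. */
static void my_work_fn(struct work_struct *work)
{
        /* Typically executes on the CPU schedule_work() was called on. */
}
static DECLARE_WORK(my_work, my_work_fn);

static void kick_my_work(void)
{
        schedule_work(&my_work); /* == queue_work(system_percpu_wq, &my_work) */
}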
/**
@@ -770,21 +770,21 @@ extern void __warn_flushing_systemwide_wq(void)
#define flush_scheduled_work() \
({ \
__warn_flushing_systemwide_wq(); \
- __flush_workqueue(system_wq); \
+ __flush_workqueue(system_percpu_wq); \
})
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
\
- if ((__builtin_constant_p(_wq == system_wq) && \
- _wq == system_wq) || \
+ if ((__builtin_constant_p(_wq == system_percpu_wq) && \
+ _wq == system_percpu_wq) || \
(__builtin_constant_p(_wq == system_highpri_wq) && \
_wq == system_highpri_wq) || \
(__builtin_constant_p(_wq == system_long_wq) && \
_wq == system_long_wq) || \
- (__builtin_constant_p(_wq == system_unbound_wq) && \
- _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_dfl_wq) && \
+ _wq == system_dfl_wq) || \
(__builtin_constant_p(_wq == system_freezable_wq) && \
_wq == system_freezable_wq) || \
(__builtin_constant_p(_wq == system_power_efficient_wq) && \
@@ -807,7 +807,7 @@ extern void __warn_flushing_systemwide_wq(void)
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+ return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}
/**
@@ -821,7 +821,7 @@ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
static inline bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work(system_wq, dwork, delay);
+ return queue_delayed_work(system_percpu_wq, dwork, delay);
}
#ifndef CONFIG_SMP