author	Andrii Nakryiko <andrii@kernel.org>	2026-01-28 11:42:17 -0800
committer	Andrii Nakryiko <andrii@kernel.org>	2026-01-28 11:45:00 -0800
commit	ba225407f80ee28f42301002bd5fda8fffd85615 (patch)
tree	2515d6389f15eaa4d760ca44c43fa5ac3a486b99 /kernel/trace
parent	ae23bc81ddf7c17b663c4ed1b21e35527b0a7131 (diff)
parent	424f6a3610965d125634bc65c5d6d506582d4f7e (diff)
Merge branch 'ftrace-bpf-use-single-direct-ops-for-bpf-trampolines'
Jiri Olsa says:

====================
ftrace,bpf: Use single direct ops for bpf trampolines

hi,
while poking the multi-tracing interface I ended up with just one
ftrace_ops object to attach all trampolines.

This change allows us to use fewer direct API calls during the
attachment changes in future code, in effect speeding up the
attachment. In the current code we already get a speedup from using
just a single ftrace_ops object.

- with current code:

  Performance counter stats for 'bpftrace -e fentry:vmlinux:ksys_* {} -c true':

     6,364,157,902      cycles:k
       828,728,902      cycles:u
     1,064,803,824      instructions:u     #  1.28  insn per cycle
    23,797,500,067      instructions:k     #  3.74  insn per cycle

       4.416004987 seconds time elapsed
       0.164121000 seconds user
       1.289550000 seconds sys

- with the fix:

  Performance counter stats for 'bpftrace -e fentry:vmlinux:ksys_* {} -c true':

     6,535,857,905      cycles:k
       810,809,429      cycles:u
     1,064,594,027      instructions:u     #  1.31  insn per cycle
    23,962,552,894      instructions:k     #  3.67  insn per cycle

       1.666961239 seconds time elapsed
       0.157412000 seconds user
       1.283396000 seconds sys

The speedup seems to be related to the fact that with a single
ftrace_ops object we no longer call ftrace_shutdown (we use
ftrace_update_ops instead) and we skip the synchronize-rcu calls
(each ~100ms) at the end of that function.

rfc: https://lore.kernel.org/bpf/20250729102813.1531457-1-jolsa@kernel.org/
v1:  https://lore.kernel.org/bpf/20250923215147.1571952-1-jolsa@kernel.org/
v2:  https://lore.kernel.org/bpf/20251113123750.2507435-1-jolsa@kernel.org/
v3:  https://lore.kernel.org/bpf/20251120212402.466524-1-jolsa@kernel.org/
v4:  https://lore.kernel.org/bpf/20251203082402.78816-1-jolsa@kernel.org/
v5:  https://lore.kernel.org/bpf/20251215211402.353056-10-jolsa@kernel.org/

v6 changes:
- rename add_hash_entry_direct to add_ftrace_hash_entry_direct [Steven]
- factor hash_add/hash_sub [Steven]
- add kerneldoc header for update_ftrace_direct_* functions [Steven]
- few assorted smaller fixes [Steven]
- added missing direct_ops wrappers for the
  !CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS case [Steven]

v5 changes:
- do not export the ftrace_hash object [Steven]
- fix update_ftrace_direct_add new_filter_hash leak [ci]

v4 changes:
- rebased on top of bpf-next/master (with jmp attach changes),
  added patch 1 to deal with that
- added extra checks for update_ftrace_direct_del/mod to address
  the ci bot review

v3 changes:
- rebased on top of bpf-next/master
- fixed update_ftrace_direct_del cleanup path
- added missing inline to update_ftrace_direct_* stubs

v2 changes:
- rebased on top of bpf-next/master plus Song's livepatch fixes [1]
- renamed the API functions [2] [Steven]
- do not export the new api [Steven]
- kept the original direct interface: I'm not sure if we want to melt
  both *_ftrace_direct and the new interface into a single one. It's a
  bit different in semantics (hence the name change, as Steven
  suggested [2]), and the changes are not that big, so we can easily
  keep both APIs.

v1 changes:
- made the change x86 specific, after discussing options for arm64
  with Mark [Mark]

thanks,
jirka

[1] https://lore.kernel.org/bpf/20251027175023.1521602-1-song@kernel.org/
[2] https://lore.kernel.org/bpf/20250924050415.4aefcb91@batman.local.home/
---
====================

Link: https://patch.msgid.link/20251230145010.103439-1-jolsa@kernel.org
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
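For orientation, here is a minimal sketch (not part of this series) of
how a caller such as the bpf trampoline code might drive the new API.
The attach_trampoline() helper, the shared direct_ops and the size_bits
value are assumptions for illustration; only alloc_ftrace_hash(),
add_ftrace_hash_entry_direct(), free_ftrace_hash() and
update_ftrace_direct_add() come from this series:

  /*
   * Hypothetical caller; only the ftrace calls below are added by
   * this series, everything else is illustrative.
   */
  static struct ftrace_ops direct_ops;	/* one shared ops for all trampolines */

  static int attach_trampoline(unsigned long ip, unsigned long tramp_addr)
  {
  	struct ftrace_hash *hash;
  	int err;

  	hash = alloc_ftrace_hash(4);	/* 1 << 4 buckets, size is an assumption */
  	if (!hash)
  		return -ENOMEM;

  	/* record the ip -> trampoline mapping stored per hash entry */
  	if (!add_ftrace_hash_entry_direct(hash, ip, tramp_addr)) {
  		free_ftrace_hash(hash);
  		return -ENOMEM;
  	}

  	/* registers direct_ops on first use, ftrace_update_ops() afterwards */
  	err = update_ftrace_direct_add(&direct_ops, hash);
  	free_ftrace_hash(hash);
  	return err;
  }

Note that update_ftrace_direct_add() copies the entries into its own
hashes (via hash_add() below), so the caller-owned hash can be freed
right after the call.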
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig	3
-rw-r--r--	kernel/trace/ftrace.c	406
2 files changed, 385 insertions, 24 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index bfa2ec46e075..d7042a09fe46 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -50,6 +50,9 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
bool
+config HAVE_SINGLE_FTRACE_DIRECT_OPS
+ bool
+
config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef2d5dca6f70..8574932e66b6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -68,7 +68,6 @@
})
/* hash bits for specific function selection */
-#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1211,8 +1210,8 @@ static void __add_hash_entry(struct ftrace_hash *hash,
hash->count++;
}
-static struct ftrace_func_entry *
-add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+struct ftrace_func_entry *
+add_ftrace_hash_entry_direct(struct ftrace_hash *hash, unsigned long ip, unsigned long direct)
{
struct ftrace_func_entry *entry;
@@ -1221,11 +1220,18 @@ add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
return NULL;
entry->ip = ip;
+ entry->direct = direct;
__add_hash_entry(hash, entry);
return entry;
}
+static struct ftrace_func_entry *
+add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+{
+ return add_ftrace_hash_entry_direct(hash, ip, 0);
+}
+
static void
free_hash_entry(struct ftrace_hash *hash,
struct ftrace_func_entry *entry)
@@ -1284,7 +1290,7 @@ static void clear_ftrace_mod_list(struct list_head *head)
mutex_unlock(&ftrace_lock);
}
-static void free_ftrace_hash(struct ftrace_hash *hash)
+void free_ftrace_hash(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
return;
@@ -1324,7 +1330,7 @@ void ftrace_free_filter(struct ftrace_ops *ops)
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);
-static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
+struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
struct ftrace_hash *hash;
int size;
@@ -1398,7 +1404,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
- if (add_hash_entry(new_hash, entry->ip) == NULL)
+ if (add_ftrace_hash_entry_direct(new_hash, entry->ip, entry->direct) == NULL)
goto free_hash;
}
}
@@ -2069,7 +2075,7 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
*/
if (!ops->ops_func)
return -EBUSY;
- ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
+ ret = ops->ops_func(ops, rec->ip, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
if (ret)
return ret;
} else if (is_ipmodify) {
@@ -2625,8 +2631,13 @@ unsigned long ftrace_find_rec_direct(unsigned long ip)
static void call_direct_funcs(unsigned long ip, unsigned long pip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
- unsigned long addr = READ_ONCE(ops->direct_call);
+ unsigned long addr;
+#ifdef CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS
+ addr = ftrace_find_rec_direct(ip);
+#else
+ addr = READ_ONCE(ops->direct_call);
+#endif
if (!addr)
return;
@@ -6046,15 +6057,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
if (ftrace_hash_empty(hash))
return -EINVAL;
- /* This is a "raw" address, and this should never happen. */
- if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
- return -EINVAL;
-
mutex_lock(&direct_mutex);
- if (ops->flags & FTRACE_OPS_FL_JMP)
- addr = ftrace_jmp_set(addr);
-
/* Make sure requested entries are not already registered.. */
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
@@ -6175,13 +6179,6 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
lockdep_assert_held_once(&direct_mutex);
- /* This is a "raw" address, and this should never happen. */
- if (WARN_ON_ONCE(ftrace_is_jmp(addr)))
- return -EINVAL;
-
- if (ops->flags & FTRACE_OPS_FL_JMP)
- addr = ftrace_jmp_set(addr);
-
/* Enable the tmp_ops to have the same functions as the direct ops */
ftrace_ops_init(&tmp_ops);
tmp_ops.func_hash = ops->func_hash;
@@ -6286,6 +6283,367 @@ int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
return err;
}
EXPORT_SYMBOL_GPL(modify_ftrace_direct);
+
+static unsigned long hash_count(struct ftrace_hash *hash)
+{
+ return hash ? hash->count : 0;
+}
+
+/**
+ * hash_add - adds two struct ftrace_hash objects and returns the result
+ * @a: struct ftrace_hash object
+ * @b: struct ftrace_hash object
+ *
+ * Returns struct ftrace_hash object on success, NULL on error.
+ */
+static struct ftrace_hash *hash_add(struct ftrace_hash *a, struct ftrace_hash *b)
+{
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *add;
+ int size;
+
+ size = hash_count(a) + hash_count(b);
+ if (size > 32)
+ size = 32;
+
+ add = alloc_and_copy_ftrace_hash(fls(size), a);
+ if (!add)
+ return NULL;
+
+ size = 1 << b->size_bits;
+ for (int i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &b->buckets[i], hlist) {
+ if (add_ftrace_hash_entry_direct(add, entry->ip, entry->direct) == NULL) {
+ free_ftrace_hash(add);
+ return NULL;
+ }
+ }
+ }
+ return add;
+}
+
+/**
+ * update_ftrace_direct_add - Updates @ops by adding direct
+ * callers provided in @hash
+ * @ops: The address of the struct ftrace_ops object
+ * @hash: The address of the struct ftrace_hash object
+ *
+ * This is used to add custom direct callers (ip -> addr) to @ops,
+ * specified in @hash. The @ops will be either registered or updated.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ * -EINVAL - The @hash is empty
+ */
+int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ struct ftrace_hash *old_direct_functions = NULL;
+ struct ftrace_hash *new_direct_functions;
+ struct ftrace_hash *old_filter_hash;
+ struct ftrace_hash *new_filter_hash = NULL;
+ struct ftrace_func_entry *entry;
+ int err = -EINVAL;
+ int size;
+ bool reg;
+
+ if (!hash_count(hash))
+ return -EINVAL;
+
+ mutex_lock(&direct_mutex);
+
+ /* Make sure requested entries are not already registered. */
+ size = 1 << hash->size_bits;
+ for (int i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ if (__ftrace_lookup_ip(direct_functions, entry->ip))
+ goto out_unlock;
+ }
+ }
+
+ old_filter_hash = ops->func_hash ? ops->func_hash->filter_hash : NULL;
+
+ /* If there's nothing in filter_hash we need to register the ops. */
+ reg = hash_count(old_filter_hash) == 0;
+ if (reg) {
+ if (ops->func || ops->trampoline)
+ goto out_unlock;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED)
+ goto out_unlock;
+ }
+
+ err = -ENOMEM;
+ new_filter_hash = hash_add(old_filter_hash, hash);
+ if (!new_filter_hash)
+ goto out_unlock;
+
+ new_direct_functions = hash_add(direct_functions, hash);
+ if (!new_direct_functions)
+ goto out_unlock;
+
+ old_direct_functions = direct_functions;
+ rcu_assign_pointer(direct_functions, new_direct_functions);
+
+ if (reg) {
+ ops->func = call_direct_funcs;
+ ops->flags |= MULTI_FLAGS;
+ ops->trampoline = FTRACE_REGS_ADDR;
+ ops->local_hash.filter_hash = new_filter_hash;
+
+ err = register_ftrace_function_nolock(ops);
+ if (err) {
+ /* restore old filter on error */
+ ops->local_hash.filter_hash = old_filter_hash;
+
+ /* clean up for a possible later register call */
+ ops->func = NULL;
+ ops->trampoline = 0;
+ } else {
+ new_filter_hash = old_filter_hash;
+ }
+ } else {
+ err = ftrace_update_ops(ops, new_filter_hash, EMPTY_HASH);
+ /*
+ * new_filter_hash is dup-ed, so we need to release it anyway,
+ * old_filter_hash either stays on error or is already released
+ */
+ }
+
+ if (err) {
+ /* reset direct_functions and free the new one */
+ rcu_assign_pointer(direct_functions, old_direct_functions);
+ old_direct_functions = new_direct_functions;
+ }
+
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+
+ if (old_direct_functions && old_direct_functions != EMPTY_HASH)
+ call_rcu_tasks(&old_direct_functions->rcu, register_ftrace_direct_cb);
+ free_ftrace_hash(new_filter_hash);
+
+ return err;
+}
+
+/**
+ * hash_sub - subtracts @b from @a and returns the result
+ * @a: struct ftrace_hash object
+ * @b: struct ftrace_hash object
+ *
+ * Returns struct ftrace_hash object on success, NULL on error.
+ */
+static struct ftrace_hash *hash_sub(struct ftrace_hash *a, struct ftrace_hash *b)
+{
+ struct ftrace_func_entry *entry, *del;
+ struct ftrace_hash *sub;
+ int size;
+
+ sub = alloc_and_copy_ftrace_hash(a->size_bits, a);
+ if (!sub)
+ return NULL;
+
+ size = 1 << b->size_bits;
+ for (int i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &b->buckets[i], hlist) {
+ del = __ftrace_lookup_ip(sub, entry->ip);
+ if (WARN_ON_ONCE(!del)) {
+ free_ftrace_hash(sub);
+ return NULL;
+ }
+ remove_hash_entry(sub, del);
+ kfree(del);
+ }
+ }
+ return sub;
+}
+
+/**
+ * update_ftrace_direct_del - Updates @ops by removing its direct
+ * callers provided in @hash
+ * @ops: The address of the struct ftrace_ops object
+ * @hash: The address of the struct ftrace_hash object
+ *
+ * This is used to delete custom direct callers (ip -> addr) in
+ * @ops specified via @hash. The @ops will be either unregistered
+ * or updated.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ * -EINVAL - The @hash is empty
+ * -EINVAL - The @ops is not registered
+ */
+int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
+{
+ struct ftrace_hash *old_direct_functions = NULL;
+ struct ftrace_hash *new_direct_functions;
+ struct ftrace_hash *new_filter_hash = NULL;
+ struct ftrace_hash *old_filter_hash;
+ struct ftrace_func_entry *entry;
+ struct ftrace_func_entry *del;
+ unsigned long size;
+ int err = -EINVAL;
+
+ if (!hash_count(hash))
+ return -EINVAL;
+ if (check_direct_multi(ops))
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EINVAL;
+ if (direct_functions == EMPTY_HASH)
+ return -EINVAL;
+
+ mutex_lock(&direct_mutex);
+
+ old_filter_hash = ops->func_hash ? ops->func_hash->filter_hash : NULL;
+
+ if (!hash_count(old_filter_hash))
+ goto out_unlock;
+
+ /* Make sure requested entries are already registered. */
+ size = 1 << hash->size_bits;
+ for (int i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ del = __ftrace_lookup_ip(direct_functions, entry->ip);
+ if (!del || del->direct != entry->direct)
+ goto out_unlock;
+ }
+ }
+
+ err = -ENOMEM;
+ new_filter_hash = hash_sub(old_filter_hash, hash);
+ if (!new_filter_hash)
+ goto out_unlock;
+
+ new_direct_functions = hash_sub(direct_functions, hash);
+ if (!new_direct_functions)
+ goto out_unlock;
+
+ /* If there's nothing left, we need to unregister the ops. */
+ if (ftrace_hash_empty(new_filter_hash)) {
+ err = unregister_ftrace_function(ops);
+ if (!err) {
+ /* clean up for a possible later register call */
+ ops->func = NULL;
+ ops->trampoline = 0;
+ ftrace_free_filter(ops);
+ ops->func_hash->filter_hash = NULL;
+ }
+ } else {
+ err = ftrace_update_ops(ops, new_filter_hash, EMPTY_HASH);
+ /*
+ * new_filter_hash is dup-ed, so we need to release it anyway,
+ * old_filter_hash either stays on error or is already released
+ */
+ }
+
+ if (err) {
+ /* free the new_direct_functions */
+ old_direct_functions = new_direct_functions;
+ } else {
+ rcu_assign_pointer(direct_functions, new_direct_functions);
+ }
+
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+
+ if (old_direct_functions && old_direct_functions != EMPTY_HASH)
+ call_rcu_tasks(&old_direct_functions->rcu, register_ftrace_direct_cb);
+ free_ftrace_hash(new_filter_hash);
+
+ return err;
+}
+
+/**
+ * update_ftrace_direct_mod - Updates @ops by modifying its direct
+ * callers provided in @hash
+ * @ops: The address of the struct ftrace_ops object
+ * @hash: The address of the struct ftrace_hash object
+ * @do_direct_lock: If true, lock the direct_mutex
+ *
+ * This is used to modify custom direct callers (ip -> addr) in
+ * @ops specified via @hash.
+ *
+ * This can be called from within ftrace ops_func callback with
+ * direct_mutex already locked, in which case @do_direct_lock
+ * needs to be false.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ * -EINVAL - The @hash is empty
+ * -EINVAL - The @ops is not registered
+ */
+int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock)
+{
+ struct ftrace_func_entry *entry, *tmp;
+ static struct ftrace_ops tmp_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_STUB,
+ };
+ struct ftrace_hash *orig_hash;
+ unsigned long size, i;
+ int err = -EINVAL;
+
+ if (!hash_count(hash))
+ return -EINVAL;
+ if (check_direct_multi(ops))
+ return -EINVAL;
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return -EINVAL;
+ if (direct_functions == EMPTY_HASH)
+ return -EINVAL;
+
+ /*
+ * We can be called from within ops_func callback with direct_mutex
+ * already taken.
+ */
+ if (do_direct_lock)
+ mutex_lock(&direct_mutex);
+
+ orig_hash = ops->func_hash ? ops->func_hash->filter_hash : NULL;
+ if (!orig_hash)
+ goto unlock;
+
+ /* Enable the tmp_ops to have the same functions as the direct ops */
+ ftrace_ops_init(&tmp_ops);
+ tmp_ops.func_hash = ops->func_hash;
+
+ err = register_ftrace_function_nolock(&tmp_ops);
+ if (err)
+ goto unlock;
+
+ /*
+ * Call __ftrace_hash_update_ipmodify() here, so that we can call
+ * ops->ops_func for the ops. This is needed because the above
+ * register_ftrace_function_nolock() worked on tmp_ops.
+ */
+ err = __ftrace_hash_update_ipmodify(ops, orig_hash, orig_hash, true);
+ if (err)
+ goto out;
+
+ /*
+ * Now the ftrace_ops_list_func() is called to do the direct callers.
+ * We can safely change the direct functions attached to each entry.
+ */
+ mutex_lock(&ftrace_lock);
+
+ size = 1 << hash->size_bits;
+ for (i = 0; i < size; i++) {
+ hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+ tmp = __ftrace_lookup_ip(direct_functions, entry->ip);
+ if (!tmp)
+ continue;
+ tmp->direct = entry->direct;
+ }
+ }
+
+ mutex_unlock(&ftrace_lock);
+
+out:
+ /* Removing the tmp_ops will add the updated direct callers to the functions */
+ unregister_ftrace_function(&tmp_ops);
+
+unlock:
+ if (do_direct_lock)
+ mutex_unlock(&direct_mutex);
+ return err;
+}
+
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/**
@@ -8708,7 +9066,7 @@ static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
if (!op->ops_func)
return -EBUSY;
- ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
+ ret = op->ops_func(op, ip, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
if (ret)
return ret;
}
@@ -8755,7 +9113,7 @@ static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
/* The cleanup is optional, ignore any errors */
if (found_op && op->ops_func)
- op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
+ op->ops_func(op, ip, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
}
}
mutex_unlock(&direct_mutex);
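
As a footnote on the ops_func signature change in the two hunks above:
the callback now also receives the ip of the record being updated. A
sketch of a callback matching the new three-argument signature follows;
sample_ops_func and its handler bodies are placeholders, only the added
ip argument comes from this patch:

  static int sample_ops_func(struct ftrace_ops *ops, unsigned long ip,
  			   enum ftrace_ops_cmd cmd)
  {
  	switch (cmd) {
  	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
  	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
  		/* ip now identifies the specific record being shared */
  		return 0;
  	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
  		/* the cleanup is optional; the caller ignores errors */
  		return 0;
  	}
  	return -EINVAL;
  }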