path: root/kernel/trace/trace.c
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  722
 1 file changed, 431 insertions(+), 291 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bc957a2507e2..4283ed4e8f59 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -50,6 +50,8 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
+#include <linux/io.h> /* vmap_page_range() */
+#include <linux/fs_context.h>
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -119,6 +121,7 @@ static int tracing_disabled = 1;
cpumask_var_t __read_mostly tracing_buffer_mask;
+#define MAX_TRACER_SIZE 100
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
*
@@ -141,7 +144,40 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
/* When set, tracing will stop when a WARN*() is hit */
-int __disable_trace_on_warning;
+static int __disable_trace_on_warning;
+
+int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+static const struct ctl_table trace_sysctl_table[] = {
+ {
+ .procname = "ftrace_dump_on_oops",
+ .data = &ftrace_dump_on_oops,
+ .maxlen = MAX_TRACER_SIZE,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ {
+ .procname = "traceoff_on_warning",
+ .data = &__disable_trace_on_warning,
+ .maxlen = sizeof(__disable_trace_on_warning),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "tracepoint_printk",
+ .data = &tracepoint_printk,
+ .maxlen = sizeof(tracepoint_printk),
+ .mode = 0644,
+ .proc_handler = tracepoint_printk_sysctl,
+ },
+};
+
+static int __init init_trace_sysctls(void)
+{
+ register_sysctl_init("kernel", trace_sysctl_table);
+ return 0;
+}
+subsys_initcall(init_trace_sysctls);
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
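The sysctl table added above is registered under "kernel", so the knobs appear as /proc/sys/kernel/ftrace_dump_on_oops, traceoff_on_warning and tracepoint_printk. A minimal user-space sketch of flipping one of them (not part of the patch; plain POSIX open()/write(), with the path derived from the .procname field above):

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper: stop tracing on the first WARN*() by writing '1'
 * to the sysctl registered in trace_sysctl_table above. */
static int set_traceoff_on_warning(int on)
{
	char c = on ? '1' : '0';
	int fd = open("/proc/sys/kernel/traceoff_on_warning", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &c, 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}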
@@ -396,15 +432,13 @@ static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
struct trace_export *export;
- preempt_disable_notrace();
+ guard(preempt_notrace)();
export = rcu_dereference_raw_check(ftrace_exports_list);
while (export) {
trace_process_export(export, event, flag);
export = rcu_dereference_raw_check(export->next);
}
-
- preempt_enable_notrace();
}
static inline void
@@ -461,27 +495,18 @@ int register_ftrace_export(struct trace_export *export)
if (WARN_ON_ONCE(!export->write))
return -1;
- mutex_lock(&ftrace_export_lock);
+ guard(mutex)(&ftrace_export_lock);
add_ftrace_export(&ftrace_exports_list, export);
- mutex_unlock(&ftrace_export_lock);
-
return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);
int unregister_ftrace_export(struct trace_export *export)
{
- int ret;
-
- mutex_lock(&ftrace_export_lock);
-
- ret = rm_ftrace_export(&ftrace_exports_list, export);
-
- mutex_unlock(&ftrace_export_lock);
-
- return ret;
+ guard(mutex)(&ftrace_export_lock);
+ return rm_ftrace_export(&ftrace_exports_list, export);
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
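Several hunks in this patch convert open-coded lock/unlock pairs to the scope-based guards from <linux/cleanup.h>. A rough sketch of the mechanism, assuming only that guard() is built on the compiler's cleanup attribute (an illustration, not the kernel's actual macro definitions):

/* Sketch: a cleanup-attribute variable whose "destructor" drops the lock,
 * so every return path unlocks automatically, as in the guard(mutex)
 * conversions above and below. */
static inline void sketch_mutex_unlock(struct mutex **m)
{
	mutex_unlock(*m);
}

#define sketch_mutex_guard(lock)					\
	struct mutex *__g __attribute__((cleanup(sketch_mutex_unlock))) = (lock); \
	mutex_lock(__g)

With that shape in mind, the early returns introduced throughout the patch (e.g. in unregister_ftrace_export() above) no longer need an explicit mutex_unlock() or an out: label.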
@@ -492,7 +517,8 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
- TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
+ TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK | \
+ TRACE_ITER_COPY_MARKER)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
@@ -500,7 +526,8 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
- (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
+ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK | \
+ TRACE_ITER_COPY_MARKER)
/*
* The global_trace is the descriptor that holds the top-level tracing
@@ -512,6 +539,9 @@ static struct trace_array global_trace = {
static struct trace_array *printk_trace = &global_trace;
+/* List of trace_arrays interested in the top level trace_marker */
+static LIST_HEAD(marker_copies);
+
static __always_inline bool printk_binsafe(struct trace_array *tr)
{
/*
@@ -533,6 +563,28 @@ static void update_printk_trace(struct trace_array *tr)
tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
}
+/* Returns true if the status of tr changed */
+static bool update_marker_trace(struct trace_array *tr, int enabled)
+{
+ lockdep_assert_held(&event_mutex);
+
+ if (enabled) {
+ if (!list_empty(&tr->marker_list))
+ return false;
+
+ list_add_rcu(&tr->marker_list, &marker_copies);
+ tr->trace_flags |= TRACE_ITER_COPY_MARKER;
+ return true;
+ }
+
+ if (list_empty(&tr->marker_list))
+ return false;
+
+ list_del_init(&tr->marker_list);
+ tr->trace_flags &= ~TRACE_ITER_COPY_MARKER;
+ return true;
+}
+
void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
if (!tr)
@@ -577,9 +629,8 @@ void trace_array_put(struct trace_array *this_tr)
if (!this_tr)
return;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
__trace_array_put(this_tr);
- mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
@@ -873,7 +924,6 @@ int tracing_is_enabled(void)
* return the mirror variable of the state of the ring buffer.
* It's a little racy, but we don't really care.
*/
- smp_rmb();
return !global_trace.buffer_disabled;
}
@@ -1044,8 +1094,6 @@ void tracer_tracing_on(struct trace_array *tr)
* important to be fast than accurate.
*/
tr->buffer_disabled = 0;
- /* Make the flag seen by readers */
- smp_wmb();
}
/**
@@ -1100,13 +1148,11 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
trace_ctx = tracing_gen_ctx();
buffer = tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ guard(ring_buffer_nest)(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
trace_ctx);
- if (!event) {
- size = 0;
- goto out;
- }
+ if (!event)
+ return 0;
entry = ring_buffer_event_data(event);
entry->ip = ip;
@@ -1122,8 +1168,6 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
- out:
- ring_buffer_nest_end(buffer);
return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
@@ -1153,7 +1197,6 @@ int __trace_bputs(unsigned long ip, const char *str)
struct bputs_entry *entry;
unsigned int trace_ctx;
int size = sizeof(struct bputs_entry);
- int ret = 0;
if (!printk_binsafe(tr))
return __trace_puts(ip, str, strlen(str));
@@ -1167,11 +1210,11 @@ int __trace_bputs(unsigned long ip, const char *str)
trace_ctx = tracing_gen_ctx();
buffer = tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ guard(ring_buffer_nest)(buffer);
event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
trace_ctx);
if (!event)
- goto out;
+ return 0;
entry = ring_buffer_event_data(event);
entry->ip = ip;
@@ -1180,10 +1223,7 @@ int __trace_bputs(unsigned long ip, const char *str)
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
- ret = 1;
- out:
- ring_buffer_nest_end(buffer);
- return ret;
+ return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
@@ -1372,13 +1412,8 @@ static int tracing_arm_snapshot_locked(struct trace_array *tr)
int tracing_arm_snapshot(struct trace_array *tr)
{
- int ret;
-
- mutex_lock(&trace_types_lock);
- ret = tracing_arm_snapshot_locked(tr);
- mutex_unlock(&trace_types_lock);
-
- return ret;
+ guard(mutex)(&trace_types_lock);
+ return tracing_arm_snapshot_locked(tr);
}
void tracing_disarm_snapshot(struct trace_array *tr)
@@ -1577,8 +1612,39 @@ void tracer_tracing_off(struct trace_array *tr)
* important to be fast than accurate.
*/
tr->buffer_disabled = 1;
- /* Make the flag seen by readers */
- smp_wmb();
+}
+
+/**
+ * tracer_tracing_disable() - temporarily disable writes to the buffer
+ * @tr: The trace array whose buffer is to be disabled
+ *
+ * Expects tracer_tracing_enable() to re-enable tracing.
+ * The difference between this and tracer_tracing_off() is that this
+ * is a counter and can nest, whereas tracer_tracing_off() can
+ * be called multiple times and a single tracer_tracing_on() will
+ * enable it.
+ */
+void tracer_tracing_disable(struct trace_array *tr)
+{
+ if (WARN_ON_ONCE(!tr->array_buffer.buffer))
+ return;
+
+ ring_buffer_record_disable(tr->array_buffer.buffer);
+}
+
+/**
+ * tracer_tracing_enable() - counterpart of tracer_tracing_disable()
+ * @tr: The trace array that had tracer_tracing_disable() called on it
+ *
+ * This is called after tracer_tracing_disable() has been called on @tr,
+ * when it's safe to re-enable tracing.
+ */
+void tracer_tracing_enable(struct trace_array *tr)
+{
+ if (WARN_ON_ONCE(!tr->array_buffer.buffer))
+ return;
+
+ ring_buffer_record_enable(tr->array_buffer.buffer);
}
/**
@@ -1750,7 +1816,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
ret = get_user(ch, ubuf++);
if (ret)
- goto out;
+ return ret;
read++;
cnt--;
@@ -1764,7 +1830,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
while (cnt && isspace(ch)) {
ret = get_user(ch, ubuf++);
if (ret)
- goto out;
+ return ret;
read++;
cnt--;
}
@@ -1774,8 +1840,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
/* only spaces were written */
if (isspace(ch) || !ch) {
*ppos += read;
- ret = read;
- goto out;
+ return read;
}
}
@@ -1783,13 +1848,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
while (cnt && !isspace(ch) && ch) {
if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
- else {
- ret = -EINVAL;
- goto out;
- }
+ else
+ return -EINVAL;
+
ret = get_user(ch, ubuf++);
if (ret)
- goto out;
+ return ret;
read++;
cnt--;
}
@@ -1804,15 +1868,11 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
/* Make sure the parsed string always terminates with '\0'. */
parser->buffer[parser->idx] = 0;
} else {
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
*ppos += read;
- ret = read;
-
-out:
- return ret;
+ return read;
}
/* TODO add a seq_buf_to_buffer() */
@@ -2314,10 +2374,10 @@ int __init register_tracer(struct tracer *type)
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
- goto out_unlock;
+ return ret;
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
- goto out_unlock;
+ return 0;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
/* Do we want this tracer to start on bootup? */
@@ -2329,8 +2389,7 @@ int __init register_tracer(struct tracer *type)
/* disable other selftests, since this will break it. */
disable_tracing_selftest("running a tracer");
- out_unlock:
- return ret;
+ return 0;
}
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
@@ -2407,9 +2466,8 @@ void tracing_reset_all_online_cpus_unlocked(void)
void tracing_reset_all_online_cpus(void)
{
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
tracing_reset_all_online_cpus_unlocked();
- mutex_unlock(&trace_types_lock);
}
int is_tracing_stopped(void)
@@ -2420,18 +2478,17 @@ int is_tracing_stopped(void)
static void tracing_start_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
- unsigned long flags;
if (tracing_disabled)
return;
- raw_spin_lock_irqsave(&tr->start_lock, flags);
+ guard(raw_spinlock_irqsave)(&tr->start_lock);
if (--tr->stop_count) {
if (WARN_ON_ONCE(tr->stop_count < 0)) {
/* Someone screwed up their debugging */
tr->stop_count = 0;
}
- goto out;
+ return;
}
/* Prevent the buffers from switching */
@@ -2448,9 +2505,6 @@ static void tracing_start_tr(struct trace_array *tr)
#endif
arch_spin_unlock(&tr->max_lock);
-
- out:
- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
@@ -2468,11 +2522,10 @@ void tracing_start(void)
static void tracing_stop_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
- unsigned long flags;
- raw_spin_lock_irqsave(&tr->start_lock, flags);
+ guard(raw_spinlock_irqsave)(&tr->start_lock);
if (tr->stop_count++)
- goto out;
+ return;
/* Prevent the buffers from switching */
arch_spin_lock(&tr->max_lock);
@@ -2488,9 +2541,6 @@ static void tracing_stop_tr(struct trace_array *tr)
#endif
arch_spin_unlock(&tr->max_lock);
-
- out:
- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
@@ -2603,19 +2653,17 @@ void trace_buffered_event_enable(void)
per_cpu(trace_buffered_event, cpu) = event;
- preempt_disable();
- if (cpu == smp_processor_id() &&
- __this_cpu_read(trace_buffered_event) !=
- per_cpu(trace_buffered_event, cpu))
- WARN_ON_ONCE(1);
- preempt_enable();
+ scoped_guard(preempt,) {
+ if (cpu == smp_processor_id() &&
+ __this_cpu_read(trace_buffered_event) !=
+ per_cpu(trace_buffered_event, cpu))
+ WARN_ON_ONCE(1);
+ }
}
}
static void enable_trace_buffered_event(void *data)
{
- /* Probably not needed, but do it anyway */
- smp_rmb();
this_cpu_dec(trace_buffered_event_cnt);
}
@@ -2955,7 +3003,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
skip++;
#endif
- preempt_disable_notrace();
+ guard(preempt_notrace)();
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
@@ -3013,8 +3061,6 @@ static void __ftrace_trace_stack(struct trace_array *tr,
/* Again, don't let gcc optimize things here */
barrier();
__this_cpu_dec(ftrace_stack_reserve);
- preempt_enable_notrace();
-
}
static inline void ftrace_trace_stack(struct trace_array *tr,
@@ -3097,9 +3143,9 @@ ftrace_trace_userstack(struct trace_array *tr,
* prevent recursion, since the user stack tracing may
* trigger other kernel events.
*/
- preempt_disable();
+ guard(preempt)();
if (__this_cpu_read(user_stack_count))
- goto out;
+ return;
__this_cpu_inc(user_stack_count);
@@ -3117,8 +3163,6 @@ ftrace_trace_userstack(struct trace_array *tr,
out_drop_count:
__this_cpu_dec(user_stack_count);
- out:
- preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
@@ -3300,7 +3344,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
pause_graph_tracing();
trace_ctx = tracing_gen_ctx();
- preempt_disable_notrace();
+ guard(preempt_notrace)();
tbuffer = get_trace_buf();
if (!tbuffer) {
@@ -3315,26 +3359,23 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
- trace_ctx);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
- entry->fmt = fmt;
-
- memcpy(entry->buf, tbuffer, sizeof(u32) * len);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+ scoped_guard(ring_buffer_nest, buffer) {
+ event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+ trace_ctx);
+ if (!event)
+ goto out_put;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->fmt = fmt;
-out:
- ring_buffer_nest_end(buffer);
+ memcpy(entry->buf, tbuffer, sizeof(u32) * len);
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
+ }
out_put:
put_trace_buf();
out_nobuffer:
- preempt_enable_notrace();
unpause_graph_tracing();
return len;
@@ -3358,7 +3399,7 @@ int __trace_array_vprintk(struct trace_buffer *buffer,
pause_graph_tracing();
trace_ctx = tracing_gen_ctx();
- preempt_disable_notrace();
+ guard(preempt_notrace)();
tbuffer = get_trace_buf();
@@ -3370,24 +3411,22 @@ int __trace_array_vprintk(struct trace_buffer *buffer,
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
- ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- trace_ctx);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- entry->ip = ip;
-
- memcpy(&entry->buf, tbuffer, len + 1);
- __buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
+ scoped_guard(ring_buffer_nest, buffer) {
+ event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+ trace_ctx);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ memcpy(&entry->buf, tbuffer, len + 1);
+ __buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
+ }
out:
- ring_buffer_nest_end(buffer);
put_trace_buf();
out_nobuffer:
- preempt_enable_notrace();
unpause_graph_tracing();
return len;
@@ -4639,21 +4678,15 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->array_buffer->buffer,
- cpu, GFP_KERNEL);
- }
- ring_buffer_read_prepare_sync();
- for_each_tracing_cpu(cpu) {
- ring_buffer_read_start(iter->buffer_iter[cpu]);
+ ring_buffer_read_start(iter->array_buffer->buffer,
+ cpu, GFP_KERNEL);
tracing_iter_reset(iter, cpu);
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->array_buffer->buffer,
- cpu, GFP_KERNEL);
- ring_buffer_read_prepare_sync();
- ring_buffer_read_start(iter->buffer_iter[cpu]);
+ ring_buffer_read_start(iter->array_buffer->buffer,
+ cpu, GFP_KERNEL);
tracing_iter_reset(iter, cpu);
}
@@ -4717,20 +4750,16 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
if (ret)
return ret;
- mutex_lock(&event_mutex);
+ guard(mutex)(&event_mutex);
/* Fail if the file is marked for removal */
if (file->flags & EVENT_FILE_FL_FREED) {
trace_array_put(file->tr);
- ret = -ENODEV;
+ return -ENODEV;
} else {
event_file_get(file);
}
- mutex_unlock(&event_mutex);
- if (ret)
- return ret;
-
filp->private_data = inode->i_private;
return 0;
@@ -5007,7 +5036,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct trace_array *tr = file_inode(filp)->i_private;
- char *mask_str;
+ char *mask_str __free(kfree) = NULL;
int len;
len = snprintf(NULL, 0, "%*pb\n",
@@ -5018,16 +5047,10 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf,
len = snprintf(mask_str, len, "%*pb\n",
cpumask_pr_args(tr->tracing_cpumask));
- if (len >= count) {
- count = -EINVAL;
- goto out_err;
- }
- count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
-
-out_err:
- kfree(mask_str);
+ if (len >= count)
+ return -EINVAL;
- return count;
+ return simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
}
int tracing_set_cpumask(struct trace_array *tr,
@@ -5047,7 +5070,6 @@ int tracing_set_cpumask(struct trace_array *tr,
*/
if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
#ifdef CONFIG_TRACER_MAX_TRACE
ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
@@ -5055,7 +5077,6 @@ int tracing_set_cpumask(struct trace_array *tr,
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
#ifdef CONFIG_TRACER_MAX_TRACE
ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
@@ -5188,7 +5209,8 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD) ||
- (mask == TRACE_ITER_TRACE_PRINTK))
+ (mask == TRACE_ITER_TRACE_PRINTK) ||
+ (mask == TRACE_ITER_COPY_MARKER))
lockdep_assert_held(&event_mutex);
/* do nothing if flag is already set */
@@ -5219,6 +5241,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
}
}
+ if (mask == TRACE_ITER_COPY_MARKER)
+ update_marker_trace(tr, enabled);
+
if (enabled)
tr->trace_flags |= mask;
else
@@ -5839,17 +5864,27 @@ static inline void trace_insert_eval_map_file(struct module *mod,
struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
-static void trace_insert_eval_map(struct module *mod,
- struct trace_eval_map **start, int len)
+static void
+trace_event_update_with_eval_map(struct module *mod,
+ struct trace_eval_map **start,
+ int len)
{
struct trace_eval_map **map;
- if (len <= 0)
- return;
+ /* Even with no eval maps, run the sanitizer if the btf_type_tag attribute exists. */
+ if (len <= 0) {
+ if (!(IS_ENABLED(CONFIG_DEBUG_INFO_BTF) &&
+ IS_ENABLED(CONFIG_PAHOLE_HAS_BTF_TAG) &&
+ __has_attribute(btf_type_tag)))
+ return;
+ }
map = start;
- trace_event_eval_update(map, len);
+ trace_event_update_all(map, len);
+
+ if (len <= 0)
+ return;
trace_insert_eval_map_file(mod, start, len);
}
@@ -5862,9 +5897,9 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
char buf[MAX_TRACER_SIZE+2];
int r;
- mutex_lock(&trace_types_lock);
- r = sprintf(buf, "%s\n", tr->current_trace->name);
- mutex_unlock(&trace_types_lock);
+ scoped_guard(mutex, &trace_types_lock) {
+ r = sprintf(buf, "%s\n", tr->current_trace->name);
+ }
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -6003,6 +6038,7 @@ struct trace_mod_entry {
};
struct trace_scratch {
+ unsigned int clock_id;
unsigned long text_addr;
unsigned long nr_entries;
struct trace_mod_entry entries[];
@@ -6031,6 +6067,7 @@ unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr)
struct trace_module_delta *module_delta;
struct trace_scratch *tscratch;
struct trace_mod_entry *entry;
+ unsigned long raddr;
int idx = 0, nr_entries;
/* If we don't have last boot delta, return the address */
@@ -6042,8 +6079,12 @@ unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr)
tscratch = tr->scratch;
/* if there is no tscrach, module_delta must be NULL. */
module_delta = READ_ONCE(tr->module_delta);
- if (!module_delta || tscratch->entries[0].mod_addr > addr)
- return addr + tr->text_delta;
+ if (!module_delta || !tscratch->nr_entries ||
+ tscratch->entries[0].mod_addr > addr) {
+ raddr = addr + tr->text_delta;
+ return __is_kernel(raddr) || is_kernel_core_data(raddr) ||
+ is_kernel_rodata(raddr) ? raddr : addr;
+ }
/* Note that entries must be sorted. */
nr_entries = tscratch->nr_entries;
@@ -6113,6 +6154,7 @@ static void update_last_data(struct trace_array *tr)
if (tr->scratch) {
struct trace_scratch *tscratch = tr->scratch;
+ tscratch->clock_id = tr->clock_id;
memset(tscratch->entries, 0,
flex_array_size(tscratch, entries, tscratch->nr_entries));
tscratch->nr_entries = 0;
@@ -6159,15 +6201,13 @@ int tracing_update_buffers(struct trace_array *tr)
{
int ret = 0;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
update_last_data(tr);
if (!tr->ring_buffer_expanded)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
- mutex_unlock(&trace_types_lock);
-
return ret;
}
@@ -6198,7 +6238,7 @@ static bool tracer_options_updated;
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
/* Only enable if the directory has been created already. */
- if (!tr->dir)
+ if (!tr->dir && !(tr->flags & TRACE_ARRAY_FL_GLOBAL))
return;
/* Only create trace option files after update_tracer_options finish */
@@ -6464,7 +6504,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (ret)
return ret;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
cpu = tracing_get_cpu(inode);
ret = open_pipe_on_cpu(tr, cpu);
if (ret)
@@ -6508,7 +6548,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
tr->trace_ref++;
- mutex_unlock(&trace_types_lock);
return ret;
fail:
@@ -6517,7 +6556,6 @@ fail_alloc_iter:
close_pipe_on_cpu(tr, cpu);
fail_pipe_on_cpu:
__trace_array_put(tr);
- mutex_unlock(&trace_types_lock);
return ret;
}
@@ -6526,14 +6564,13 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
struct trace_iterator *iter = file->private_data;
struct trace_array *tr = inode->i_private;
- mutex_lock(&trace_types_lock);
-
- tr->trace_ref--;
+ scoped_guard(mutex, &trace_types_lock) {
+ tr->trace_ref--;
- if (iter->trace->pipe_close)
- iter->trace->pipe_close(iter);
- close_pipe_on_cpu(tr, iter->cpu_file);
- mutex_unlock(&trace_types_lock);
+ if (iter->trace->pipe_close)
+ iter->trace->pipe_close(iter);
+ close_pipe_on_cpu(tr, iter->cpu_file);
+ }
free_trace_iter_content(iter);
kfree(iter);
@@ -6607,6 +6644,22 @@ static int tracing_wait_pipe(struct file *filp)
return 1;
}
+static bool update_last_data_if_empty(struct trace_array *tr)
+{
+ if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
+ return false;
+
+ if (!ring_buffer_empty(tr->array_buffer.buffer))
+ return false;
+
+ /*
+ * If the buffer contains the last boot data and all per-cpu
+ * buffers are empty, reset it from the kernel side.
+ */
+ update_last_data(tr);
+ return true;
+}
+
/*
* Consumer reader.
*/
@@ -6638,6 +6691,9 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
}
waitagain:
+ if (update_last_data_if_empty(iter->tr))
+ return 0;
+
sret = tracing_wait_pipe(filp);
if (sret <= 0)
return sret;
@@ -6820,13 +6876,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,
page_address(spd.pages[i]),
- trace_seq_used(&iter->seq));
+ min((size_t)trace_seq_used(&iter->seq),
+ (size_t)PAGE_SIZE));
if (ret < 0) {
__free_page(spd.pages[i]);
break;
}
spd.partial[i].offset = 0;
- spd.partial[i].len = trace_seq_used(&iter->seq);
+ spd.partial[i].len = ret;
trace_seq_init(&iter->seq);
}
@@ -7096,11 +7153,9 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
#define TRACE_MARKER_MAX_SIZE 4096
-static ssize_t
-tracing_mark_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *fpos)
+static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user *ubuf,
+ size_t cnt, unsigned long ip)
{
- struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event;
enum event_trigger_type tt = ETT_NONE;
struct trace_buffer *buffer;
@@ -7114,18 +7169,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
- if (tracing_disabled)
- return -EINVAL;
-
- if (!(tr->trace_flags & TRACE_ITER_MARKERS))
- return -EINVAL;
-
- if ((ssize_t)cnt < 0)
- return -EINVAL;
-
- if (cnt > TRACE_MARKER_MAX_SIZE)
- cnt = TRACE_MARKER_MAX_SIZE;
-
meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */
again:
size = cnt + meta_size;
@@ -7158,7 +7201,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
}
entry = ring_buffer_event_data(event);
- entry->ip = _THIS_IP_;
+ entry->ip = ip;
len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
if (len) {
@@ -7191,18 +7234,12 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
}
static ssize_t
-tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
struct trace_array *tr = filp->private_data;
- struct ring_buffer_event *event;
- struct trace_buffer *buffer;
- struct raw_data_entry *entry;
- ssize_t written;
- int size;
- int len;
-
-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+ ssize_t written = -ENODEV;
+ unsigned long ip;
if (tracing_disabled)
return -EINVAL;
@@ -7210,10 +7247,42 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
if (!(tr->trace_flags & TRACE_ITER_MARKERS))
return -EINVAL;
- /* The marker must at least have a tag id */
- if (cnt < sizeof(unsigned int))
+ if ((ssize_t)cnt < 0)
return -EINVAL;
+ if (cnt > TRACE_MARKER_MAX_SIZE)
+ cnt = TRACE_MARKER_MAX_SIZE;
+
+ /* The selftests expect this function to be the IP address */
+ ip = _THIS_IP_;
+
+ /* The global trace_marker can go to multiple instances */
+ if (tr == &global_trace) {
+ guard(rcu)();
+ list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+ written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+ if (written < 0)
+ break;
+ }
+ } else {
+ written = write_marker_to_buffer(tr, ubuf, cnt, ip);
+ }
+
+ return written;
+}
+
+static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
+ const char __user *ubuf, size_t cnt)
+{
+ struct ring_buffer_event *event;
+ struct trace_buffer *buffer;
+ struct raw_data_entry *entry;
+ ssize_t written;
+ int size;
+ int len;
+
+#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+
size = sizeof(*entry) + cnt;
if (cnt < FAULT_SIZE_ID)
size += FAULT_SIZE_ID - cnt;
@@ -7244,6 +7313,40 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
return written;
}
+static ssize_t
+tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *fpos)
+{
+ struct trace_array *tr = filp->private_data;
+ ssize_t written = -ENODEV;
+
+#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+
+ if (tracing_disabled)
+ return -EINVAL;
+
+ if (!(tr->trace_flags & TRACE_ITER_MARKERS))
+ return -EINVAL;
+
+ /* The marker must at least have a tag id */
+ if (cnt < sizeof(unsigned int))
+ return -EINVAL;
+
+ /* The global trace_marker_raw can go to multiple instances */
+ if (tr == &global_trace) {
+ guard(rcu)();
+ list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+ written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+ if (written < 0)
+ break;
+ }
+ } else {
+ written = write_raw_marker_to_buffer(tr, ubuf, cnt);
+ }
+
+ return written;
+}
+
static int tracing_clock_show(struct seq_file *m, void *v)
{
struct trace_array *tr = m->private;
@@ -7270,7 +7373,7 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
if (i == ARRAY_SIZE(trace_clocks))
return -EINVAL;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
tr->clock_id = i;
@@ -7288,7 +7391,11 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tracing_reset_online_cpus(&tr->max_buffer);
#endif
- mutex_unlock(&trace_types_lock);
+ if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) {
+ struct trace_scratch *tscratch = tr->scratch;
+
+ tscratch->clock_id = i;
+ }
return 0;
}
@@ -7341,15 +7448,13 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
struct trace_array *tr = m->private;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
seq_puts(m, "delta [absolute]\n");
else
seq_puts(m, "[delta] absolute\n");
- mutex_unlock(&trace_types_lock);
-
return 0;
}
@@ -7937,14 +8042,14 @@ static void clear_tracing_err_log(struct trace_array *tr)
{
struct tracing_log_err *err, *next;
- mutex_lock(&tracing_err_log_lock);
+ guard(mutex)(&tracing_err_log_lock);
+
list_for_each_entry_safe(err, next, &tr->err_log, list) {
list_del(&err->list);
free_tracing_log_err(err);
}
tr->n_err_log_entries = 0;
- mutex_unlock(&tracing_err_log_lock);
}
static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
@@ -8163,6 +8268,9 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (ret < 0) {
if (trace_empty(iter) && !iter->closed) {
+ if (update_last_data_if_empty(iter->tr))
+ return 0;
+
if ((filp->f_flags & O_NONBLOCK))
return -EAGAIN;
@@ -8212,7 +8320,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
struct ftrace_buffer_info *info = file->private_data;
struct trace_iterator *iter = &info->iter;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
iter->tr->trace_ref--;
@@ -8223,8 +8331,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
info->spare_cpu, info->spare);
kvfree(info);
- mutex_unlock(&trace_types_lock);
-
return 0;
}
@@ -8432,14 +8538,13 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned
* An ioctl call with cmd 0 to the ring buffer file will wake up all
* waiters
*/
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
/* Make sure the waiters see the new wait_index */
(void)atomic_fetch_inc_release(&iter->wait_index);
ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
- mutex_unlock(&trace_types_lock);
return 0;
}
@@ -8500,8 +8605,8 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
struct trace_iterator *iter = &info->iter;
int ret = 0;
- /* Currently the boot mapped buffer is not supported for mmap */
- if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
+ /* A memmap'ed buffer is not supported for user space mmap */
+ if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
return -ENODEV;
ret = get_snapshot_map(iter->tr);
@@ -8780,12 +8885,12 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
out_reg:
ret = tracing_arm_snapshot(tr);
if (ret < 0)
- goto out;
+ return ret;
ret = register_ftrace_function_probe(glob, tr, ops, count);
if (ret < 0)
tracing_disarm_snapshot(tr);
- out:
+
return ret < 0 ? ret : 0;
}
@@ -8804,13 +8909,13 @@ static inline __init int register_snapshot_cmd(void) { return 0; }
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
- if (WARN_ON(!tr->dir))
- return ERR_PTR(-ENODEV);
-
/* Top directory uses NULL as the parent */
if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
return NULL;
+ if (WARN_ON(!tr->dir))
+ return ERR_PTR(-ENODEV);
+
/* All sub buffers have a descriptor */
return tr->dir;
}
@@ -8929,10 +9034,9 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
return -EINVAL;
if (!!(topt->flags->val & topt->opt->bit) != val) {
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
ret = __set_tracer_option(topt->tr, topt->flags,
topt->opt, !val);
- mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
@@ -9241,7 +9345,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
return ret;
if (buffer) {
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
if (!!val == tracer_tracing_is_on(tr)) {
val = 0; /* do nothing */
} else if (val) {
@@ -9255,7 +9359,6 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
/* Wake up any waiters */
ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
}
- mutex_unlock(&trace_types_lock);
}
(*ppos)++;
@@ -9509,6 +9612,15 @@ static void setup_trace_scratch(struct trace_array *tr,
/* Scan modules to make text delta for modules. */
module_for_each_mod(make_mod_delta, tr);
+
+ /* Set trace_clock to the same as the previous boot. */
+ if (tscratch->clock_id != tr->clock_id) {
+ if (tscratch->clock_id >= ARRAY_SIZE(trace_clocks) ||
+ tracing_set_clock(tr, trace_clocks[tscratch->clock_id].name) < 0) {
+ pr_info("the previous trace_clock info is not valid.");
+ goto reset;
+ }
+ }
return;
reset:
/* Invalid trace modules */
@@ -9604,13 +9716,11 @@ static void free_trace_buffers(struct trace_array *tr)
return;
free_trace_buffer(&tr->array_buffer);
+ kfree(tr->module_delta);
#ifdef CONFIG_TRACER_MAX_TRACE
free_trace_buffer(&tr->max_buffer);
#endif
-
- if (tr->range_addr_start)
- vunmap((void *)tr->range_addr_start);
}
static void init_trace_flags_index(struct trace_array *tr)
@@ -9632,10 +9742,9 @@ static void __update_tracer_options(struct trace_array *tr)
static void update_tracer_options(struct trace_array *tr)
{
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
tracer_options_updated = true;
__update_tracer_options(tr);
- mutex_unlock(&trace_types_lock);
}
/* Must have trace_types_lock held */
@@ -9657,11 +9766,10 @@ struct trace_array *trace_array_find_get(const char *instance)
{
struct trace_array *tr;
- mutex_lock(&trace_types_lock);
+ guard(mutex)(&trace_types_lock);
tr = trace_array_find(instance);
if (tr)
tr->ref++;
- mutex_unlock(&trace_types_lock);
return tr;
}
@@ -9735,6 +9843,7 @@ trace_array_create_systems(const char *name, const char *systems,
INIT_LIST_HEAD(&tr->events);
INIT_LIST_HEAD(&tr->hist_vars);
INIT_LIST_HEAD(&tr->err_log);
+ INIT_LIST_HEAD(&tr->marker_list);
#ifdef CONFIG_MODULES
INIT_LIST_HEAD(&tr->mod_events);
@@ -9803,30 +9912,35 @@ static int instance_mkdir(const char *name)
return ret;
}
-static u64 map_pages(u64 start, u64 size)
+#ifdef CONFIG_MMU
+static u64 map_pages(unsigned long start, unsigned long size)
{
- struct page **pages;
- phys_addr_t page_start;
- unsigned int page_count;
- unsigned int i;
- void *vaddr;
-
- page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+ unsigned long vmap_start, vmap_end;
+ struct vm_struct *area;
+ int ret;
- page_start = start;
- pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
return 0;
- for (i = 0; i < page_count; i++) {
- phys_addr_t addr = page_start + i * PAGE_SIZE;
- pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ vmap_start = (unsigned long) area->addr;
+ vmap_end = vmap_start + size;
+
+ ret = vmap_page_range(vmap_start, vmap_end,
+ start, pgprot_nx(PAGE_KERNEL));
+ if (ret < 0) {
+ free_vm_area(area);
+ return 0;
}
- vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- return (u64)(unsigned long)vaddr;
+ return (u64)vmap_start;
+}
+#else
+static inline u64 map_pages(unsigned long start, unsigned long size)
+{
+ return 0;
}
+#endif
/**
* trace_array_get_by_name - Create/Lookup a trace array, given its name.
@@ -9889,6 +10003,9 @@ static int __remove_instance(struct trace_array *tr)
if (printk_trace == tr)
update_printk_trace(&global_trace);
+ if (update_marker_trace(tr, 0))
+ synchronize_rcu();
+
tracing_set_nop(tr);
clear_ftrace_function_probes(tr);
event_trace_del_tracer(tr);
@@ -10060,10 +10177,13 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
ftrace_init_tracefs(tr, d_tracer);
}
+#ifdef CONFIG_TRACEFS_AUTOMOUNT_DEPRECATED
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
{
struct vfsmount *mnt;
struct file_system_type *type;
+ struct fs_context *fc;
+ int ret;
/*
* To maintain backward compatibility for tools that mount
@@ -10073,14 +10193,25 @@ static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
type = get_fs_type("tracefs");
if (!type)
return NULL;
- mnt = vfs_submount(mntpt, type, "tracefs", NULL);
+
+ fc = fs_context_for_submount(type, mntpt);
put_filesystem(type);
- if (IS_ERR(mnt))
- return NULL;
- mntget(mnt);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
+ pr_warn("NOTICE: Automounting of tracing to debugfs is deprecated and will be removed in 2030\n");
+
+ ret = vfs_parse_fs_string(fc, "source",
+ "tracefs", strlen("tracefs"));
+ if (!ret)
+ mnt = fc_mount(fc);
+ else
+ mnt = ERR_PTR(ret);
+
+ put_fs_context(fc);
return mnt;
}
+#endif
/**
* tracing_init_dentry - initialize top level trace array
@@ -10105,6 +10236,7 @@ int tracing_init_dentry(void)
if (WARN_ON(!tracefs_initialized()))
return -ENODEV;
+#ifdef CONFIG_TRACEFS_AUTOMOUNT_DEPRECATED
/*
* As there may still be users that expect the tracing
* files to exist in debugfs/tracing, we must automount
@@ -10113,6 +10245,7 @@ int tracing_init_dentry(void)
*/
tr->dir = debugfs_create_automount("tracing", NULL,
trace_automount, NULL);
+#endif
return 0;
}
@@ -10129,7 +10262,7 @@ static void __init eval_map_work_func(struct work_struct *work)
int len;
len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
- trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
+ trace_event_update_with_eval_map(NULL, __start_ftrace_eval_maps, len);
}
static int __init trace_eval_init(void)
@@ -10167,7 +10300,7 @@ bool module_exists(const char *module)
{
/* All modules have the symbol __this_module */
static const char this_mod[] = "__this_module";
- char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
+ char modname[MODULE_NAME_LEN + sizeof(this_mod) + 2];
unsigned long val;
int n;
@@ -10182,9 +10315,6 @@ bool module_exists(const char *module)
static void trace_module_add_evals(struct module *mod)
{
- if (!mod->num_trace_evals)
- return;
-
/*
* Modules with bad taint do not have events created, do
* not bother with enums either.
@@ -10192,7 +10322,8 @@ static void trace_module_add_evals(struct module *mod)
if (trace_module_has_bad_taint(mod))
return;
- trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
+ /* Even with no trace_evals, this needs to sanitize field types. */
+ trace_event_update_with_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
@@ -10437,7 +10568,7 @@ static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_m
static struct trace_iterator iter;
unsigned int old_userobj;
unsigned long flags;
- int cnt = 0, cpu;
+ int cnt = 0;
/*
* Always turn off tracing when we dump.
@@ -10454,9 +10585,8 @@ static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_m
/* Simulate the iterator */
trace_init_iter(&iter, tr);
- for_each_tracing_cpu(cpu) {
- atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
- }
+ /* While dumping, do not allow the buffer to be enabled */
+ tracer_tracing_disable(tr);
old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -10515,9 +10645,7 @@ static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_m
tr->trace_flags |= old_userobj;
- for_each_tracing_cpu(cpu) {
- atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
- }
+ tracer_tracing_enable(tr);
local_irq_restore(flags);
}
@@ -10599,7 +10727,8 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos,
int (*createfn)(const char *))
{
- char *kbuf, *buf, *tmp;
+ char *kbuf __free(kfree) = NULL;
+ char *buf, *tmp;
int ret = 0;
size_t done = 0;
size_t size;
@@ -10614,10 +10743,9 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
if (size >= WRITE_BUFSIZE)
size = WRITE_BUFSIZE - 1;
- if (copy_from_user(kbuf, buffer + done, size)) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(kbuf, buffer + done, size))
+ return -EFAULT;
+
kbuf[size] = '\0';
buf = kbuf;
do {
@@ -10633,8 +10761,7 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
pr_warn("Line length is too long: Should be less than %d\n",
WRITE_BUFSIZE - 2);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
done += size;
@@ -10647,17 +10774,12 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
ret = createfn(buf);
if (ret)
- goto out;
+ return ret;
buf += size;
} while (done < count);
}
- ret = done;
-
-out:
- kfree(kbuf);
-
- return ret;
+ return done;
}
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -10704,6 +10826,7 @@ static inline void do_allocate_snapshot(const char *name) { }
__init static void enable_instances(void)
{
struct trace_array *tr;
+ bool memmap_area = false;
char *curr_str;
char *name;
char *str;
@@ -10772,6 +10895,7 @@ __init static void enable_instances(void)
name);
continue;
}
+ memmap_area = true;
} else if (tok) {
if (!reserve_mem_find_by_name(tok, &start, &size)) {
start = 0;
@@ -10782,7 +10906,20 @@ __init static void enable_instances(void)
}
if (start) {
- addr = map_pages(start, size);
+ /* Start and size must be page aligned */
+ if (start & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping start addr %pa is not page aligned\n", &start);
+ continue;
+ }
+ if (size & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping size %pa is not page aligned\n", &size);
+ continue;
+ }
+
+ if (memmap_area)
+ addr = map_pages(start, size);
+ else
+ addr = (unsigned long)phys_to_virt(start);
if (addr) {
pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
name, &start, (unsigned long)size);
@@ -10809,10 +10946,13 @@ __init static void enable_instances(void)
update_printk_trace(tr);
/*
- * If start is set, then this is a mapped buffer, and
- * cannot be deleted by user space, so keep the reference
- * to it.
+ * memmap'd buffers cannot be freed.
*/
+ if (memmap_area) {
+ tr->flags |= TRACE_ARRAY_FL_MEMMAP;
+ tr->ref++;
+ }
+
if (start) {
tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
tr->range_name = no_free_ptr(rname);
@@ -10842,7 +10982,7 @@ __init static int tracer_alloc_buffers(void)
BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
- goto out;
+ return -ENOMEM;
if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
@@ -10939,6 +11079,7 @@ __init static int tracer_alloc_buffers(void)
INIT_LIST_HEAD(&global_trace.events);
INIT_LIST_HEAD(&global_trace.hist_vars);
INIT_LIST_HEAD(&global_trace.err_log);
+ list_add(&global_trace.marker_list, &marker_copies);
list_add(&global_trace.list, &ftrace_trace_arrays);
apply_trace_boot_options();
@@ -10959,7 +11100,6 @@ out_free_cpumask:
free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
-out:
return ret;
}