diff options
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 53 | 
1 files changed, 42 insertions, 11 deletions
| diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 169b3c44ee97..086af4f5c3e8 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,  		     struct trace_seq *s, u32 flags);  /* Add a function return address to the trace stack on thread info.*/ -int -ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, +static int +ftrace_push_return_trace(unsigned long ret, unsigned long func,  			 unsigned long frame_pointer, unsigned long *retp)  {  	unsigned long long calltime; @@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,  #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR  	current->ret_stack[index].retp = retp;  #endif -	*depth = current->curr_ret_stack; +	return 0; +} + +int function_graph_enter(unsigned long ret, unsigned long func, +			 unsigned long frame_pointer, unsigned long *retp) +{ +	struct ftrace_graph_ent trace; + +	trace.func = func; +	trace.depth = ++current->curr_ret_depth; + +	if (ftrace_push_return_trace(ret, func, +				     frame_pointer, retp)) +		goto out; + +	/* Only trace if the calling function expects to */ +	if (!ftrace_graph_entry(&trace)) +		goto out_ret;  	return 0; + out_ret: +	current->curr_ret_stack--; + out: +	current->curr_ret_depth--; +	return -EBUSY;  }  /* Retrieve a function return address to the trace stack on thread info.*/ @@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,  	trace->func = current->ret_stack[index].func;  	trace->calltime = current->ret_stack[index].calltime;  	trace->overrun = atomic_read(&current->trace_overrun); -	trace->depth = index; +	trace->depth = current->curr_ret_depth--; +	/* +	 * We still want to trace interrupts coming in if +	 * max_depth is set to 1. Make sure the decrement is +	 * seen before ftrace_graph_return. 
+	 */ +	barrier();  }  /* @@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)  	ftrace_pop_return_trace(&trace, &ret, frame_pointer);  	trace.rettime = trace_clock_local(); +	ftrace_graph_return(&trace); +	/* +	 * The ftrace_graph_return() may still access the current +	 * ret_stack structure, we need to make sure the update of +	 * curr_ret_stack is after that. +	 */  	barrier();  	current->curr_ret_stack--;  	/* @@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)  		return ret;  	} -	/* -	 * The trace should run after decrementing the ret counter -	 * in case an interrupt were to come in. We don't want to -	 * lose the interrupt if max_depth is set. -	 */ -	ftrace_graph_return(&trace); -  	if (unlikely(!ret)) {  		ftrace_graph_stop();  		WARN_ON(1); @@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)  	int cpu;  	int pc; +	ftrace_graph_addr_finish(trace); +  	local_irq_save(flags);  	cpu = raw_smp_processor_id();  	data = per_cpu_ptr(tr->trace_buffer.data, cpu); @@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)  static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)  { +	ftrace_graph_addr_finish(trace); +  	if (tracing_thresh &&  	    (trace->rettime - trace->calltime < tracing_thresh))  		return; | 
