diff options
Diffstat (limited to 'tools/perf/util/stat.c')
| -rw-r--r-- | tools/perf/util/stat.c | 100 | 
1 file changed, 97 insertions, 3 deletions
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index a0061e0b0fad..4d40515307b8 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -374,9 +374,8 @@ int perf_stat_process_counter(struct perf_stat_config *config,  	return 0;  } -int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused, -				   union perf_event *event, -				   struct perf_session *session) +int perf_event__process_stat_event(struct perf_session *session, +				   union perf_event *event)  {  	struct perf_counts_values count;  	struct stat_event *st = &event->stat; @@ -435,3 +434,98 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)  	return ret;  } + +int create_perf_stat_counter(struct perf_evsel *evsel, +			     struct perf_stat_config *config, +			     struct target *target) +{ +	struct perf_event_attr *attr = &evsel->attr; +	struct perf_evsel *leader = evsel->leader; + +	if (config->scale) { +		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | +				    PERF_FORMAT_TOTAL_TIME_RUNNING; +	} + +	/* +	 * The event is part of non trivial group, let's enable +	 * the group read (for leader) and ID retrieval for all +	 * members. +	 */ +	if (leader->nr_members > 1) +		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP; + +	attr->inherit = !config->no_inherit; + +	/* +	 * Some events get initialized with sample_(period/type) set, +	 * like tracepoints. Clear it up for counting. +	 */ +	attr->sample_period = 0; + +	if (config->identifier) +		attr->sample_type = PERF_SAMPLE_IDENTIFIER; + +	/* +	 * Disabling all counters initially, they will be enabled +	 * either manually by us or by kernel via enable_on_exec +	 * set later. +	 */ +	if (perf_evsel__is_group_leader(evsel)) { +		attr->disabled = 1; + +		/* +		 * In case of initial_delay we enable tracee +		 * events manually. 
+		 */ +		if (target__none(target) && !config->initial_delay) +			attr->enable_on_exec = 1; +	} + +	if (target__has_cpu(target) && !target__has_per_thread(target)) +		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); + +	return perf_evsel__open_per_thread(evsel, evsel->threads); +} + +int perf_stat_synthesize_config(struct perf_stat_config *config, +				struct perf_tool *tool, +				struct perf_evlist *evlist, +				perf_event__handler_t process, +				bool attrs) +{ +	int err; + +	if (attrs) { +		err = perf_event__synthesize_attrs(tool, evlist, process); +		if (err < 0) { +			pr_err("Couldn't synthesize attrs.\n"); +			return err; +		} +	} + +	err = perf_event__synthesize_extra_attr(tool, evlist, process, +						attrs); + +	err = perf_event__synthesize_thread_map2(tool, evlist->threads, +						 process, NULL); +	if (err < 0) { +		pr_err("Couldn't synthesize thread map.\n"); +		return err; +	} + +	err = perf_event__synthesize_cpu_map(tool, evlist->cpus, +					     process, NULL); +	if (err < 0) { +		pr_err("Couldn't synthesize thread map.\n"); +		return err; +	} + +	err = perf_event__synthesize_stat_config(tool, config, process, NULL); +	if (err < 0) { +		pr_err("Couldn't synthesize config.\n"); +		return err; +	} + +	return 0; +}  | 
