summaryrefslogtreecommitdiff
path: root/tools/perf/tests/shell
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-21 10:51:08 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-21 10:51:08 -0800
commitc7decec2f2d2ab0366567f9e30c0e1418cece43f (patch)
tree50312739ad43d0655ea71c942d848db2ff123e8e /tools/perf/tests/shell
parent3544d5ce36f403db6e5c994f526101c870ffe9fe (diff)
parentdbf0108347bdb5d4ccef8910555b16c1f1a505f8 (diff)
Merge tag 'perf-tools-for-v7.0-1-2026-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools
Pull perf tools updates from Arnaldo Carvalho de Melo: - Introduce 'perf sched stats' tool with record/report/diff workflows using schedstat counters - Add a faster libdw based addr2line implementation and allow selecting it or its alternatives via 'perf config addr2line.style=' - Data-type profiling fixes and improvements including the ability to select fields using 'perf report''s -F/--fields, e.g.: 'perf report --fields overhead,type' - Add 'perf test' regression tests for Data-type profiling with C and Rust workloads - Fix srcline printing with inlines in callchains, make sure this has coverage in 'perf test' - Fix printing of leaf IP in LBR callchains - Fix display of metrics without sufficient permission in 'perf stat' - Print all machines in 'perf kvm report -vvv', not just the host - Switch from SHA-1 to BLAKE2s for build ID generation, remove SHA-1 code - Fix 'perf report's histogram entry collapsing with '-F' option - Use system's cacheline size instead of a hardcoded value in 'perf report' - Allow filtering conversion by time range in 'perf data' - Cover conversion to CTF using 'perf data' in 'perf test' - Address newer glibc const-correctness (-Werror=discarded-qualifiers) issues - Fixes and improvements for ARM's CoreSight support, simplify ARM SPE event config in 'perf mem', update docs for 'perf c2c' including the ARM events it can be used with - Build support for generating metrics from arch specific python script, add extra AMD, Intel, ARM64 metrics using it - Add AMD Zen 6 events and metrics - Add JSON file with OpenHW RISC-V CVA6 hardware counters - Add 'perf kvm' stats live testing - Add more 'perf stat' tests to 'perf test' - Fix segfault in `perf lock contention -b/--use-bpf` - Fix various 'perf test' cases for s390 - Build system cleanups, bump minimum shellcheck version to 0.7.2 - Support building the capstone based annotation routines as a plugin - Allow passing extra Clang flags via EXTRA_BPF_FLAGS * tag 'perf-tools-for-v7.0-1-2026-02-21' 
of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (255 commits) perf test script: Add python script testing support perf test script: Add perl script testing support perf script: Allow the generated script to be a path perf test: perf data --to-ctf testing perf test: Test pipe mode with data conversion --to-json perf json: Pipe mode --to-ctf support perf json: Pipe mode --to-json support perf check: Add libbabeltrace to the listed features perf build: Allow passing extra Clang flags via EXTRA_BPF_FLAGS perf test data_type_profiling.sh: Skip just the Rust tests if code_with_type workload is missing tools build: Fix feature test for rust compiler perf libunwind: Fix calls to thread__e_machine() perf stat: Add no-affinity flag perf evlist: Reduce affinity use and move into iterator, fix no affinity perf evlist: Missing TPEBS close in evlist__close() perf evlist: Special map propagation for tool events that read on 1 CPU perf stat-shadow: In prepare_metric fix guard on reading NULL perf_stat_evsel Revert "perf tool_pmu: More accurately set the cpus for tool events" tools build: Emit dependencies file for test-rust.bin tools build: Make test-rust.bin be removed by the 'clean' target ...
Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-xtools/perf/tests/shell/addr2line_inlines.sh96
-rwxr-xr-xtools/perf/tests/shell/data_type_profiling.sh89
-rwxr-xr-xtools/perf/tests/shell/evlist.sh9
-rwxr-xr-xtools/perf/tests/shell/inject-callchain.sh45
-rwxr-xr-xtools/perf/tests/shell/kvm.sh30
-rwxr-xr-xtools/perf/tests/shell/perf_sched_stats.sh64
-rwxr-xr-xtools/perf/tests/shell/record.sh16
-rwxr-xr-xtools/perf/tests/shell/sched.sh2
-rwxr-xr-xtools/perf/tests/shell/script_dlfilter.sh10
-rwxr-xr-xtools/perf/tests/shell/script_perl.sh102
-rwxr-xr-xtools/perf/tests/shell/script_python.sh113
-rwxr-xr-xtools/perf/tests/shell/stat.sh244
-rwxr-xr-xtools/perf/tests/shell/stat_all_metricgroups.sh26
-rwxr-xr-xtools/perf/tests/shell/stat_all_metrics.sh29
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh54
-rwxr-xr-xtools/perf/tests/shell/test_java_symbol.sh4
-rwxr-xr-xtools/perf/tests/shell/test_perf_data_converter_ctf.sh104
-rwxr-xr-xtools/perf/tests/shell/test_perf_data_converter_json.sh33
18 files changed, 1031 insertions, 39 deletions
diff --git a/tools/perf/tests/shell/addr2line_inlines.sh b/tools/perf/tests/shell/addr2line_inlines.sh
new file mode 100755
index 000000000000..e8754ef2d7f2
--- /dev/null
+++ b/tools/perf/tests/shell/addr2line_inlines.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+# test addr2line inline unwinding
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_dir=$(mktemp -d /tmp/perf-test-inline-addr2line.XXXXXXXXXX)
+perf_data="${test_dir}/perf.data"
+perf_script_txt="${test_dir}/perf_script.txt"
+
+cleanup() {
+ rm -rf "${test_dir}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_fp() {
+ echo "Inline unwinding fp verification test"
+ # Record with frame-pointer (fp) callchains.  NOTE(review): the original
+ # comment here said "only dwarf callchains support inlined functions",
+ # which looks copy-pasted from test_dwarf; the inlined frames asserted
+ # below presumably come from addr2line resolution at perf-script time --
+ # confirm.
+ perf record --call-graph fp -e task-clock:u -o "${perf_data}" -- perf test -w inlineloop 1
+
+ # Render the samples with srcline appended so inlined frames (tagged
+ # "(inlined)") become visible in the output.
+ perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
+
+ # Expect the leaf and middle functions to occur on lines in the 20s, with
+ # the non-inlined parent function on a line in the 30s.
+ if grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" &&
+ grep -q "inlineloop.c:3.$" "${perf_script_txt}"
+ then
+ echo "Inline unwinding fp verification test [Success]"
+ else
+ echo "Inline unwinding fp verification test [Failed missing inlined functions]"
+ err=1
+ fi
+}
+
+test_dwarf() {
+ echo "Inline unwinding dwarf verification test"
+ # Record data. Currently only dwarf callchains support inlined functions.
+ perf record --call-graph dwarf -e task-clock:u -o "${perf_data}" -- perf test -w inlineloop 1
+
+ # Check output with inline (default) and srcline
+ perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
+
+ # Expect the leaf and middle functions to occur on lines in the 20s, with
+ # the non-inlined parent function on a line in the 30s.
+ if grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" &&
+ grep -q "inlineloop.c:3.$" "${perf_script_txt}"
+ then
+ echo "Inline unwinding dwarf verification test [Success]"
+ else
+ echo "Inline unwinding dwarf verification test [Failed missing inlined functions]"
+ err=1
+ fi
+}
+
+test_lbr() {
+ echo "Inline unwinding LBR verification test"
+ # LBR is x86-only; probe both the plain "cpu" PMU and the hybrid
+ # "cpu_core" PMU for a branches capability before recording.
+ if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
+ [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
+ then
+ echo "Skip: only x86 CPUs support LBR"
+ return
+ fi
+
+ # Record with LBR-based callchains.  NOTE(review): the original comment
+ # here said "only dwarf callchains support inlined functions", which
+ # looks copy-pasted from test_dwarf -- confirm.
+ perf record --call-graph lbr -e cycles:u -o "${perf_data}" -- perf test -w inlineloop 1
+
+ # Render the samples with srcline appended so inlined frames (tagged
+ # "(inlined)") become visible in the output.
+ perf script -i "${perf_data}" --fields +srcline > "${perf_script_txt}"
+
+ # Expect the leaf and middle functions to occur on lines in the 20s, with
+ # the non-inlined parent function on a line in the 30s.
+ if grep -q "inlineloop.c:2. (inlined)" "${perf_script_txt}" &&
+ grep -q "inlineloop.c:3.$" "${perf_script_txt}"
+ then
+ echo "Inline unwinding lbr verification test [Success]"
+ else
+ echo "Inline unwinding lbr verification test [Failed missing inlined functions]"
+ err=1
+ fi
+}
+
+test_fp
+test_dwarf
+test_lbr
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/data_type_profiling.sh b/tools/perf/tests/shell/data_type_profiling.sh
new file mode 100755
index 000000000000..2a7f8f7c42d0
--- /dev/null
+++ b/tools/perf/tests/shell/data_type_profiling.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+# perf data type profiling tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# The logic below follows the same line as the annotate test, but looks for a
+# data type profiling manifestation
+
+# Values in testtypes and testprogs should match
+testtypes=("# data-type: struct Buf" "# data-type: struct _buf")
+testprogs=("perf test -w code_with_type" "perf test -w datasym")
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfout=$(mktemp /tmp/__perf_test.perf.out.XXXXX)
+
+cleanup() {
+ rm -rf "${perfdata}" "${perfout}"
+ rm -rf "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_basic_annotate() {
+ # $1 (mode): "Basic" records to a file; anything else ("Pipe") records
+ #            to stdout and feeds annotate through stdin.
+ # $2 (runtime): "Rust" or "C"; selects a parallel index into the
+ #            testtypes/testprogs arrays defined at the top of the file.
+ mode=$1
+ runtime=$2
+
+ echo "${mode} ${runtime} perf annotate test"
+
+ case "x${runtime}" in
+ "xRust")
+ if ! perf check feature -q rust
+ then
+ echo "Skip: code_with_type workload not built in 'perf test'"
+ return
+ fi
+ index=0 ;;
+
+ "xC")
+ index=1 ;;
+ esac
+
+ # ${testprogs[$index]} is deliberately unquoted: it holds a command
+ # plus arguments and relies on word splitting.
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf mem record -o "${perfdata}" ${testprogs[$index]} 2> /dev/null
+ else
+ perf mem record -o - ${testprogs[$index]} 2> /dev/null > "${perfdata}"
+ fi
+ # NOTE(review): "$?" here is the exit status of the preceding "if"
+ # compound statement, which is 0 on both branches; a failing
+ # "perf mem record" would already abort the script via "set -e".
+ # This check appears to be dead -- confirm.
+ if [ "x$?" != "x0" ]
+ then
+ echo "${mode} annotate [Failed: perf record]"
+ err=1
+ return
+ fi
+
+ # Generate the annotated output file
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --code-with-type -i "${perfdata}" --stdio --percent-limit 1 2> /dev/null > "${perfout}"
+ else
+ perf annotate --code-with-type -i - --stdio 2> /dev/null --percent-limit 1 < "${perfdata}" > "${perfout}"
+ fi
+
+ # check if it has the target data type (e.g. "# data-type: struct Buf")
+ if ! grep -q "${testtypes[$index]}" "${perfout}"
+ then
+ echo "${mode} annotate [Failed: missing target data type]"
+ cat "${perfout}"
+ err=1
+ return
+ fi
+ echo "${mode} annotate test [Success]"
+}
+
+test_basic_annotate Basic Rust
+test_basic_annotate Pipe Rust
+test_basic_annotate Basic C
+test_basic_annotate Pipe C
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/evlist.sh b/tools/perf/tests/shell/evlist.sh
index 140f099e75c1..8a22f4171c07 100755
--- a/tools/perf/tests/shell/evlist.sh
+++ b/tools/perf/tests/shell/evlist.sh
@@ -21,13 +21,13 @@ trap trap_cleanup EXIT TERM INT
test_evlist_simple() {
echo "Simple evlist test"
- if ! perf record -e cycles -o "${perfdata}" true 2> /dev/null
+ if ! perf record -e cpu-clock -o "${perfdata}" true 2> /dev/null
then
echo "Simple evlist [Failed record]"
err=1
return
fi
- if ! perf evlist -i "${perfdata}" | grep -q "cycles"
+ if ! perf evlist -i "${perfdata}" | grep -q "cpu-clock"
then
echo "Simple evlist [Failed to list event]"
err=1
@@ -38,13 +38,14 @@ test_evlist_simple() {
test_evlist_group() {
echo "Group evlist test"
- if ! perf record -e "{cycles,instructions}" -o "${perfdata}" true 2> /dev/null
+ if ! perf record -e "{cpu-clock,task-clock}" -o "${perfdata}" \
+ -- perf test -w noploop 2> /dev/null
then
echo "Group evlist [Skipped event group recording failed]"
return
fi
- if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cycles.*,.*instructions.*}"
+ if ! perf evlist -i "${perfdata}" -g | grep -q "{.*cpu-clock.*,.*task-clock.*}"
then
echo "Group evlist [Failed to list event group]"
err=1
diff --git a/tools/perf/tests/shell/inject-callchain.sh b/tools/perf/tests/shell/inject-callchain.sh
new file mode 100755
index 000000000000..a1cba8010f95
--- /dev/null
+++ b/tools/perf/tests/shell/inject-callchain.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# perf inject to convert DWARF callchains to regular ones
+# SPDX-License-Identifier: GPL-2.0
+
+if ! perf check feature -q dwarf; then
+ echo "SKIP: DWARF support is not available"
+ exit 2
+fi
+
+TESTDATA=$(mktemp /tmp/perf-test.XXXXXX)
+
+err=0
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ rm -f ${TESTDATA}*
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+# Record DWARF callchains, rewrite them to regular callchains with
+# "perf inject --convert-callchain", then verify both perf.data files
+# report the same hot entries.
+echo "recording data with DWARF callchain"
+perf record -F 999 --call-graph dwarf -o "${TESTDATA}" -- perf test -w noploop
+
+echo "convert DWARF callchain using perf inject"
+perf inject -i "${TESTDATA}" --convert-callchain -o "${TESTDATA}.new"
+
+perf report -i "${TESTDATA}" --no-children -q --percent-limit=1 > ${TESTDATA}.out
+perf report -i "${TESTDATA}.new" --no-children -q --percent-limit=1 > ${TESTDATA}.new.out
+
+# Only lines removed by the conversion matter; inlined frames are expected
+# to disappear, so they are filtered out.  NOTE(review): 'grep "^- "' keys
+# on removed diff lines whose content starts with a space -- this assumes
+# perf report's -q output is space-indented; confirm.
+echo "compare the both result excluding inlined functions"
+if diff -u "${TESTDATA}.out" "${TESTDATA}.new.out" | grep "^- " | grep -qv "(inlined)"; then
+ echo "Found some differences"
+ diff -u "${TESTDATA}.out" "${TESTDATA}.new.out"
+ err=1
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/kvm.sh b/tools/perf/tests/shell/kvm.sh
index 2a399b83fe80..f88e859025c4 100755
--- a/tools/perf/tests/shell/kvm.sh
+++ b/tools/perf/tests/shell/kvm.sh
@@ -7,9 +7,10 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_kvm_test.perf.data.XXXXX)
qemu_pid_file=$(mktemp /tmp/__perf_kvm_test.qemu.pid.XXXXX)
+log_file=$(mktemp /tmp/__perf_kvm_test.live_log.XXXXX)
cleanup() {
- rm -f "${perfdata}"
+ rm -f "${perfdata}" "${log_file}"
if [ -f "${qemu_pid_file}" ]; then
if [ -s "${qemu_pid_file}" ]; then
qemu_pid=$(cat "${qemu_pid_file}")
@@ -96,6 +97,32 @@ test_kvm_buildid_list() {
echo "perf kvm buildid-list test [Success]"
}
+test_kvm_stat_live() {
+ echo "Testing perf kvm stat live"
+
+ # Run perf kvm live for 5 seconds, monitoring that PID
+ # Use sleep to keep stdin open but silent, preventing EOF loop or interactive spam
+ if ! sleep 10 | timeout 5s perf kvm stat live -p "${qemu_pid}" > "${log_file}" 2>&1; then
+ retval=$?
+ if [ $retval -ne 124 ] && [ $retval -ne 0 ]; then
+ echo "perf kvm stat live [Failed: perf kvm stat live failed to start or run (ret=$retval)]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+ fi
+
+ # Check for some sample data (percentage)
+ if ! grep -E -q "[0-9]+\.[0-9]+%" "${log_file}"; then
+ echo "perf kvm stat live [Failed: no sample percentage found]"
+ head -n 50 "${log_file}"
+ err=1
+ return
+ fi
+
+ echo "perf kvm stat live test [Success]"
+}
+
setup_qemu() {
# Find qemu
if [ "$(uname -m)" = "x86_64" ]; then
@@ -148,6 +175,7 @@ if [ $err -eq 0 ]; then
test_kvm_stat
test_kvm_record_report
test_kvm_buildid_list
+ test_kvm_stat_live
fi
cleanup
diff --git a/tools/perf/tests/shell/perf_sched_stats.sh b/tools/perf/tests/shell/perf_sched_stats.sh
new file mode 100755
index 000000000000..2b1410b050d0
--- /dev/null
+++ b/tools/perf/tests/shell/perf_sched_stats.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+# perf sched stats tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_perf_sched_stats_record() {
+ echo "Basic perf sched stats record test"
+ # Match the literal banner line printed by 'perf sched stats record'.
+ # The brackets must be escaped: an unescaped "[ ... ]" in an ERE is a
+ # bracket expression, so the previous pattern matched ANY single
+ # character from that set (i.e. almost any output) instead of the
+ # message itself, making the test unable to fail meaningfully.
+ if ! perf sched stats record true 2>&1 | \
+ grep -E -q "\[ perf sched stats: Wrote samples to perf.data \]"
+ then
+ echo "Basic perf sched stats record test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic perf sched stats record test [Success]"
+}
+
+test_perf_sched_stats_report() {
+ # Record a schedstat snapshot into ./perf.data, then verify that the
+ # report output contains the "Description" column header.  The
+ # perf.data file is removed on both the success and failure paths.
+ echo "Basic perf sched stats report test"
+ perf sched stats record true > /dev/null
+ if perf sched stats report 2>&1 | grep -E -q "Description"
+ then
+ rm perf.data
+ echo "Basic perf sched stats report test [Success]"
+ else
+ echo "Basic perf sched stats report test [Failed]"
+ err=1
+ rm perf.data
+ fi
+}
+
+test_perf_sched_stats_live() {
+ echo "Basic perf sched stats live mode test"
+ if ! perf sched stats true 2>&1 | grep -E -q "Description"
+ then
+ echo "Basic perf sched stats live mode test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic perf sched stats live mode test [Success]"
+}
+
+test_perf_sched_stats_diff() {
+ echo "Basic perf sched stats diff test"
+ # Two consecutive records leave perf.data.old and perf.data behind,
+ # which is exactly the default input pair 'perf sched stats diff'
+ # consumes.
+ perf sched stats record true > /dev/null
+ perf sched stats record true > /dev/null
+ if ! perf sched stats diff > /dev/null
+ then
+ echo "Basic perf sched stats diff test [Failed]"
+ err=1
+ rm perf.data.old perf.data
+ return
+ fi
+ rm perf.data.old perf.data
+ echo "Basic perf sched stats diff test [Success]"
+}
+
+test_perf_sched_stats_record
+test_perf_sched_stats_report
+test_perf_sched_stats_live
+test_perf_sched_stats_diff
+exit $err
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 0f5841c479e7..7cb81cf3444a 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -260,7 +260,21 @@ test_uid() {
test_leader_sampling() {
echo "Basic leader sampling test"
- if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
+ events="{cycles,cycles}:Su"
+ [ "$(uname -m)" = "s390x" ] && {
+ [ ! -d /sys/devices/cpum_sf ] && {
+ echo "No CPUMF [Skipped record]"
+ return
+ }
+ events="{cpum_sf/SF_CYCLES_BASIC/,cycles}:Su"
+ perf record -o "${perfdata}" -e "$events" -- perf test -w brstack 2> /dev/null
+ # Perf grouping might be unsupported, depends on version.
+ [ "$?" -ne 0 ] && {
+ echo "Grouping not support [Skipped record]"
+ return
+ }
+ }
+ if ! perf record -o "${perfdata}" -e "$events" -- \
perf test -w brstack 2> /dev/null
then
echo "Leader sampling [Failed record]"
diff --git a/tools/perf/tests/shell/sched.sh b/tools/perf/tests/shell/sched.sh
index b9b81eaf856e..b9637069adb1 100755
--- a/tools/perf/tests/shell/sched.sh
+++ b/tools/perf/tests/shell/sched.sh
@@ -53,7 +53,7 @@ start_noploops() {
}
cleanup_noploops() {
- kill "$PID1" "$PID2"
+ kill "$PID1" "$PID2" || true
}
test_sched_record() {
diff --git a/tools/perf/tests/shell/script_dlfilter.sh b/tools/perf/tests/shell/script_dlfilter.sh
index 45c97d4a7d5f..aaed92bb7828 100755
--- a/tools/perf/tests/shell/script_dlfilter.sh
+++ b/tools/perf/tests/shell/script_dlfilter.sh
@@ -68,17 +68,17 @@ test_dlfilter() {
fi
# Build the dlfilter
- if ! cc -c -I tools/perf/include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
+ if ! cc -c -I ${shelldir}/../../include -fpic -x c "${dlfilter_c}" -o "${dlfilter_so}.o"
then
- echo "Basic --dlfilter test [Failed to build dlfilter object]"
- err=1
+ echo "Basic --dlfilter test [Skip - failed to build dlfilter object]"
+ err=2
return
fi
if ! cc -shared -o "${dlfilter_so}" "${dlfilter_so}.o"
then
- echo "Basic --dlfilter test [Failed to link dlfilter shared object]"
- err=1
+ echo "Basic --dlfilter test [Skip - failed to link dlfilter shared object]"
+ err=2
return
fi
diff --git a/tools/perf/tests/shell/script_perl.sh b/tools/perf/tests/shell/script_perl.sh
new file mode 100755
index 000000000000..b6d65b6fbda1
--- /dev/null
+++ b/tools/perf/tests/shell/script_perl.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# perf script perl tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# set PERF_EXEC_PATH to find scripts in the source directory
+perfdir=$(dirname "$0")/../..
+if [ -e "$perfdir/scripts/perl/Perf-Trace-Util" ]; then
+ export PERF_EXEC_PATH=$perfdir
+fi
+
+
+perfdata=$(mktemp /tmp/__perf_test_script_perl.perf.data.XXXXX)
+generated_script=$(mktemp /tmp/__perf_test_script.XXXXX.pl)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${generated_script}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup TERM INT
+trap cleanup EXIT
+
+check_perl_support() {
+ # Return 0 when perf was built with libperl support; otherwise print a
+ # skip message and return 2 (the "skip" exit code used by perf shell
+ # tests).
+ if ! perf check feature -q libperl; then
+  echo "perf script perl test [Skipped: no libperl support]"
+  return 2
+ fi
+ return 0
+}
+
+test_script() {
+ local event_name=$1
+ local expected_output=$2
+ local record_opts=$3
+
+ echo "Testing event: $event_name"
+
+ # Try to record. If this fails, it might be permissions or lack of support.
+ # We return 2 to indicate "skip this event" rather than "fail test".
+ if ! perf record -o "${perfdata}" -e "$event_name" $record_opts -- perf test -w thloop > /dev/null 2>&1; then
+ echo "perf script perl test [Skipped: failed to record $event_name]"
+ return 2
+ fi
+
+ echo "Generating perl script..."
+ if ! perf script -i "${perfdata}" -g "${generated_script}"; then
+ echo "perf script perl test [Failed: script generation for $event_name]"
+ return 1
+ fi
+
+ if [ ! -f "${generated_script}" ]; then
+ echo "perf script perl test [Failed: script not generated for $event_name]"
+ return 1
+ fi
+
+ echo "Executing perl script..."
+ output=$(perf script -i "${perfdata}" -s "${generated_script}" 2>&1)
+
+ if echo "$output" | grep -q "$expected_output"; then
+ echo "perf script perl test [Success: $event_name triggered $expected_output]"
+ return 0
+ else
+ echo "perf script perl test [Failed: $event_name did not trigger $expected_output]"
+ echo "Output was:"
+ echo "$output" | head -n 20
+ return 1
+ fi
+}
+
+check_perl_support || exit 2
+
+# Try tracepoint first
+test_script "sched:sched_switch" "sched::sched_switch" "-c 1" && res=0 || res=$?
+
+if [ $res -eq 0 ]; then
+ exit 0
+elif [ $res -eq 1 ]; then
+ exit 1
+fi
+
+# If tracepoint skipped (res=2), try task-clock
+# For generic events like task-clock, the generated script uses process_event()
+# which dumps data using Data::Dumper. We check for "$VAR1" which is standard Dumper output.
+test_script "task-clock" "\$VAR1" "-c 100" && res=0 || res=$?
+
+if [ $res -eq 0 ]; then
+ exit 0
+elif [ $res -eq 1 ]; then
+ exit 1
+fi
+
+# If both skipped
+echo "perf script perl test [Skipped: Could not record tracepoint or task-clock]"
+exit 2
diff --git a/tools/perf/tests/shell/script_python.sh b/tools/perf/tests/shell/script_python.sh
new file mode 100755
index 000000000000..6bc66074a31f
--- /dev/null
+++ b/tools/perf/tests/shell/script_python.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# perf script python tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# set PERF_EXEC_PATH to find scripts in the source directory
+perfdir=$(dirname "$0")/../..
+if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
+ export PERF_EXEC_PATH=$perfdir
+fi
+
+
+perfdata=$(mktemp /tmp/__perf_test_script_python.perf.data.XXXXX)
+generated_script=$(mktemp /tmp/__perf_test_script.XXXXX.py)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${generated_script}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup TERM INT
+trap cleanup EXIT
+
+check_python_support() {
+ # Return 0 when perf was built with libpython support; otherwise print
+ # a skip message and return 2 (the "skip" exit code used by perf shell
+ # tests).
+ if ! perf check feature -q libpython; then
+  echo "perf script python test [Skipped: no libpython support]"
+  return 2
+ fi
+ return 0
+}
+
+test_script() {
+ local event_name=$1
+ local expected_output=$2
+ local record_opts=$3
+
+ echo "Testing event: $event_name"
+
+ # Try to record. If this fails, it might be permissions or lack of
+ # support. Return 2 to indicate "skip this event" rather than "fail
+ # test".
+ if ! perf record -o "${perfdata}" -e "$event_name" $record_opts -- perf test -w thloop > /dev/null 2>&1; then
+ echo "perf script python test [Skipped: failed to record $event_name]"
+ return 2
+ fi
+
+ echo "Generating python script..."
+ if ! perf script -i "${perfdata}" -g "${generated_script}"; then
+ echo "perf script python test [Failed: script generation for $event_name]"
+ return 1
+ fi
+
+ if [ ! -f "${generated_script}" ]; then
+ echo "perf script python test [Failed: script not generated for $event_name]"
+ return 1
+ fi
+
+ # Perf script -g python doesn't generate process_event for generic
+ # events so append it manually to test that the callback works.
+ if ! grep -q "def process_event" "${generated_script}"; then
+ cat <<EOF >> "${generated_script}"
+
+def process_event(param_dict):
+ print("param_dict: %s" % param_dict)
+EOF
+ fi
+
+ echo "Executing python script..."
+ output=$(perf script -i "${perfdata}" -s "${generated_script}" 2>&1)
+
+ if echo "$output" | grep -q "$expected_output"; then
+ echo "perf script python test [Success: $event_name triggered $expected_output]"
+ return 0
+ else
+ echo "perf script python test [Failed: $event_name did not trigger $expected_output]"
+ echo "Output was:"
+ echo "$output" | head -n 20
+ return 1
+ fi
+}
+
+check_python_support || exit 2
+
+# Try tracepoint first
+test_script "sched:sched_switch" "sched__sched_switch" "-c 1" && res=0 || res=$?
+
+if [ $res -eq 0 ]; then
+ exit 0
+elif [ $res -eq 1 ]; then
+ exit 1
+fi
+
+# If tracepoint skipped (res=2), try task-clock
+# For generic events like task-clock, the generated script uses process_event()
+# which prints the param_dict.
+test_script "task-clock" "param_dict" "-c 100" && res=0 || res=$?
+
+if [ $res -eq 0 ]; then
+ exit 0
+elif [ $res -eq 1 ]; then
+ exit 1
+fi
+
+# If both skipped
+echo "perf script python test [Skipped: Could not record tracepoint or task-clock]"
+exit 2
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 0b2f0f88ca16..4edb04039036 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -5,6 +5,21 @@
set -e
err=0
+stat_output=$(mktemp /tmp/perf-stat-test-output.XXXXX)
+
+cleanup() {
+ rm -f "${stat_output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
test_default_stat() {
echo "Basic stat command test"
if ! perf stat true 2>&1 | grep -E -q "Performance counter stats for 'true':"
@@ -233,7 +248,7 @@ test_hybrid() {
fi
# Run default Perf stat
- cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " -c)
+ cycles_events=$(perf stat -a -- sleep 0.1 2>&1 | grep -E "/cpu-cycles/[uH]*| cpu-cycles[:uH]* " | wc -l)
# The expectation is that default output will have a cycles events on each
# hybrid PMU. In situations with no cycles PMU events, like virtualized, this
@@ -248,6 +263,226 @@ test_hybrid() {
echo "hybrid test [Success]"
}
+test_stat_cpu() {
+ echo "stat -C <cpu> test"
+ # Test the full online CPU list (ranges and lists)
+ online_cpus=$(cat /sys/devices/system/cpu/online)
+ if ! perf stat -C "$online_cpus" -a true > "${stat_output}" 2>&1
+ then
+ echo "stat -C <cpu> test [Failed - command failed for cpus $online_cpus]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "Performance counter stats for" "${stat_output}"
+ then
+ echo "stat -C <cpu> test [Failed - missing output for cpus $online_cpus]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ # Test each individual online CPU
+ for cpu_dir in /sys/devices/system/cpu/cpu[0-9]*; do
+ cpu=${cpu_dir##*/cpu}
+ # Check if online
+ if [ -f "$cpu_dir/online" ] && [ "$(cat "$cpu_dir/online")" -eq 0 ]
+ then
+ continue
+ fi
+
+ if ! perf stat -C "$cpu" -a true > "${stat_output}" 2>&1
+ then
+ echo "stat -C <cpu> test [Failed - command failed for cpu $cpu]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ if ! grep -E -q "Performance counter stats for" "${stat_output}"
+ then
+ echo "stat -C <cpu> test [Failed - missing output for cpu $cpu]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ done
+
+ # Test synthetic list and range if cpu0 and cpu1 are online
+ c0_online=0
+ c1_online=0
+ if [ -d "/sys/devices/system/cpu/cpu0" ]
+ then
+ if [ ! -f "/sys/devices/system/cpu/cpu0/online" ] || [ "$(cat /sys/devices/system/cpu/cpu0/online)" -eq 1 ]
+ then
+ c0_online=1
+ fi
+ fi
+ if [ -d "/sys/devices/system/cpu/cpu1" ]
+ then
+ if [ ! -f "/sys/devices/system/cpu/cpu1/online" ] || [ "$(cat /sys/devices/system/cpu/cpu1/online)" -eq 1 ]
+ then
+ c1_online=1
+ fi
+ fi
+
+ if [ $c0_online -eq 1 ] && [ $c1_online -eq 1 ]
+ then
+ # Test list "0,1"
+ if ! perf stat -C "0,1" -a true > "${stat_output}" 2>&1
+ then
+ echo "stat -C <cpu> test [Failed - command failed for cpus 0,1]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ if ! grep -E -q "Performance counter stats for" "${stat_output}"
+ then
+ echo "stat -C <cpu> test [Failed - missing output for cpus 0,1]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ # Test range "0-1"
+ if ! perf stat -C "0-1" -a true > "${stat_output}" 2>&1
+ then
+ echo "stat -C <cpu> test [Failed - command failed for cpus 0-1]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ if ! grep -E -q "Performance counter stats for" "${stat_output}"
+ then
+ echo "stat -C <cpu> test [Failed - missing output for cpus 0-1]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ fi
+
+ echo "stat -C <cpu> test [Success]"
+}
+
+test_stat_no_aggr() {
+ echo "stat -A test"
+ if ! perf stat -A -a true > "${stat_output}" 2>&1
+ then
+ echo "stat -A test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "CPU" "${stat_output}"
+ then
+ echo "stat -A test [Failed - missing CPU column]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ echo "stat -A test [Success]"
+}
+
+test_stat_detailed() {
+ echo "stat -d test"
+ if ! perf stat -d true > "${stat_output}" 2>&1
+ then
+ echo "stat -d test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "Performance counter stats" "${stat_output}"
+ then
+ echo "stat -d test [Failed - missing output]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! perf stat -dd true > "${stat_output}" 2>&1
+ then
+ echo "stat -dd test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "Performance counter stats" "${stat_output}"
+ then
+ echo "stat -dd test [Failed - missing output]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! perf stat -ddd true > "${stat_output}" 2>&1
+ then
+ echo "stat -ddd test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "Performance counter stats" "${stat_output}"
+ then
+ echo "stat -ddd test [Failed - missing output]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ echo "stat -d test [Success]"
+}
+
+test_stat_repeat() {
+ echo "stat -r test"
+ if ! perf stat -r 2 true > "${stat_output}" 2>&1
+ then
+ echo "stat -r test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+
+ if ! grep -E -q "\([[:space:]]*\+-.*%[[:space:]]*\)" "${stat_output}"
+ then
+ echo "stat -r test [Failed - missing variance]"
+ cat "${stat_output}"
+ err=1
+ return
+ fi
+ echo "stat -r test [Success]"
+}
+
+test_stat_pid() {
+ echo "stat -p test"
+ sleep 1 &
+ pid=$!
+ if ! perf stat -p $pid > "${stat_output}" 2>&1
+ then
+ echo "stat -p test [Failed - command failed]"
+ cat "${stat_output}"
+ err=1
+ kill $pid 2>/dev/null || true
+ wait $pid 2>/dev/null || true
+ return
+ fi
+
+ if ! grep -E -q "Performance counter stats" "${stat_output}"
+ then
+ echo "stat -p test [Failed - missing output]"
+ cat "${stat_output}"
+ err=1
+ else
+ echo "stat -p test [Success]"
+ fi
+ kill $pid 2>/dev/null || true
+ wait $pid 2>/dev/null || true
+}
+
test_default_stat
test_null_stat
test_offline_cpu_stat
@@ -258,4 +493,11 @@ test_topdown_groups
test_topdown_weak_groups
test_cputype
test_hybrid
+test_stat_cpu
+test_stat_no_aggr
+test_stat_detailed
+test_stat_repeat
+test_stat_pid
+
+cleanup
exit $err
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
index 1400880ec01f..81bc7070b5ab 100755
--- a/tools/perf/tests/shell/stat_all_metricgroups.sh
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -12,31 +12,32 @@ if ParanoidAndNotRoot 0
then
system_wide_flag=""
fi
-err=0
+
+err=3
+skip=0
for m in $(perf list --raw-dump metricgroups)
do
echo "Testing $m"
result=$(perf stat -M "$m" $system_wide_flag sleep 0.01 2>&1)
result_err=$?
- if [[ $result_err -gt 0 ]]
+ if [[ $result_err -eq 0 ]]
then
+ if [[ "$err" -ne 1 ]]
+ then
+ err=0
+ fi
+ else
if [[ "$result" =~ \
"Access to performance monitoring and observability operations is limited" ]]
then
echo "Permission failure"
echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
+ skip=1
elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
then
echo "Permissions - need system wide mode"
echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
+ skip=1
elif [[ "$m" == @(Default2|Default3|Default4) ]]
then
echo "Ignoring failures in $m that may contain unsupported legacy events"
@@ -48,4 +49,9 @@ do
fi
done
+if [[ "$err" -eq 3 && "$skip" -eq 1 ]]
+then
+ err=2
+fi
+
exit $err
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 3dabb39c7cc8..b582d23f28c9 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -15,7 +15,8 @@ then
test_prog="perf test -w noploop"
fi
-err=0
+skip=0
+err=3
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
result=$(perf stat -M "$m" $system_wide_flag -- $test_prog 2>&1)
@@ -23,6 +24,10 @@ for m in $(perf list --raw-dump metrics); do
if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
# No error result and metric shown.
+ if [[ "$err" -ne 1 ]]
+ then
+ err=0
+ fi
continue
fi
if [[ "$result" =~ "Cannot resolve IDs for" || "$result" =~ "No supported events found" ]]
@@ -44,7 +49,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
@@ -53,7 +58,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
elif [[ "$result" =~ "<not supported>" ]]
@@ -68,7 +73,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
elif [[ "$result" =~ "<not counted>" ]]
@@ -77,7 +82,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
@@ -86,7 +91,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
elif [[ "$result" =~ "PMM" ]]
@@ -95,7 +100,7 @@ for m in $(perf list --raw-dump metrics); do
echo $result
if [[ $err -eq 0 ]]
then
- err=2 # Skip
+ skip=1
fi
continue
fi
@@ -106,6 +111,10 @@ for m in $(perf list --raw-dump metrics); do
if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
# No error result and metric shown.
+ if [[ "$err" -ne 1 ]]
+ then
+ err=0
+ fi
continue
fi
echo "[Failed $m] has non-zero error '$result_err' or not printed in:"
@@ -113,4 +122,10 @@ for m in $(perf list --raw-dump metrics); do
err=1
done
+# return SKIP only if no success returned
+if [[ "$err" -eq 3 && "$skip" -eq 1 ]]
+then
+ err=2
+fi
+
exit "$err"
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 1c750b67d141..bbf89e944e7b 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -198,6 +198,58 @@ arm_cs_etm_basic_test() {
arm_cs_report "CoreSight basic testing with '$*'" $err
}
+arm_cs_etm_test_cpu_list() {
+ echo "Testing sparse CPU list: $1"
+ perf record -o ${perfdata} -e cs_etm//u -C $1 \
+ -- taskset --cpu-list $1 true > /dev/null 2>&1
+ perf_script_branch_samples true
+ err=$?
+ arm_cs_report "CoreSight sparse CPUs with '$*'" $err
+}
+
+arm_cs_etm_sparse_cpus_test() {
+ # Iterate for every ETM device
+ cpus=()
+ for dev in /sys/bus/event_source/devices/cs_etm/cpu*; do
+ # Canonicalize the path
+ dev=`readlink -f $dev`
+
+ # Find the ETM device belonging to which CPU
+ cpus+=("$(cat $dev/cpu)")
+ done
+
+ mapfile -t cpus < <(printf '%s\n' "${cpus[@]}" | sort -n)
+ total=${#cpus[@]}
+
+ # Need more than 1 to test
+ if [ $total -le 1 ]; then
+ return 0
+ fi
+
+ half=$((total / 2))
+
+ # First half
+ first_half=$(IFS=,; echo "${cpus[*]:0:$half}")
+ arm_cs_etm_test_cpu_list $first_half
+
+ # Second half
+ second_half=$(IFS=,; echo "${cpus[*]:$half}")
+ arm_cs_etm_test_cpu_list $second_half
+
+ # Odd list is the same as halves unless >= 4 CPUs
+ if [ $total -lt 4 ]; then
+ return 0
+ fi
+
+ # Odd indices
+ odd_cpus=()
+ for ((i=1; i<total; i+=2)); do
+ odd_cpus+=("${cpus[$i]}")
+ done
+ odd_list=$(IFS=,; echo "${odd_cpus[*]}")
+ arm_cs_etm_test_cpu_list $odd_list
+}
+
arm_cs_etm_traverse_path_test
arm_cs_etm_system_wide_test
arm_cs_etm_snapshot_test
@@ -211,4 +263,6 @@ arm_cs_etm_basic_test -e cs_etm/timestamp=1/ -a
arm_cs_etm_basic_test -e cs_etm/timestamp=0/
arm_cs_etm_basic_test -e cs_etm/timestamp=1/
+arm_cs_etm_sparse_cpus_test
+
exit $glb_err
diff --git a/tools/perf/tests/shell/test_java_symbol.sh b/tools/perf/tests/shell/test_java_symbol.sh
index 499539d1c479..63a2cc9bf13f 100755
--- a/tools/perf/tests/shell/test_java_symbol.sh
+++ b/tools/perf/tests/shell/test_java_symbol.sh
@@ -22,10 +22,13 @@ cleanup_files()
trap cleanup_files exit term int
+PERF_DIR=$(dirname "$(which perf)")
if [ -e "$PWD/tools/perf/libperf-jvmti.so" ]; then
LIBJVMTI=$PWD/tools/perf/libperf-jvmti.so
elif [ -e "$PWD/libperf-jvmti.so" ]; then
LIBJVMTI=$PWD/libperf-jvmti.so
+elif [ -e "$PERF_DIR/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PERF_DIR/libperf-jvmti.so
elif [ -e "$PREFIX/lib64/libperf-jvmti.so" ]; then
LIBJVMTI=$PREFIX/lib64/libperf-jvmti.so
elif [ -e "$PREFIX/lib/libperf-jvmti.so" ]; then
@@ -34,6 +37,7 @@ elif [ -e "/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-gen
LIBJVMTI=/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so
else
echo "Fail to find libperf-jvmti.so"
+
# JVMTI is a build option, skip the test if fail to find lib
exit 2
fi
diff --git a/tools/perf/tests/shell/test_perf_data_converter_ctf.sh b/tools/perf/tests/shell/test_perf_data_converter_ctf.sh
new file mode 100755
index 000000000000..334eebc9945e
--- /dev/null
+++ b/tools/perf/tests/shell/test_perf_data_converter_ctf.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+# 'perf data convert --to-ctf' command test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ctf_dir=$(mktemp -d /tmp/__perf_test.ctf.XXXXX)
+
+cleanup()
+{
+ rm -f "${perfdata}"
+ rm -rf "${ctf_dir}"
+ trap - exit term int
+}
+
+trap_cleanup()
+{
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit ${err}
+}
+trap trap_cleanup exit term int
+
+check_babeltrace_support()
+{
+ if ! perf check feature libbabeltrace
+ then
+ echo "perf not linked with libbabeltrace, skipping test"
+ exit 2
+ fi
+}
+
+test_ctf_converter_file()
+{
+ echo "Testing Perf Data Conversion Command to CTF (File input)"
+ # Record some data
+ if ! perf record -o "$perfdata" -F 99 -g -- perf test -w noploop
+ then
+ echo "Failed to record perf data"
+ err=1
+ return
+ fi
+
+ # Cleanup previous ctf dir
+ rm -rf "${ctf_dir}"
+
+ # Convert
+ if ! perf data convert --to-ctf "$ctf_dir" --force -i "$perfdata"
+ then
+ echo "Perf Data Converter Command to CTF (File input) [FAILED]"
+ err=1
+ return
+ fi
+
+ if [ -d "${ctf_dir}" ] && [ "$(ls -A "${ctf_dir}")" ]
+ then
+ echo "Perf Data Converter Command to CTF (File input) [SUCCESS]"
+ else
+ echo "Perf Data Converter Command to CTF (File input) [FAILED]"
+ echo " Output directory empty or missing"
+ err=1
+ fi
+}
+
+test_ctf_converter_pipe()
+{
+ echo "Testing Perf Data Conversion Command to CTF (Pipe mode)"
+
+ # Cleanup previous ctf dir
+ rm -rf "${ctf_dir}"
+
+ # Record to stdout and pipe to $perfdata file
+ if ! perf record -o - -F 99 -g -- perf test -w noploop > "$perfdata"
+ then
+ echo "Failed to record perf data"
+ err=1
+ return
+ fi
+
+ if ! perf data convert --to-ctf "$ctf_dir" --force -i "$perfdata"
+ then
+ echo "Perf Data Converter Command to CTF (Pipe mode) [FAILED]"
+ err=1
+ return
+ fi
+
+ if [ -d "${ctf_dir}" ] && [ "$(ls -A "${ctf_dir}")" ]
+ then
+ echo "Perf Data Converter Command to CTF (Pipe mode) [SUCCESS]"
+ else
+ echo "Perf Data Converter Command to CTF (Pipe mode) [FAILED]"
+ echo " Output directory empty or missing"
+ err=1
+ fi
+}
+
+check_babeltrace_support
+test_ctf_converter_file
+test_ctf_converter_pipe
+
+exit ${err}
diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh
index c4f1b59d116f..35d81e39a26c 100755
--- a/tools/perf/tests/shell/test_perf_data_converter_json.sh
+++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh
@@ -15,29 +15,42 @@ result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
cleanup()
{
- rm -f "${perfdata}"
+ rm -f "${perfdata}"*
rm -f "${result}"
trap - exit term int
}
trap_cleanup()
{
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
- exit ${err}
+ exit 1
}
trap trap_cleanup exit term int
test_json_converter_command()
{
- echo "Testing Perf Data Convertion Command to JSON"
- perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1
- perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1
+ echo "Testing Perf Data Conversion Command to JSON"
+ perf record -o "$perfdata" -F 99 -g -- perf test -w noploop
+ perf data convert --to-json "$result" --force -i "$perfdata"
if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
echo "Perf Data Converter Command to JSON [SUCCESS]"
else
echo "Perf Data Converter Command to JSON [FAILED]"
err=1
- exit
+ fi
+}
+
+test_json_converter_pipe()
+{
+ echo "Testing Perf Data Conversion Command to JSON (Pipe mode)"
+ perf record -o - -F 99 -g -- perf test -w noploop > "$perfdata"
+ cat "$perfdata" | perf data convert --to-json "$result" --force -i -
+ if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
+ echo "Perf Data Converter Command to JSON (Pipe mode) [SUCCESS]"
+ else
+ echo "Perf Data Converter Command to JSON (Pipe mode) [FAILED]"
+ err=1
fi
}
@@ -50,16 +63,18 @@ validate_json_format()
else
echo "The file does not contain valid JSON format [FAILED]"
err=1
- exit
fi
else
echo "File not found [FAILED]"
- err=2
- exit
+ err=1
fi
}
test_json_converter_command
validate_json_format
+test_json_converter_pipe
+validate_json_format
+
+cleanup
exit ${err}