author     Damien George <damien@micropython.org>    2022-02-10 14:52:51 +1100
committer  Damien George <damien@micropython.org>    2022-02-10 15:25:33 +1100
commit     b33fdbe5357ae224d668b827958fbbd04b507540 (patch)
tree       ae5e1a1f8774db5f322349233d17a305349eb36b /tests/run-perfbench.py
parent     d8a7bf83ccf28d0e8acf9790a1fc38aa5d13a2e5 (diff)
tests/run-perfbench.py: Allow a test to SKIP, and to have a .exp file.
Signed-off-by: Damien George <damien@micropython.org>
Diffstat (limited to 'tests/run-perfbench.py')
-rwxr-xr-x  tests/run-perfbench.py  11
1 file changed, 10 insertions(+), 1 deletion(-)
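
With this change a benchmark can opt out on targets that cannot run it: if the script's entire output is the string "SKIP", run_benchmark_on_target() returns (-1, -1, "SKIP") instead of trying to parse a timing line. A minimal sketch of a hypothetical test body using this convention follows (the module name and the placeholder timing line are assumptions for illustration, not part of the commit):

# Hypothetical benchmark body (feature check and values are illustrative only).
# Printing exactly "SKIP" makes run_benchmark_on_target() return (-1, -1, "SKIP");
# otherwise the test is expected to emit the usual "<time> <norm> <result>" line
# that output.split(None, 2) parses.
try:
    import framebuf  # assumed optional dependency, for illustration only
except ImportError:
    print("SKIP")
    raise SystemExit

print(1000000, 1000, None)  # placeholder timing line: time, norm, result
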
diff --git a/tests/run-perfbench.py b/tests/run-perfbench.py
index 5f299281f..fccb7a768 100755
--- a/tests/run-perfbench.py
+++ b/tests/run-perfbench.py
@@ -74,6 +74,8 @@ def run_feature_test(target, test):
 def run_benchmark_on_target(target, script):
     output, err = run_script_on_target(target, script)
     if err is None:
+        if output == "SKIP":
+            return -1, -1, "SKIP"
         time, norm, result = output.split(None, 2)
         try:
             return int(time), int(norm), result
@@ -133,7 +135,14 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
         # Check result against truth if needed
         if error is None and result_out != "None":
-            _, _, result_exp = run_benchmark_on_target(PYTHON_TRUTH, test_script)
+            test_file_expected = test_file + ".exp"
+            if os.path.isfile(test_file_expected):
+                # Expected result is given by a file, so read that in
+                with open(test_file_expected) as f:
+                    result_exp = f.read().strip()
+            else:
+                # Run CPython to work out the expected result
+                _, _, result_exp = run_benchmark_on_target(PYTHON_TRUTH, test_script)
             if result_out != result_exp:
                 error = "FAIL truth"
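
The second hunk lets a test ship its expected result in a sibling ".exp" file, read and compared after .strip(), so the runner does not have to invoke the CPython truth target. A minimal sketch of providing such a file for a hypothetical benchmark (the path and result value are assumptions for illustration):

# Hypothetical: write perf_bench/misc_example.py.exp next to the benchmark so
# run-perfbench.py reads it as result_exp instead of running PYTHON_TRUTH.
# The content is compared after .strip(), so a trailing newline is fine.
with open("perf_bench/misc_example.py.exp", "w") as f:
    f.write("12345\n")  # assumed expected result string, for illustration only
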