-rwxr-xr-x  tests/run-perfbench.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/tests/run-perfbench.py b/tests/run-perfbench.py
index c143ae32f..05efa8331 100755
--- a/tests/run-perfbench.py
+++ b/tests/run-perfbench.py
@@ -88,6 +88,7 @@ def run_benchmark_on_target(target, script):
 def run_benchmarks(target, param_n, param_m, n_average, test_list):
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
+    target_had_error = False
 
     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -147,6 +148,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
                 error = "FAIL truth"
 
         if error is not None:
+            if not error.startswith("SKIP"):
+                target_had_error = True
             print(error)
         else:
             t_avg, t_sd = compute_stats(times)
@@ -162,6 +165,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
         sys.stdout.flush()
 
+    return target_had_error
+
 
 def parse_output(filename):
     with open(filename) as f:
@@ -279,12 +284,15 @@ def main():
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    run_benchmarks(target, N, M, n_average, tests)
+    target_had_error = run_benchmarks(target, N, M, n_average, tests)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
         target.close()
 
+    if target_had_error:
+        sys.exit(1)
+
 
 if __name__ == "__main__":
     main()
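
For context, the sketch below is a minimal standalone illustration (not part of the commit) of the pattern this change introduces: the runner returns a "had error" flag, SKIP results do not count as failures, and main() converts the flag into a nonzero process exit code so CI scripts can detect a failed benchmark run. The names run_all and the placeholder test results are hypothetical; only the shape mirrors the diff above.

    # Minimal standalone sketch of the error-propagation pattern in the
    # commit above. run_all and the fake results are hypothetical.
    import sys


    def run_all(tests):
        had_error = False
        for name, outcome in tests:
            print(name + ": " + outcome)
            # Mirror the commit's rule: a SKIP result is not a failure,
            # but any other error marks the whole run as failed.
            if outcome != "OK" and not outcome.startswith("SKIP"):
                had_error = True
        return had_error


    def main():
        # Placeholder results standing in for real benchmark runs.
        tests = [
            ("bm_a.py", "OK"),
            ("bm_b.py", "SKIP native"),
            ("bm_c.py", "FAIL truth"),
        ]
        if run_all(tests):
            sys.exit(1)  # nonzero exit: at least one non-SKIP failure


    if __name__ == "__main__":
        main()

Running the sketch prints the three results and exits with status 1 (observable via `echo $?`) because bm_c.py fails; a run containing only OK and SKIP results exits 0, which is exactly the behaviour the commit adds to run-perfbench.py.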