| field | value | date |
|---|---|---|
| author | Damien George <damien@micropython.org> | 2022-05-17 14:00:01 +1000 |
| committer | Damien George <damien@micropython.org> | 2022-05-17 14:06:41 +1000 |
| commit | 6f68a8c2402dfd6b38363e61998e046735c1ab67 | |
| tree | 8a3f2dbe68e3bbb446f427ff4ece4433313c76ce /tests | |
| parent | d7cf8a3b9d14d8e77062125eae8d99efbcbf7700 | |
tests/run-perfbench.py: Return error code if any test fails on target.
Signed-off-by: Damien George <damien@micropython.org>
Diffstat (limited to 'tests')
| -rwxr-xr-x | tests/run-perfbench.py | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
```diff
diff --git a/tests/run-perfbench.py b/tests/run-perfbench.py
index c143ae32f..05efa8331 100755
--- a/tests/run-perfbench.py
+++ b/tests/run-perfbench.py
@@ -88,6 +88,7 @@ def run_benchmark_on_target(target, script):
 def run_benchmarks(target, param_n, param_m, n_average, test_list):
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
+    target_had_error = False
 
     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -147,6 +148,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
                 error = "FAIL truth"
 
         if error is not None:
+            if not error.startswith("SKIP"):
+                target_had_error = True
             print(error)
         else:
             t_avg, t_sd = compute_stats(times)
@@ -162,6 +165,8 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
 
         sys.stdout.flush()
 
+    return target_had_error
+
 
 def parse_output(filename):
     with open(filename) as f:
@@ -279,12 +284,15 @@ def main():
 
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    run_benchmarks(target, N, M, n_average, tests)
+    target_had_error = run_benchmarks(target, N, M, n_average, tests)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
         target.close()
 
+    if target_had_error:
+        sys.exit(1)
+
 
 if __name__ == "__main__":
     main()
```
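
With this change a caller (for example a CI job) can detect benchmark failures from the process exit status instead of parsing the printed results. Below is a minimal sketch of such a caller; it assumes the script's usual positional `N M` arguments and `--pyboard` flag, and the concrete values shown are purely illustrative, not part of this commit.

```python
# Sketch (not part of this commit): run the perf benchmarks on a board and
# act on the exit status introduced by this change. Argument values are
# illustrative assumptions.
import subprocess
import sys

result = subprocess.run(
    ["./run-perfbench.py", "--pyboard", "100", "100"],
    cwd="tests",
)

# After this commit, a non-zero return code means at least one benchmark
# reported a real failure on the target (SKIP results are not counted).
if result.returncode != 0:
    sys.exit("perfbench reported failures on the target")
```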
