| author | Damien George <damien@micropython.org> | 2022-05-19 15:02:33 +1000 |
|---|---|---|
| committer | Damien George <damien@micropython.org> | 2022-05-19 17:31:56 +1000 |
| commit | 54ab9d23e973bdd02326eba68ea89078429e5bce (patch) | |
| tree | 9b049404f8aca3fff8fa8c49633683cb2d5019fc /tests | |
| parent | 1786dacc839e5229298bedf23c3b4d8c53d43a5e (diff) | |
tests/run-perfbench.py: Allow running tests via mpy and native emitter.
The performance benchmark tests now support `--via-mpy` and `--emit native`
on remote targets. For example:
$ ./run-perfbench.py -p --via-mpy --emit native 100 100
Signed-off-by: Damien George <damien@micropython.org>
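
For context, a minimal sketch of what the `--via-mpy` path amounts to: compile the generated benchmark script with mpy-cross and send the resulting `.mpy` to the target instead of the source text. This is not the `prepare_script_for_target` helper imported from `run-tests.py` (its internals are not part of this diff); the mpy-cross path and flag values below are assumptions for illustration.

```python
# Illustrative sketch only -- not the run-tests.py helper used by this commit.
# Assumes mpy-cross has been built at ../mpy-cross/build/mpy-cross.
import subprocess


def compile_to_mpy(script_path, mpy_cross="../mpy-cross/build/mpy-cross", flags="-march=armv7m"):
    # Compile the benchmark script to .mpy; -march must match the target CPU,
    # which is why the runner defaults to -march=armv7m for pyboard targets.
    mpy_path = script_path.rsplit(".", 1)[0] + ".mpy"
    subprocess.check_call([mpy_cross] + flags.split() + ["-o", mpy_path, script_path])
    with open(mpy_path, "rb") as f:
        return f.read()  # raw .mpy bytes, ready to send to the board
```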
Diffstat (limited to 'tests')
| -rwxr-xr-x | tests/run-perfbench.py | 18 |
|---|---|---|
1 file changed, 15 insertions, 3 deletions
diff --git a/tests/run-perfbench.py b/tests/run-perfbench.py
index 05efa8331..22ad6308f 100755
--- a/tests/run-perfbench.py
+++ b/tests/run-perfbench.py
@@ -13,6 +13,8 @@ from glob import glob
 sys.path.append("../tools")
 import pyboard
 
+prepare_script_for_target = __import__("run-tests").prepare_script_for_target
+
 # Paths for host executables
 if os.name == "nt":
     CPYTHON3 = os.getenv("MICROPY_CPYTHON3", "python3.exe")
@@ -85,7 +87,7 @@ def run_benchmark_on_target(target, script):
         return -1, -1, "CRASH: %r" % err
 
 
-def run_benchmarks(target, param_n, param_m, n_average, test_list):
+def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
     target_had_error = False
@@ -116,13 +118,19 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
         with open("%s.full" % test_file, "wb") as f:
             f.write(test_script)
 
+        # Process script through mpy-cross if needed
+        if isinstance(target, pyboard.Pyboard) or args.via_mpy:
+            test_script_target = prepare_script_for_target(args, script_text=test_script)
+        else:
+            test_script_target = test_script
+
         # Run MicroPython a given number of times
         times = []
         scores = []
         error = None
         result_out = None
         for _ in range(n_average):
-            time, norm, result = run_benchmark_on_target(target, test_script)
+            time, norm, result = run_benchmark_on_target(target, test_script_target)
             if time < 0 or norm < 0:
                 error = result
                 break
@@ -247,6 +255,8 @@ def main():
     cmd_parser.add_argument(
         "--emit", default="bytecode", help="MicroPython emitter to use (bytecode or native)"
     )
+    cmd_parser.add_argument("--via-mpy", action="store_true", help="compile code to .mpy first")
+    cmd_parser.add_argument("--mpy-cross-flags", default="", help="flags to pass to mpy-cross")
     cmd_parser.add_argument("N", nargs=1, help="N parameter (approximate target CPU frequency)")
     cmd_parser.add_argument("M", nargs=1, help="M parameter (approximate target heap in kbytes)")
     cmd_parser.add_argument("files", nargs="*", help="input test files")
@@ -264,6 +274,8 @@ def main():
     n_average = int(args.average)
 
     if args.pyboard:
+        if not args.mpy_cross_flags:
+            args.mpy_cross_flags = "-march=armv7m"
         target = pyboard.Pyboard(args.device)
         target.enter_raw_repl()
     else:
@@ -284,7 +296,7 @@ def main():
 
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    target_had_error = run_benchmarks(target, N, M, n_average, tests)
+    target_had_error = run_benchmarks(args, target, N, M, n_average, tests)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
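Note that with `-p`/`--pyboard` the runner now defaults `--mpy-cross-flags` to `-march=armv7m` when no flags are given; for a target with a different architecture the flags can be overridden on the command line. The `-march` value below is illustrative only and must match the board's CPU:

$ ./run-perfbench.py -p --via-mpy --emit native --mpy-cross-flags='-march=xtensawin' 100 100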
