summaryrefslogtreecommitdiff
path: root/tests/run-bench-tests
diff options
context:
space:
mode:
authorPaul Sokolovsky <pfalcon@users.sourceforge.net>2014-05-05 01:24:16 +0300
committerPaul Sokolovsky <pfalcon@users.sourceforge.net>2014-05-05 01:24:16 +0300
commitaaff82afe5a72ec69e05f1e56047d0acfde91d0e (patch)
tree7bd8dc3cf156b790b66c05b2222bd362bdbce3d8 /tests/run-bench-tests
parent22a0d67c0fc7daf18280d3b7e938be8442102110 (diff)
tests: Add framework for comparative benchmarking.
Motivation is optimizing handling of various constructs as well as understanding which constructs are more efficient in MicroPython. More info: http://forum.micropython.org/viewtopic.php?f=3&t=77 Results are wildly unexpected. For example, "optimization" of range iteration into while loop makes it twice as slow. Generally, the more bytecodes, the slower the code.
Diffstat (limited to 'tests/run-bench-tests')
-rwxr-xr-xtests/run-bench-tests97
1 files changed, 97 insertions, 0 deletions
diff --git a/tests/run-bench-tests b/tests/run-bench-tests
new file mode 100755
index 000000000..59074bb87
--- /dev/null
+++ b/tests/run-bench-tests
@@ -0,0 +1,97 @@
+#! /usr/bin/env python3
+
+import os
+import subprocess
+import sys
+import argparse
+import re
+from glob import glob
+from collections import defaultdict
+
+# Tests require at least CPython 3.3. If your default python3 executable
+# is of lower version, you can point MICROPY_CPYTHON3 environment var
+# to the correct executable.
+if os.name == 'nt':
+    # Windows: executables carry .exe and micropython lives in the windows build dir.
+    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
+    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../windows/micropython.exe')
+else:
+    # POSIX: default to the unix port build of micropython.
+    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
+    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../unix/micropython')
+
+def run_tests(pyb, test_dict):
+ test_count = 0
+ testcase_count = 0
+
+ for base_test, tests in test_dict.items():
+ print(base_test + ":")
+ for test_file in tests:
+
+ # run Micro Python
+ if pyb is None:
+ # run on PC
+ try:
+ output_mupy = subprocess.check_output([MICROPYTHON, '-X', 'emit=bytecode', test_file[0]])
+ except subprocess.CalledProcessError:
+ output_mupy = b'CRASH'
+ else:
+ # run on pyboard
+ pyb.enter_raw_repl()
+ try:
+ output_mupy = pyb.execfile(test_file).replace(b'\r\n', b'\n')
+ except pyboard.PyboardError:
+ output_mupy = b'CRASH'
+
+ output_mupy = float(output_mupy.strip())
+ test_file[1] = output_mupy
+ testcase_count += 1
+
+ test_count += 1
+ baseline = None
+ for t in tests:
+ if baseline is None:
+ baseline = t[1]
+ print(" %.3fs (%+06.2f%%) %s" % (t[1], (t[1] * 100 / baseline) - 100, t[0]))
+
+ print("{} tests performed ({} individual testcases)".format(test_count, testcase_count))
+
+ # all tests succeeded
+ return True
+
+def main():
+ cmd_parser = argparse.ArgumentParser(description='Run tests for Micro Python.')
+ cmd_parser.add_argument('--pyboard', action='store_true', help='run the tests on the pyboard')
+ cmd_parser.add_argument('files', nargs='*', help='input test files')
+ args = cmd_parser.parse_args()
+
+ # Note pyboard support is copied over from run-tests, not testes, and likely needs revamping
+ if args.pyboard:
+ import pyboard
+ pyb = pyboard.Pyboard('/dev/ttyACM0')
+ pyb.enter_raw_repl()
+ else:
+ pyb = None
+
+ if len(args.files) == 0:
+ if pyb is None:
+ # run PC tests
+ test_dirs = ('bench',)
+ else:
+ # run pyboard tests
+ test_dirs = ('basics', 'float', 'pyb')
+ tests = sorted(test_file for test_files in (glob('{}/*.py'.format(dir)) for dir in test_dirs) for test_file in test_files)
+ else:
+ # tests explicitly given
+ tests = sorted(args.files)
+
+ test_dict = defaultdict(lambda: [])
+ for t in tests:
+ m = re.match(r"(.+?)-(.+)\.py", t)
+ if not m:
+ continue
+ test_dict[m.group(1)].append([t, None])
+
+ if not run_tests(pyb, test_dict):
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()