Diffstat (limited to 'tests'):
 -rw-r--r--  tests/README.md     |  35
 -rwxr-xr-x  tests/run-tests.py  | 141
 2 files changed, 132 insertions(+), 44 deletions(-)
diff --git a/tests/README.md b/tests/README.md
index b39833aa0..28c1b3a08 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -15,6 +15,32 @@ That will run tests on the `/dev/ttyACM0` serial port. You can also use shortcu
device names like `a<n>` for `/dev/ttyACM<n>` and `c<n>` for `COM<n>`. Use
`./run-tests.py --help` to see all of the device possibilities, and other options.
+There are three kinds of tests:
+
+* Tests that use `unittest`: these tests require `unittest` to be installed on the
+ target (eg via `mpremote mip install unittest`), and are used to test things that are
+ MicroPython-specific, such as behaviour that is different to CPython, modules that
+ aren't available in CPython, and hardware tests. These tests are run only under
+ MicroPython and the test passes if the `unittest` runner prints "OK" at the end of the
+ run. Other output may be printed, eg for use as diagnostics, and this output does not
+ affect the result of the test.
+
+* Tests with a corresponding `.exp` file: similar to the `unittest` tests, these tests
+ are for features that generally cannot be run under CPython. In this case the test is
+ run under MicroPython only and the output from MicroPython is compared against the
+ provided `.exp` file. The test passes if the output matches exactly.
+
+* Tests without a corresponding `.exp` file (and that don't use `unittest`): these tests are
+ used to test MicroPython behaviour that should precisely match CPython. These tests
+ are first run under CPython and the output captured, and then run under MicroPython
+ and the output compared to the CPython output. The test passes if the output matches
+ exactly. If the output differs then the test fails and the outputs are saved in a
+ `.exp` and a `.out` file respectively.
+
+In all three cases above, the test can usually be run directly on the target MicroPython
+instance, either using the unix port with `micropython <test.py>`, or on a board with
+`mpremote run <test.py>`. This is useful for creating and debugging tests.
+
Tests of capabilities not supported on all platforms should be written
to check for the capability being present. If it is not, the test
should merely output 'SKIP' followed by the line terminator, and call
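As an illustration of the first kind of test described above, a `unittest`-based test might look like the following minimal sketch (the file name and test contents are made up for illustration and are not part of the test suite):

```python
# Hypothetical test file placed under tests/ -- illustration only.
import sys
import unittest


class TestMicroPythonSpecific(unittest.TestCase):
    def test_implementation_name(self):
        # This kind of test only ever runs under MicroPython, so it can
        # assert MicroPython-specific behaviour directly.
        self.assertEqual(sys.implementation.name, "micropython")

    def test_diagnostic_output_is_allowed(self):
        # Extra prints are treated as diagnostics; only the final "OK"
        # printed by the unittest runner determines the result.
        print("diagnostic:", sys.platform)
        self.assertTrue(sys.maxsize > 0)


if __name__ == "__main__":
    unittest.main()
```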
@@ -27,15 +53,6 @@ condition a test. The run-tests.py script uses small scripts in the
feature_check directory to check whether each such feature is present,
and skips the relevant tests if not.
-Tests are generally verified by running the test both in MicroPython and
-in CPython and comparing the outputs. If the output differs the test fails
-and the outputs are saved in a .out and a .exp file respectively.
-For tests that cannot be run in CPython, for example because they use
-the machine module, a .exp file can be provided next to the test's .py
-file. A convenient way to generate that is to run the test, let it fail
-(because CPython cannot run it) and then copy the .out file (but not
-before checking it manually!)
-
When creating new tests, anything that relies on float support should go in the
float/ subdirectory. Anything that relies on import x, where x is not a built-in
module, should go in the import/ subdirectory.
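For the capability checks described above, a test can probe for the optional feature at import time and bail out early. A minimal sketch, using `select` purely as an example of an optional module:

```python
# Skip the whole test if the optional capability is not present in this build.
try:
    import select
except ImportError:
    print("SKIP")
    raise SystemExit

# The rest of the test only runs when the capability is available.
poller = select.poll()
print("have poll")
```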
diff --git a/tests/run-tests.py b/tests/run-tests.py
index d0feb4bcd..0e491ccd6 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -508,6 +508,10 @@ def run_feature_check(pyb, args, test_file):
return run_micropython(pyb, args, test_file_path, test_file_path, is_special=True)
+class TestError(Exception):
+ pass
+
+
class ThreadSafeCounter:
def __init__(self, start=0):
self._value = start
@@ -862,29 +866,10 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
skipped_tests.append(test_name)
return
- # get expected output
- test_file_expected = test_file + ".exp"
- if os.path.isfile(test_file_expected):
- # expected output given by a file, so read that in
- with open(test_file_expected, "rb") as f:
- output_expected = f.read()
- else:
- # run CPython to work out expected output
- try:
- output_expected = subprocess.check_output(
- CPYTHON3_CMD + [test_file_abspath],
- cwd=os.path.dirname(test_file),
- stderr=subprocess.STDOUT,
- )
- except subprocess.CalledProcessError:
- output_expected = b"CPYTHON3 CRASH"
-
- # canonical form for all host platforms is to use \n for end-of-line
- output_expected = output_expected.replace(b"\r\n", b"\n")
-
- # run MicroPython
+ # Run the test on the MicroPython target.
output_mupy = run_micropython(pyb, args, test_file, test_file_abspath)
+ # Check if the target requested to skip this test.
if output_mupy == b"SKIP\n":
if pyb is not None and hasattr(pyb, "read_until"):
# Running on a target over a serial connection, and the target requested
@@ -896,22 +881,96 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
skipped_tests.append(test_name)
return
- testcase_count.add(len(output_expected.splitlines()))
+ # Look at the output of the test to see if unittest was used.
+ uses_unittest = False
+ output_mupy_lines = output_mupy.splitlines()
+ if any(
+ line == b"ImportError: no module named 'unittest'" for line in output_mupy_lines[-3:]
+ ):
+ raise TestError(
+ (
+ "error: test {} requires unittest".format(test_file),
+ "(eg run `mpremote mip install unittest` to install it)",
+ )
+ )
+ elif (
+ len(output_mupy_lines) > 4
+ and output_mupy_lines[-4] == b"-" * 70
+ and output_mupy_lines[-2] == b""
+ ):
+ # look for unittest summary
+ unittest_ran_match = re.match(rb"Ran (\d+) tests$", output_mupy_lines[-3])
+ unittest_result_match = re.match(
+ b"("
+ rb"(OK)( \(skipped=(\d+)\))?"
+ b"|"
+ rb"(FAILED) \(failures=(\d+), errors=(\d+)\)"
+ b")$",
+ output_mupy_lines[-1],
+ )
+ uses_unittest = unittest_ran_match and unittest_result_match
+
+ # Determine the expected output.
+ if uses_unittest:
+ # Expected output is result of running unittest.
+ output_expected = None
+ else:
+ test_file_expected = test_file + ".exp"
+ if os.path.isfile(test_file_expected):
+ # Expected output given by a file, so read that in.
+ with open(test_file_expected, "rb") as f:
+ output_expected = f.read()
+ else:
+ # Run CPython to work out expected output.
+ try:
+ output_expected = subprocess.check_output(
+ CPYTHON3_CMD + [test_file_abspath],
+ cwd=os.path.dirname(test_file),
+ stderr=subprocess.STDOUT,
+ )
+ except subprocess.CalledProcessError:
+ output_expected = b"CPYTHON3 CRASH"
+
+ # Canonical form for all host platforms is to use \n for end-of-line.
+ output_expected = output_expected.replace(b"\r\n", b"\n")
+
+ # Work out if test passed or not.
+ test_passed = False
+ extra_info = ""
+ if uses_unittest:
+ test_passed = unittest_result_match.group(2) == b"OK"
+ num_test_cases = int(unittest_ran_match.group(1))
+ extra_info = "unittest: {} ran".format(num_test_cases)
+ if test_passed and unittest_result_match.group(4) is not None:
+ num_skipped = int(unittest_result_match.group(4))
+ num_test_cases -= num_skipped
+ extra_info += ", {} skipped".format(num_skipped)
+ elif not test_passed:
+ num_failures = int(unittest_result_match.group(6))
+ num_errors = int(unittest_result_match.group(7))
+ extra_info += ", {} failures, {} errors".format(num_failures, num_errors)
+ extra_info = "(" + extra_info + ")"
+ testcase_count.add(num_test_cases)
+ else:
+ testcase_count.add(len(output_expected.splitlines()))
+ test_passed = output_expected == output_mupy
filename_expected = os.path.join(result_dir, test_basename + ".exp")
filename_mupy = os.path.join(result_dir, test_basename + ".out")
- if output_expected == output_mupy:
- print("pass ", test_file)
+ # Print test summary, update counters, and save .exp/.out files if needed.
+ if test_passed:
+ print("pass ", test_file, extra_info)
passed_count.increment()
rm_f(filename_expected)
rm_f(filename_mupy)
else:
- with open(filename_expected, "wb") as f:
- f.write(output_expected)
+ print("FAIL ", test_file, extra_info)
+ if output_expected is not None:
+ with open(filename_expected, "wb") as f:
+ f.write(output_expected)
with open(filename_mupy, "wb") as f:
f.write(output_mupy)
- print("FAIL ", test_file)
failed_tests.append((test_name, test_file))
test_count.increment()
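For reference, the two regexes above are written against the trailer that the unittest runner prints. The following standalone snippet (with made-up sample output) shows the shape of that trailer and how the `Ran ...` and `OK`/`FAILED` lines are picked out of the last few lines of the test output:

```python
import re

# Assumed sample of the last lines printed by the unittest runner.
sample = b"\n".join(
    [
        b"....",
        b"-" * 70,
        b"Ran 4 tests",
        b"",
        b"OK (skipped=1)",
    ]
)
lines = sample.splitlines()

ran = re.match(rb"Ran (\d+) tests$", lines[-3])
result = re.match(
    b"(" rb"(OK)( \(skipped=(\d+)\))?" b"|" rb"(FAILED) \(failures=(\d+), errors=(\d+)\)" b")$",
    lines[-1],
)
print(ran.group(1), result.group(2), result.group(4))  # b'4' b'OK' b'1'
```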
@@ -919,12 +978,17 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
if pyb:
num_threads = 1
- if num_threads > 1:
- pool = ThreadPool(num_threads)
- pool.map(run_one_test, tests)
- else:
- for test in tests:
- run_one_test(test)
+ try:
+ if num_threads > 1:
+ pool = ThreadPool(num_threads)
+ pool.map(run_one_test, tests)
+ else:
+ for test in tests:
+ run_one_test(test)
+ except TestError as er:
+ for line in er.args[0]:
+ print(line)
+ sys.exit(1)
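One point worth noting about the error handling above: `multiprocessing.pool.ThreadPool.map()` re-raises an exception thrown in a worker back in the calling thread when it collects results, so a single `try`/`except TestError` covers both the threaded and the sequential paths. A small standalone sketch of that behaviour (names are illustrative):

```python
from multiprocessing.pool import ThreadPool


class TestError(Exception):
    pass


def run_one(n):
    # Simulate one test raising a fatal error in a worker thread.
    if n == 2:
        raise TestError(("error: item {} failed".format(n),))


try:
    ThreadPool(2).map(run_one, [1, 2, 3])
except TestError as er:
    for line in er.args[0]:
        print(line)  # prints: error: item 2 failed
```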
print(
"{} tests performed ({} individual testcases)".format(
@@ -1189,8 +1253,15 @@ the last matching regex is used:
tests = args.files
if not args.keep_path:
- # clear search path to make sure tests use only builtin modules and those in extmod
- os.environ["MICROPYPATH"] = ".frozen" + os.pathsep + base_path("../extmod")
+ # Clear search path to make sure tests use only builtin modules, those in
+ # extmod, and a path to unittest in case it's needed.
+ os.environ["MICROPYPATH"] = (
+ ".frozen"
+ + os.pathsep
+ + base_path("../extmod")
+ + os.pathsep
+ + base_path("../lib/micropython-lib/python-stdlib/unittest")
+ )
try:
os.makedirs(args.result_dir, exist_ok=True)
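The resulting search path is simply the three components joined with `os.pathsep`; the following standalone sketch (with an assumed checkout location) shows roughly what the assignment above evaluates to:

```python
import os

repo_root = "/home/user/micropython"  # assumed location of the checkout
micropypath = os.pathsep.join(
    [
        ".frozen",
        os.path.join(repo_root, "extmod"),
        os.path.join(repo_root, "lib/micropython-lib/python-stdlib/unittest"),
    ]
)
print(micropypath)
# On a POSIX host this prints something like:
# .frozen:/home/user/micropython/extmod:/home/user/micropython/lib/micropython-lib/python-stdlib/unittest
```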