author    Damien George <damien@micropython.org>    2025-04-28 11:15:32 +1000
committer Damien George <damien@micropython.org>    2025-06-05 15:15:31 +1000
commit    229104558fb7f9d283b7302bc3720bc35c5c49cf (patch)
tree      4c16db4a1eff5becb175e252e3a4eb87d792a23f
parent    4c55b0879b38b373b44e84552d6754b7842b5b72 (diff)
tests/run-tests.py: Automatically skip tests that are too large.
Some tests are just too big for targets that don't have much heap memory, eg `tests/extmod/vfs_rom.py`. Other tests are too large because the target doesn't have enough IRAM for native code, eg esp8266 running `tests/micropython/viper_args.py`.

Previously, such tests were explicitly skipped on targets known to have little memory, eg esp8266. But this doesn't scale to multiple targets, nor to more and more tests which are too large.

This commit addresses that by adding logic to the test runner so it can automatically skip tests when they don't fit in the target's memory. It does this by prepending a `print('START TEST')` to every test, and if a `MemoryError` occurs before that line is printed then the test was too big. This works for standard tests, tests that go via .mpy files, and tests that run in native emitter mode via .mpy files.

For tests that are too big, it prints `lrge <test name>` on the output, and at the end prints them on a separate line of skipped tests so they can be distinguished. They are also distinguished in the `_result.json` file as a skipped test with reason "too large".

Signed-off-by: Damien George <damien@micropython.org>
-rw-r--r--   tests/misc/print_exception.py    2
-rwxr-xr-x   tests/run-tests.py              53
2 files changed, 45 insertions, 10 deletions
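As a rough illustration of the detection approach described in the commit message, here is a minimal standalone sketch. It is not the run-tests.py code itself, and the helper names prepend_marker and classify_run are illustrative only:

MARKER = b"print('START TEST')"


def prepend_marker(script: bytes) -> bytes:
    # Mirror the trick used in the diff below: if the test starts with a comment,
    # put the marker on the same line so the test's line numbering is unchanged.
    if script.startswith(b"#"):
        return MARKER + script
    return MARKER + b"\n" + script


def classify_run(stdout: bytes, stderr: bytes) -> str:
    # A MemoryError raised before the marker was printed means the test never
    # even started, so it was too large for the target.
    if stdout == b"" and b"MemoryError" in stderr:
        return "skip: too large"
    if stdout.startswith(b"START TEST"):
        return "ran"
    return "crash"


# Example: prepending the marker to a test that begins with a comment keeps line 1 intact.
print(prepend_marker(b"# a test\nx = 1\n"))

# Example classification of two hypothetical target runs.
print(classify_run(b"", b"MemoryError: memory allocation failed\n"))  # skip: too large
print(classify_run(b"START TEST\r\nhello\r\n", b""))                  # ran

The actual implementation in the diff below applies the same check to the output captured from the target via pyboard.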
diff --git a/tests/misc/print_exception.py b/tests/misc/print_exception.py
index 1d196d6ab..92754733b 100644
--- a/tests/misc/print_exception.py
+++ b/tests/misc/print_exception.py
@@ -1,3 +1,5 @@
+# Test sys.print_exception (MicroPython) / traceback.print_exception (CPython).
+
try:
    import io
    import sys
diff --git a/tests/run-tests.py b/tests/run-tests.py
index ecd611aa5..c012dc96f 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -105,14 +105,11 @@ PC_PLATFORMS = ("darwin", "linux", "win32")
# These are tests that are difficult to detect that they should not be run on the given target.
platform_tests_to_skip = {
"esp8266": (
- "micropython/viper_args.py", # too large
- "micropython/viper_binop_arith.py", # too large
- "misc/rge_sm.py", # too large
+ "misc/rge_sm.py", # incorrect values due to object representation C
),
"minimal": (
"basics/class_inplace_op.py", # all special methods not supported
"basics/subclass_native_init.py", # native subclassing corner cases not support
- "misc/rge_sm.py", # too large
"micropython/opt_level.py", # don't assume line numbers are stored
),
"nrf": (
@@ -315,9 +312,21 @@ def prepare_script_for_target(args, *, script_filename=None, script_text=None, f
def run_script_on_remote_target(pyb, args, test_file, is_special):
-    had_crash, script = prepare_script_for_target(
-        args, script_filename=test_file, force_plain=is_special
-    )
+    with open(test_file, "rb") as f:
+        script = f.read()
+
+    # If the test is not a special test, prepend it with a print to indicate that it started.
+    # If the print does not execute this means that the test did not even start, eg it was
+    # too large for the target.
+    prepend_start_test = not is_special
+    if prepend_start_test:
+        if script.startswith(b"#"):
+            script = b"print('START TEST')" + script
+        else:
+            script = b"print('START TEST')\n" + script
+
+    had_crash, script = prepare_script_for_target(args, script_text=script, force_plain=is_special)
+
    if had_crash:
        return True, script
@@ -328,9 +337,19 @@ def run_script_on_remote_target(pyb, args, test_file, is_special):
    except pyboard.PyboardError as e:
        had_crash = True
        if not is_special and e.args[0] == "exception":
-            output_mupy = e.args[1] + e.args[2] + b"CRASH"
+            if prepend_start_test and e.args[1] == b"" and b"MemoryError" in e.args[2]:
+                output_mupy = b"SKIP-TOO-LARGE\n"
+            else:
+                output_mupy = e.args[1] + e.args[2] + b"CRASH"
        else:
            output_mupy = bytes(e.args[0], "ascii") + b"\nCRASH"
+
+    if prepend_start_test:
+        if output_mupy.startswith(b"START TEST\r\n"):
+            output_mupy = output_mupy.removeprefix(b"START TEST\r\n")
+        else:
+            had_crash = True
+
    return had_crash, output_mupy
@@ -474,7 +493,7 @@ def run_micropython(pyb, args, test_file, test_file_abspath, is_special=False):
    output_mupy = output_mupy.replace(b"\r\n", b"\n")
    # don't try to convert the output if we should skip this test
-    if had_crash or output_mupy in (b"SKIP\n", b"CRASH"):
+    if had_crash or output_mupy in (b"SKIP\n", b"SKIP-TOO-LARGE\n", b"CRASH"):
        return output_mupy
    # skipped special tests will output "SKIP" surrounded by other interpreter debug output
@@ -911,6 +930,10 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
print("skip ", test_file)
test_results.append((test_name, test_file, "skip", ""))
return
+ elif output_mupy == b"SKIP-TOO-LARGE\n":
+ print("lrge ", test_file)
+ test_results.append((test_name, test_file, "skip", "too large"))
+ return
# Look at the output of the test to see if unittest was used.
uses_unittest = False
@@ -1035,7 +1058,10 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
    test_results = test_results.value
    passed_tests = list(r for r in test_results if r[2] == "pass")
-    skipped_tests = list(r for r in test_results if r[2] == "skip")
+    skipped_tests = list(r for r in test_results if r[2] == "skip" and r[3] != "too large")
+    skipped_tests_too_large = list(
+        r for r in test_results if r[2] == "skip" and r[3] == "too large"
+    )
    failed_tests = list(r for r in test_results if r[2] == "fail")
    print(
@@ -1052,6 +1078,13 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
            )
        )
+    if len(skipped_tests_too_large) > 0:
+        print(
+            "{} tests skipped because they are too large: {}".format(
+                len(skipped_tests_too_large), " ".join(test[0] for test in skipped_tests_too_large)
+            )
+        )
+
    if len(failed_tests) > 0:
        print(
            "{} tests failed: {}".format(