Diffstat (limited to 'tests/run-multitests.py')
-rwxr-xr-x  tests/run-multitests.py | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/tests/run-multitests.py b/tests/run-multitests.py
index b316ce69f..81db31ea9 100755
--- a/tests/run-multitests.py
+++ b/tests/run-multitests.py
@@ -93,6 +93,9 @@ class multitest:
     @staticmethod
     def expect_reboot(resume, delay_ms=0):
         print("WAIT_FOR_REBOOT", resume, delay_ms)
 
+    @staticmethod
+    def output_metric(data):
+        print("OUTPUT_METRIC", data)
 
 {}
@@ -312,6 +315,7 @@ def run_test_on_instances(test_file, num_instances, instances):
     skip = False
     injected_globals = ""
     output = [[] for _ in range(num_instances)]
+    output_metrics = []
 
     # If the test calls get_network_ip() then inject HOST_IP so that devices can know
     # the IP address of the host. Do this lazily to not require a TCP/IP connection
@@ -400,6 +404,8 @@ def run_test_on_instances(test_file, num_instances, instances):
                     for instance2 in instances:
                         if instance2 is not instance:
                             instance2.write(bytes(out, "ascii") + b"\r\n")
+                elif out.startswith("OUTPUT_METRIC "):
+                    output_metrics.append(out.split(" ", 1)[1])
                 else:
                     output[idx].append(out)
             if err is not None:
@@ -421,7 +427,7 @@ def run_test_on_instances(test_file, num_instances, instances):
         output_str += "--- instance{} ---\n".format(idx)
         output_str += "\n".join(lines) + "\n"
 
-    return error, skip, output_str
+    return error, skip, output_str, output_metrics
 
 
 def wait_for_reboot(instance, extra_timeout_ms=0):
@@ -481,7 +487,9 @@ def run_tests(test_files, instances_truth, instances_test):
         sys.stdout.flush()
 
         # Run test on test instances
-        error, skip, output_test = run_test_on_instances(test_file, num_instances, instances_test)
+        error, skip, output_test, output_metrics = run_test_on_instances(
+            test_file, num_instances, instances_test
+        )
 
         if not skip:
             # Check if truth exists in a file, and read it in
@@ -491,7 +499,7 @@ def run_tests(test_files, instances_truth, instances_test):
                     output_truth = f.read()
             else:
                 # Run test on truth instances to get expected output
-                _, _, output_truth = run_test_on_instances(
+                _, _, output_truth, _ = run_test_on_instances(
                     test_file, num_instances, instances_truth
                 )
 
@@ -520,6 +528,11 @@ def run_tests(test_files, instances_truth, instances_test):
                 print("### DIFF ###")
                 print_diff(output_truth, output_test)
 
+        # Print test output metrics, if there are any.
+        if output_metrics:
+            for metric in output_metrics:
+                print(test_file, ": ", metric, sep="")
+
         if cmd_args.show_output:
             print()
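
With this change, a multitest script can report free-form metrics (for example
timing or throughput numbers) via multitest.output_metric(). Any line an
instance prints that starts with "OUTPUT_METRIC " is diverted into
output_metrics instead of the diffed test output, and the collected metrics are
printed after the pass/fail result as "<test_file>: <data>". A minimal sketch
of a test using the hook follows; the multitest object is injected by
run-multitests.py, and the measured loop and metric text here are illustrative,
not part of this commit.

import time


def instance0():
    # Measure something on this instance; the loop is a placeholder workload.
    t0 = time.time()
    for _ in range(1000):
        pass
    elapsed_ms = (time.time() - t0) * 1000
    # Emitted as "OUTPUT_METRIC <data>" on stdout; the runner strips the
    # prefix, collects the rest, and reports it after the test result.
    multitest.output_metric("loop: {:.3f} ms".format(elapsed_ms))
    print("done")  # normal output, still compared against the truth


def instance1():
    print("done")

Because the metric lines never reach output_str, nondeterministic numbers such
as timings cannot cause spurious diffs against the truth output.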