Commit d4e534c

regrtest computes statistics (#108793)
test_netrc, test_pep646_syntax and test_xml_etree now return results in the test_main() function.

Changes:

* Rewrite TestResult as a dataclass with a new State class.
* Add test.support.TestStats class and Regrtest.stats_dict attribute.
* libregrtest.runtest functions now modify a TestResult instance in-place.
* libregrtest summary lists the number of run tests and skipped tests, and denied resources.
* Add TestResult.has_meaningful_duration() method.
* Compute TestResult duration in the upper function.
* Use time.perf_counter() instead of time.monotonic().
* Regrtest: rename 'resource_denieds' attribute to 'resource_denied'.
* Rename CHILD_ERROR to MULTIPROCESSING_ERROR.
* Use match/case syntax to have different code depending on the test state.

Co-authored-by: Alex Waygood <[email protected]>
1 parent e7de0c5 commit d4e534c

10 files changed: +512 −308 lines
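The commit message introduces a new State class and a test.support.TestStats class. For orientation, here is a rough sketch of both, reconstructed only from how this diff uses them (the State.* constants in accumulate_result(), and TestStats()/accumulate() with the tests_run/failures/skipped counters in display_summary()). It is an approximation, not the actual definitions from Lib/test/libregrtest/runtest.py and Lib/test/support:

import dataclasses

# Sketch of the new State constants, limited to the values mentioned in this
# diff and commit message; the real class defines additional states.
class State:
    PASSED = "PASSED"
    SKIPPED = "SKIPPED"
    RESOURCE_DENIED = "RESOURCE_DENIED"
    ENV_CHANGED = "ENV_CHANGED"
    DID_NOT_RUN = "DID_NOT_RUN"
    INTERRUPTED = "INTERRUPTED"
    MULTIPROCESSING_ERROR = "MULTIPROCESSING_ERROR"  # renamed from CHILD_ERROR

# Sketch of test.support.TestStats as display_summary() uses it: three counters
# plus accumulate() for summing per-test statistics into a grand total.
@dataclasses.dataclass
class TestStats:
    tests_run: int = 0
    failures: int = 0
    skipped: int = 0

    def accumulate(self, stats: "TestStats") -> None:
        self.tests_run += stats.tests_run
        self.failures += stats.failures
        self.skipped += stats.skipped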

Lib/test/libregrtest/main.py (+83 −43)
@@ -11,15 +11,14 @@
 import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, split_test_packages, runtest, get_abs_module, is_failed,
-    PROGRESS_MIN_TIME,
-    Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
-    ChildError, DidNotRun)
+    findtests, split_test_packages, runtest, get_abs_module,
+    PROGRESS_MIN_TIME, State)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.pgo import setup_pgo_tests
 from test.libregrtest.utils import (removepy, count, format_duration,
                                     printlist, get_build_info)
 from test import support
+from test.support import TestStats
 from test.support import os_helper
 from test.support import threading_helper
@@ -78,13 +77,14 @@ def __init__(self):
         self.good = []
         self.bad = []
         self.skipped = []
-        self.resource_denieds = []
+        self.resource_denied = []
         self.environment_changed = []
         self.run_no_tests = []
         self.need_rerun = []
         self.rerun = []
         self.first_result = None
         self.interrupted = False
+        self.stats_dict: dict[str, TestStats] = {}

         # used by --slow
         self.test_times = []
@@ -93,7 +93,7 @@ def __init__(self):
         self.tracer = None

         # used to display the progress bar "[ 3/100]"
-        self.start_time = time.monotonic()
+        self.start_time = time.perf_counter()
         self.test_count = ''
         self.test_count_width = 1
@@ -111,36 +111,41 @@ def __init__(self):

     def get_executed(self):
         return (set(self.good) | set(self.bad) | set(self.skipped)
-                | set(self.resource_denieds) | set(self.environment_changed)
+                | set(self.resource_denied) | set(self.environment_changed)
                 | set(self.run_no_tests))

     def accumulate_result(self, result, rerun=False):
-        test_name = result.name
-
-        if not isinstance(result, (ChildError, Interrupted)) and not rerun:
-            self.test_times.append((result.duration_sec, test_name))
-
-        if isinstance(result, Passed):
-            self.good.append(test_name)
-        elif isinstance(result, ResourceDenied):
-            self.skipped.append(test_name)
-            self.resource_denieds.append(test_name)
-        elif isinstance(result, Skipped):
-            self.skipped.append(test_name)
-        elif isinstance(result, EnvChanged):
-            self.environment_changed.append(test_name)
-        elif isinstance(result, Failed):
-            if not rerun:
-                self.bad.append(test_name)
-                self.need_rerun.append(result)
-        elif isinstance(result, DidNotRun):
-            self.run_no_tests.append(test_name)
-        elif isinstance(result, Interrupted):
-            self.interrupted = True
-        else:
-            raise ValueError("invalid test result: %r" % result)
+        test_name = result.test_name
+
+        if result.has_meaningful_duration() and not rerun:
+            self.test_times.append((result.duration, test_name))

-        if rerun and not isinstance(result, (Failed, Interrupted)):
+        match result.state:
+            case State.PASSED:
+                self.good.append(test_name)
+            case State.ENV_CHANGED:
+                self.environment_changed.append(test_name)
+            case State.SKIPPED:
+                self.skipped.append(test_name)
+            case State.RESOURCE_DENIED:
+                self.skipped.append(test_name)
+                self.resource_denied.append(test_name)
+            case State.INTERRUPTED:
+                self.interrupted = True
+            case State.DID_NOT_RUN:
+                self.run_no_tests.append(test_name)
+            case _:
+                if result.is_failed(self.ns.fail_env_changed):
+                    if not rerun:
+                        self.bad.append(test_name)
+                        self.need_rerun.append(result)
+                else:
+                    raise ValueError(f"invalid test state: {state!r}")
+
+        if result.stats is not None:
+            self.stats_dict[result.test_name] = result.stats
+
+        if rerun and not(result.is_failed(False) or result.state == State.INTERRUPTED):
             self.bad.remove(test_name)

         xml_data = result.xml_data
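accumulate_result() now dispatches on result.state rather than on the result's class, so TestResult only needs a handful of fields and two predicates. The sketch below shows one plausible shape, inferred purely from how the hunk above uses it and reusing the State sketch near the top of this page; the real dataclass in Lib/test/libregrtest/runtest.py has more fields (errors, duration handling, etc.) and may differ in detail:

import dataclasses

# Approximation only: a TestResult consistent with accumulate_result()'s usage
# above; not the actual libregrtest definition.
@dataclasses.dataclass
class TestResult:
    test_name: str
    state: str | None = None            # one of the State.* constants
    duration: float | None = None       # now computed by the calling function
    xml_data: list[str] | None = None
    stats: "TestStats | None" = None    # per-test statistics, if collected

    def is_failed(self, fail_env_changed: bool) -> bool:
        # ENV_CHANGED counts as a failure only when --fail-env-changed is used;
        # any state not handled explicitly by the match statement is a failure.
        if self.state == State.ENV_CHANGED:
            return fail_env_changed
        return self.state not in (State.PASSED, State.SKIPPED,
                                  State.RESOURCE_DENIED, State.INTERRUPTED,
                                  State.DID_NOT_RUN)

    def has_meaningful_duration(self) -> bool:
        # Only report durations for tests that actually ran.
        return self.duration is not None and self.state != State.DID_NOT_RUN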
@@ -162,7 +167,7 @@ def log(self, line=''):
             line = f"load avg: {load_avg:.2f} {line}"

         # add the timestamp prefix: "0:01:05 "
-        test_time = time.monotonic() - self.start_time
+        test_time = time.perf_counter() - self.start_time

         mins, secs = divmod(int(test_time), 60)
         hours, mins = divmod(mins, 60)
@@ -337,7 +342,7 @@ def rerun_failed_tests(self):
         rerun_list = list(self.need_rerun)
         self.need_rerun.clear()
         for result in rerun_list:
-            test_name = result.name
+            test_name = result.test_name
             self.rerun.append(test_name)

             errors = result.errors or []
@@ -364,7 +369,7 @@ def rerun_failed_tests(self):

             self.accumulate_result(result, rerun=True)

-            if isinstance(result, Interrupted):
+            if result.state == State.INTERRUPTED:
                 break

         if self.bad:
@@ -461,7 +466,7 @@ def run_tests_sequential(self):

         previous_test = None
         for test_index, test_name in enumerate(self.tests, 1):
-            start_time = time.monotonic()
+            start_time = time.perf_counter()

             text = test_name
             if previous_test:
@@ -480,14 +485,14 @@ def run_tests_sequential(self):
                 result = runtest(self.ns, test_name)
                 self.accumulate_result(result)

-            if isinstance(result, Interrupted):
+            if result.state == State.INTERRUPTED:
                 break

             previous_test = str(result)
-            test_time = time.monotonic() - start_time
+            test_time = time.perf_counter() - start_time
             if test_time >= PROGRESS_MIN_TIME:
                 previous_test = "%s in %s" % (previous_test, format_duration(test_time))
-            elif isinstance(result, Passed):
+            elif result.state == State.PASSED:
                 # be quiet: say nothing if the test passed shortly
                 previous_test = None
@@ -496,7 +501,7 @@ def run_tests_sequential(self):
                 if module not in save_modules and module.startswith("test."):
                     support.unload(module)

-            if self.ns.failfast and is_failed(result, self.ns):
+            if self.ns.failfast and result.is_failed(self.ns.fail_env_changed):
                 break

         if previous_test:
@@ -638,13 +643,48 @@ def finalize(self):
                                                 coverdir=self.ns.coverdir)

         print()
-        duration = time.monotonic() - self.start_time
-        print("Total duration: %s" % format_duration(duration))
-        print("Tests result: %s" % self.get_tests_result())
+        self.display_summary()

         if self.ns.runleaks:
             os.system("leaks %d" % os.getpid())

+    def display_summary(self):
+        duration = time.perf_counter() - self.start_time
+
+        # Total duration
+        print("Total duration: %s" % format_duration(duration))
+
+        # Total tests
+        total = TestStats()
+        for stats in self.stats_dict.values():
+            total.accumulate(stats)
+        stats = [f'run={total.tests_run:,}']
+        if total.failures:
+            stats.append(f'failures={total.failures:,}')
+        if total.skipped:
+            stats.append(f'skipped={total.skipped:,}')
+        print(f"Total tests: {' '.join(stats)}")
+
+        # Total test files
+        report = [f'success={len(self.good)}']
+        if self.bad:
+            report.append(f'failed={len(self.bad)}')
+        if self.environment_changed:
+            report.append(f'env_changed={len(self.environment_changed)}')
+        if self.skipped:
+            report.append(f'skipped={len(self.skipped)}')
+        if self.resource_denied:
+            report.append(f'resource_denied={len(self.resource_denied)}')
+        if self.rerun:
+            report.append(f'rerun={len(self.rerun)}')
+        if self.run_no_tests:
+            report.append(f'run_no_tests={len(self.run_no_tests)}')
+        print(f"Total test files: {' '.join(report)}")
+
+        # Result
+        result = self.get_tests_result()
+        print(f"Result: {result}")
+
     def save_xml_result(self):
         if not self.ns.xmlpath and not self.testsuite_xml:
             return
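To make the new summary concrete, here is a tiny standalone illustration (hypothetical numbers, reusing the TestStats sketch from the top of this page) of how display_summary() assembles the "Total tests:" line from per-test statistics:

# Illustration only: made-up numbers fed through the same logic as
# display_summary() above.
total = TestStats()
for stats in (TestStats(tests_run=1200, failures=2, skipped=50),
              TestStats(tests_run=34, failures=0, skipped=7)):
    total.accumulate(stats)

parts = [f'run={total.tests_run:,}']
if total.failures:
    parts.append(f'failures={total.failures:,}')
if total.skipped:
    parts.append(f'skipped={total.skipped:,}')
print(f"Total tests: {' '.join(parts)}")   # Total tests: run=1,234 failures=2 skipped=57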

Lib/test/libregrtest/refleak.py (+3 −2)
@@ -83,11 +83,12 @@ def get_pooled_int(value):
     print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
           flush=True)

+    results = None
     dash_R_cleanup(fs, ps, pic, zdc, abcs)
     support.gc_collect()

     for i in rep_range:
-        test_func()
+        results = test_func()

         dash_R_cleanup(fs, ps, pic, zdc, abcs)
         support.gc_collect()
@@ -151,7 +152,7 @@ def check_fd_deltas(deltas):
                 print(msg, file=refrep)
                 refrep.flush()
             failed = True
-    return failed
+    return (failed, results)


 def dash_R_cleanup(fs, ps, pic, zdc, abcs):
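The second refleak.py hunk changes the leak-hunting helper to return a (failed, results) pair instead of a bare flag, where results is whatever the last test_func() call returned, so per-test statistics survive a -R run. Below is a minimal caller sketch, under the assumption that the changed helper is refleak.dash_R(ns, test_name, test_func); the function and its name are placeholders, not the real libregrtest caller:

# Placeholder caller, illustration only: unpack the new (failed, results) pair.
def run_with_refleak_check(ns, test_name, test_func):
    failed, results = dash_R(ns, test_name, test_func)   # assumed helper signature
    if failed:
        print(f"{test_name} leaked references")
    # 'results' carries the last test_func() return value (e.g. a TestStats),
    # so the caller can attach it to the TestResult for regrtest's summary.
    return results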
