@@ -11,15 +11,14 @@
 import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, split_test_packages, runtest, get_abs_module, is_failed,
-    PROGRESS_MIN_TIME,
-    Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
-    ChildError, DidNotRun)
+    findtests, split_test_packages, runtest, get_abs_module,
+    PROGRESS_MIN_TIME, State)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.pgo import setup_pgo_tests
 from test.libregrtest.utils import (removepy, count, format_duration,
                                     printlist, get_build_info)
 from test import support
+from test.support import TestStats
 from test.support import os_helper
 from test.support import threading_helper
 
@@ -78,13 +77,14 @@ def __init__(self):
         self.good = []
         self.bad = []
         self.skipped = []
-        self.resource_denieds = []
+        self.resource_denied = []
         self.environment_changed = []
         self.run_no_tests = []
         self.need_rerun = []
         self.rerun = []
         self.first_result = None
         self.interrupted = False
+        self.stats_dict: dict[str, TestStats] = {}
 
         # used by --slow
         self.test_times = []
@@ -93,7 +93,7 @@ def __init__(self):
         self.tracer = None
 
         # used to display the progress bar "[ 3/100]"
-        self.start_time = time.monotonic()
+        self.start_time = time.perf_counter()
         self.test_count = ''
         self.test_count_width = 1
 
@@ -111,36 +111,41 @@ def __init__(self):
 
     def get_executed(self):
         return (set(self.good) | set(self.bad) | set(self.skipped)
-                | set(self.resource_denieds) | set(self.environment_changed)
+                | set(self.resource_denied) | set(self.environment_changed)
                 | set(self.run_no_tests))
 
     def accumulate_result(self, result, rerun=False):
-        test_name = result.name
-
-        if not isinstance(result, (ChildError, Interrupted)) and not rerun:
-            self.test_times.append((result.duration_sec, test_name))
-
-        if isinstance(result, Passed):
-            self.good.append(test_name)
-        elif isinstance(result, ResourceDenied):
-            self.skipped.append(test_name)
-            self.resource_denieds.append(test_name)
-        elif isinstance(result, Skipped):
-            self.skipped.append(test_name)
-        elif isinstance(result, EnvChanged):
-            self.environment_changed.append(test_name)
-        elif isinstance(result, Failed):
-            if not rerun:
-                self.bad.append(test_name)
-                self.need_rerun.append(result)
-        elif isinstance(result, DidNotRun):
-            self.run_no_tests.append(test_name)
-        elif isinstance(result, Interrupted):
-            self.interrupted = True
-        else:
-            raise ValueError("invalid test result: %r" % result)
+        test_name = result.test_name
+
+        if result.has_meaningful_duration() and not rerun:
+            self.test_times.append((result.duration, test_name))
 
-        if rerun and not isinstance(result, (Failed, Interrupted)):
+        match result.state:
+            case State.PASSED:
+                self.good.append(test_name)
+            case State.ENV_CHANGED:
+                self.environment_changed.append(test_name)
+            case State.SKIPPED:
+                self.skipped.append(test_name)
+            case State.RESOURCE_DENIED:
+                self.skipped.append(test_name)
+                self.resource_denied.append(test_name)
+            case State.INTERRUPTED:
+                self.interrupted = True
+            case State.DID_NOT_RUN:
+                self.run_no_tests.append(test_name)
+            case _:
+                if result.is_failed(self.ns.fail_env_changed):
+                    if not rerun:
+                        self.bad.append(test_name)
+                        self.need_rerun.append(result)
+                else:
+                    raise ValueError(f"invalid test state: {result.state!r}")
+
+        if result.stats is not None:
+            self.stats_dict[result.test_name] = result.stats
+
+        if rerun and not (result.is_failed(False) or result.state == State.INTERRUPTED):
             self.bad.remove(test_name)
 
         xml_data = result.xml_data
@@ -162,7 +167,7 @@ def log(self, line=''):
             line = f"load avg: {load_avg:.2f} {line}"
 
         # add the timestamp prefix: "0:01:05 "
-        test_time = time.monotonic() - self.start_time
+        test_time = time.perf_counter() - self.start_time
 
         mins, secs = divmod(int(test_time), 60)
         hours, mins = divmod(mins, 60)
@@ -337,7 +342,7 @@ def rerun_failed_tests(self):
         rerun_list = list(self.need_rerun)
         self.need_rerun.clear()
         for result in rerun_list:
-            test_name = result.name
+            test_name = result.test_name
             self.rerun.append(test_name)
 
             errors = result.errors or []
@@ -364,7 +369,7 @@ def rerun_failed_tests(self):
 
             self.accumulate_result(result, rerun=True)
 
-            if isinstance(result, Interrupted):
+            if result.state == State.INTERRUPTED:
                 break
 
         if self.bad:
@@ -461,7 +466,7 @@ def run_tests_sequential(self):
 
         previous_test = None
         for test_index, test_name in enumerate(self.tests, 1):
-            start_time = time.monotonic()
+            start_time = time.perf_counter()
 
             text = test_name
             if previous_test:
@@ -480,14 +485,14 @@ def run_tests_sequential(self):
             result = runtest(self.ns, test_name)
             self.accumulate_result(result)
 
-            if isinstance(result, Interrupted):
+            if result.state == State.INTERRUPTED:
                 break
 
             previous_test = str(result)
-            test_time = time.monotonic() - start_time
+            test_time = time.perf_counter() - start_time
             if test_time >= PROGRESS_MIN_TIME:
                 previous_test = "%s in %s" % (previous_test, format_duration(test_time))
-            elif isinstance(result, Passed):
+            elif result.state == State.PASSED:
                 # be quiet: say nothing if the test passed shortly
                 previous_test = None
 
@@ -496,7 +501,7 @@ def run_tests_sequential(self):
                 if module not in save_modules and module.startswith("test."):
                     support.unload(module)
 
-            if self.ns.failfast and is_failed(result, self.ns):
+            if self.ns.failfast and result.is_failed(self.ns.fail_env_changed):
                 break
 
         if previous_test:
@@ -638,13 +643,48 @@ def finalize(self):
                             coverdir=self.ns.coverdir)
 
         print()
-        duration = time.monotonic() - self.start_time
-        print("Total duration: %s" % format_duration(duration))
-        print("Tests result: %s" % self.get_tests_result())
+        self.display_summary()
 
         if self.ns.runleaks:
             os.system("leaks %d" % os.getpid())
 
+    def display_summary(self):
+        duration = time.perf_counter() - self.start_time
+
+        # Total duration
+        print("Total duration: %s" % format_duration(duration))
+
+        # Total tests
+        total = TestStats()
+        for stats in self.stats_dict.values():
+            total.accumulate(stats)
+        stats = [f'run={total.tests_run:,}']
+        if total.failures:
+            stats.append(f'failures={total.failures:,}')
+        if total.skipped:
+            stats.append(f'skipped={total.skipped:,}')
+        print(f"Total tests: {' '.join(stats)}")
+
+        # Total test files
+        report = [f'success={len(self.good)}']
+        if self.bad:
+            report.append(f'failed={len(self.bad)}')
+        if self.environment_changed:
+            report.append(f'env_changed={len(self.environment_changed)}')
+        if self.skipped:
+            report.append(f'skipped={len(self.skipped)}')
+        if self.resource_denied:
+            report.append(f'resource_denied={len(self.resource_denied)}')
+        if self.rerun:
+            report.append(f'rerun={len(self.rerun)}')
+        if self.run_no_tests:
+            report.append(f'run_no_tests={len(self.run_no_tests)}')
+        print(f"Total test files: {' '.join(report)}")
+
+        # Result
+        result = self.get_tests_result()
+        print(f"Result: {result}")
+
     def save_xml_result(self):
         if not self.ns.xmlpath and not self.testsuite_xml:
             return
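
For context, a minimal self-contained sketch of the pattern this diff adopts: a `State` enum drives a `match` dispatch in `accumulate_result()` instead of `isinstance()` checks on result classes, and per-test stats objects are summed into one total for the final summary. The names below (`DemoState`, `DemoStats`, `accumulate_result`) are illustrative stand-ins, not the actual `test.support` or `test.libregrtest` API.

```python
# Illustrative sketch only (Python 3.10+): mirrors the State/TestStats pattern
# with stand-in names; it is not the real test.support or libregrtest code.
from dataclasses import dataclass
from enum import Enum


class DemoState(Enum):
    PASSED = "passed"
    FAILED = "failed"
    SKIPPED = "skipped"


@dataclass
class DemoStats:
    tests_run: int = 0
    failures: int = 0
    skipped: int = 0

    def accumulate(self, other: "DemoStats") -> None:
        # Sum per-test counters into a running total, as display_summary() does.
        self.tests_run += other.tests_run
        self.failures += other.failures
        self.skipped += other.skipped


def accumulate_result(state: DemoState, test_name: str,
                      good: list[str], bad: list[str], skipped: list[str]) -> None:
    # Dispatch on the state enum rather than on the concrete result class.
    match state:
        case DemoState.PASSED:
            good.append(test_name)
        case DemoState.SKIPPED:
            skipped.append(test_name)
        case _:
            bad.append(test_name)


if __name__ == "__main__":
    total = DemoStats()
    for per_test in (DemoStats(tests_run=3), DemoStats(tests_run=2, skipped=1)):
        total.accumulate(per_test)
    print(f"run={total.tests_run:,} skipped={total.skipped:,}")  # run=5 skipped=1
```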