regrtest computes statistics by vstinner · Pull Request #108793 · python/cpython · GitHub
regrtest computes statistics #108793

Merged

merged 1 commit on Sep 2, 2023
regrtest computes statistics
The test_main() functions of test_netrc, test_pep646_syntax and
test_xml_etree now return their results.
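
(Illustrative sketch, not part of the diff: the pattern in those three
modules is that test_main() now returns the value of run_unittest(), so
regrtest can pick up the statistics of the run. The class name below is
a stand-in, and it is assumed that run_unittest() reports results after
this change.)

    import unittest
    from test import support

    class ExampleTests(unittest.TestCase):  # stand-in for the real test cases
        def test_ok(self):
            self.assertTrue(True)

    def test_main():
        # Returning the result is what lets regrtest collect statistics.
        return support.run_unittest(ExampleTests)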

Changes:

* Rewrite TestResult as a dataclass with a new State class.
* Add test.support.TestStats class and Regrtest.stats_dict attribute.
* libregrtest.runtest functions now modify a TestResult instance
  in-place.
* libregrtest summary now lists the number of tests run, tests
  skipped, and resources denied.
* Add TestResult.has_meaningful_duration() method.
* Compute the TestResult duration in the calling function.
* Use time.perf_counter() instead of time.monotonic().
* Regrtest: rename 'resource_denieds' attribute to 'resource_denied'.
* Rename CHILD_ERROR to MULTIPROCESSING_ERROR.
* Use match/case syntax to run different code depending on the test
  state (see the sketch after this list).
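
A minimal sketch of the new result model (simplified; the real code in
Lib/test/libregrtest/runtest.py has more states and fields):

    import dataclasses

    class State:
        # String constants naming the outcome of one test file.
        PASSED = "PASSED"
        SKIPPED = "SKIPPED"
        RESOURCE_DENIED = "RESOURCE_DENIED"
        ENV_CHANGED = "ENV_CHANGED"
        DID_NOT_RUN = "DID_NOT_RUN"
        INTERRUPTED = "INTERRUPTED"
        MULTIPROCESSING_ERROR = "MULTIPROCESSING_ERROR"  # was CHILD_ERROR

    @dataclasses.dataclass
    class TestResult:
        test_name: str
        state: str | None = None
        duration: float | None = None

        def has_meaningful_duration(self):
            # An interrupted run or a crashed worker process has no
            # duration worth reporting in the --slow output.
            return self.state not in (State.INTERRUPTED,
                                      State.MULTIPROCESSING_ERROR)

    def describe(result):
        # match/case on the state replaces the old isinstance() chain
        # over Passed/Failed/Skipped/... result subclasses.
        match result.state:
            case State.PASSED:
                return "passed"
            case State.SKIPPED | State.RESOURCE_DENIED:
                return "skipped"
            case _:
                return f"state: {result.state!r}"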

Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
vstinner and AlexWaygood committed Sep 2, 2023
commit 97c72637aadbc5403e91350a1148054246e52805
126 changes: 83 additions & 43 deletions Lib/test/libregrtest/main.py
@@ -11,15 +11,14 @@
import unittest
from test.libregrtest.cmdline import _parse_args
from test.libregrtest.runtest import (
findtests, split_test_packages, runtest, get_abs_module, is_failed,
PROGRESS_MIN_TIME,
Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
ChildError, DidNotRun)
findtests, split_test_packages, runtest, get_abs_module,
PROGRESS_MIN_TIME, State)
from test.libregrtest.setup import setup_tests
from test.libregrtest.pgo import setup_pgo_tests
from test.libregrtest.utils import (removepy, count, format_duration,
printlist, get_build_info)
from test import support
from test.support import TestStats
from test.support import os_helper
from test.support import threading_helper

@@ -78,13 +77,14 @@ def __init__(self):
self.good = []
self.bad = []
self.skipped = []
self.resource_denieds = []
self.resource_denied = []
self.environment_changed = []
self.run_no_tests = []
self.need_rerun = []
self.rerun = []
self.first_result = None
self.interrupted = False
self.stats_dict: dict[str, TestStats] = {}

# used by --slow
self.test_times = []
@@ -93,7 +93,7 @@ def __init__(self):
self.tracer = None

# used to display the progress bar "[ 3/100]"
self.start_time = time.monotonic()
self.start_time = time.perf_counter()
self.test_count = ''
self.test_count_width = 1

@@ -111,36 +111,41 @@

def get_executed(self):
return (set(self.good) | set(self.bad) | set(self.skipped)
| set(self.resource_denieds) | set(self.environment_changed)
| set(self.resource_denied) | set(self.environment_changed)
| set(self.run_no_tests))

def accumulate_result(self, result, rerun=False):
test_name = result.name

if not isinstance(result, (ChildError, Interrupted)) and not rerun:
self.test_times.append((result.duration_sec, test_name))

if isinstance(result, Passed):
self.good.append(test_name)
elif isinstance(result, ResourceDenied):
self.skipped.append(test_name)
self.resource_denieds.append(test_name)
elif isinstance(result, Skipped):
self.skipped.append(test_name)
elif isinstance(result, EnvChanged):
self.environment_changed.append(test_name)
elif isinstance(result, Failed):
if not rerun:
self.bad.append(test_name)
self.need_rerun.append(result)
elif isinstance(result, DidNotRun):
self.run_no_tests.append(test_name)
elif isinstance(result, Interrupted):
self.interrupted = True
else:
raise ValueError("invalid test result: %r" % result)
test_name = result.test_name

if result.has_meaningful_duration() and not rerun:
self.test_times.append((result.duration, test_name))

if rerun and not isinstance(result, (Failed, Interrupted)):
match result.state:
case State.PASSED:
self.good.append(test_name)
case State.ENV_CHANGED:
self.environment_changed.append(test_name)
case State.SKIPPED:
self.skipped.append(test_name)
case State.RESOURCE_DENIED:
self.skipped.append(test_name)
self.resource_denied.append(test_name)
case State.INTERRUPTED:
self.interrupted = True
case State.DID_NOT_RUN:
self.run_no_tests.append(test_name)
case _:
if result.is_failed(self.ns.fail_env_changed):
if not rerun:
self.bad.append(test_name)
self.need_rerun.append(result)
else:
raise ValueError(f"invalid test state: {state!r}")

if result.stats is not None:
self.stats_dict[result.test_name] = result.stats

if rerun and not(result.is_failed(False) or result.state == State.INTERRUPTED):
self.bad.remove(test_name)

xml_data = result.xml_data
@@ -162,7 +167,7 @@ def log(self, line=''):
line = f"load avg: {load_avg:.2f} {line}"

# add the timestamp prefix: "0:01:05 "
test_time = time.monotonic() - self.start_time
test_time = time.perf_counter() - self.start_time

mins, secs = divmod(int(test_time), 60)
hours, mins = divmod(mins, 60)
@@ -337,7 +342,7 @@ def rerun_failed_tests(self):
rerun_list = list(self.need_rerun)
self.need_rerun.clear()
for result in rerun_list:
test_name = result.name
test_name = result.test_name
self.rerun.append(test_name)

errors = result.errors or []
@@ -364,7 +369,7 @@ def rerun_failed_tests(self):

self.accumulate_result(result, rerun=True)

if isinstance(result, Interrupted):
if result.state == State.INTERRUPTED:
break

if self.bad:
@@ -461,7 +466,7 @@ def run_tests_sequential(self):

previous_test = None
for test_index, test_name in enumerate(self.tests, 1):
start_time = time.monotonic()
start_time = time.perf_counter()

text = test_name
if previous_test:
@@ -480,14 +485,14 @@ def run_tests_sequential(self):
result = runtest(self.ns, test_name)
self.accumulate_result(result)

if isinstance(result, Interrupted):
if result.state == State.INTERRUPTED:
break

previous_test = str(result)
test_time = time.monotonic() - start_time
test_time = time.perf_counter() - start_time
if test_time >= PROGRESS_MIN_TIME:
previous_test = "%s in %s" % (previous_test, format_duration(test_time))
elif isinstance(result, Passed):
elif result.state == State.PASSED:
# be quiet: say nothing if the test passed shortly
previous_test = None

@@ -496,7 +501,7 @@ def run_tests_sequential(self):
if module not in save_modules and module.startswith("test."):
support.unload(module)

if self.ns.failfast and is_failed(result, self.ns):
if self.ns.failfast and result.is_failed(self.ns.fail_env_changed):
break

if previous_test:
@@ -638,13 +643,48 @@ def finalize(self):
coverdir=self.ns.coverdir)

print()
duration = time.monotonic() - self.start_time
print("Total duration: %s" % format_duration(duration))
print("Tests result: %s" % self.get_tests_result())
self.display_summary()

if self.ns.runleaks:
os.system("leaks %d" % os.getpid())

def display_summary(self):
duration = time.perf_counter() - self.start_time

# Total duration
print("Total duration: %s" % format_duration(duration))

# Total tests
total = TestStats()
for stats in self.stats_dict.values():
total.accumulate(stats)
stats = [f'run={total.tests_run:,}']
if total.failures:
stats.append(f'failures={total.failures:,}')
if total.skipped:
stats.append(f'skipped={total.skipped:,}')
print(f"Total tests: {' '.join(stats)}")

# Total test files
report = [f'success={len(self.good)}']
if self.bad:
report.append(f'failed={len(self.bad)}')
if self.environment_changed:
report.append(f'env_changed={len(self.environment_changed)}')
if self.skipped:
report.append(f'skipped={len(self.skipped)}')
if self.resource_denied:
report.append(f'resource_denied={len(self.resource_denied)}')
if self.rerun:
report.append(f'rerun={len(self.rerun)}')
if self.run_no_tests:
report.append(f'run_no_tests={len(self.run_no_tests)}')
print(f"Total test files: {' '.join(report)}")

# Result
result = self.get_tests_result()
print(f"Result: {result}")

def save_xml_result(self):
if not self.ns.xmlpath and not self.testsuite_xml:
return
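With this change, the end of a run prints a report shaped roughly as
follows (the values are invented; the field order follows the
display_summary() code above, and fields with a zero count are omitted):

    Total duration: 2 min 13 sec
    Total tests: run=41,602 failures=1 skipped=1,378
    Total test files: success=433 failed=1 skipped=29 resource_denied=8
    Result: FAILURE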
5 changes: 3 additions & 2 deletions Lib/test/libregrtest/refleak.py
@@ -83,11 +83,12 @@ def get_pooled_int(value):
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
flush=True)

results = None
dash_R_cleanup(fs, ps, pic, zdc, abcs)
support.gc_collect()

for i in rep_range:
test_func()
results = test_func()

dash_R_cleanup(fs, ps, pic, zdc, abcs)
support.gc_collect()
@@ -151,7 +152,7 @@ def check_fd_deltas(deltas):
print(msg, file=refrep)
refrep.flush()
failed = True
return failed
return (failed, results)


def dash_R_cleanup(fs, ps, pic, zdc, abcs):
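dash_R() now returns a (failed, results) tuple instead of a bare flag,
so the caller can turn the final run's unittest results into TestStats.
A sketch of an updated call site (assumed; the real one lives outside
this hunk):

    # Sketch only: the TestStats field names follow the summary code in
    # main.py above; the call site itself is an assumption.
    failed, test_results = dash_R(ns, test_name, test_func)
    if test_results is not None:
        result.stats = TestStats(tests_run=test_results.testsRun,
                                 failures=len(test_results.failures),
                                 skipped=len(test_results.skipped))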