Commit de1850b

[3.5] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master (#2442)
* bpo-30523: regrtest --list-cases --match (#2401)

  * regrtest --list-cases now supports the --match and --match-file options.
    Example: ./python -m test --list-cases -m FileTests test_os
  * --list-cases now also sets support.verbose to False to prevent messages
    to stdout when loading test modules.
  * Add the support._match_test() private function.

  (cherry picked from commit ace56d5)

* bpo-30764: regrtest: add --fail-env-changed option (#2402)

  * bpo-30764: regrtest: change exit code on failure
    * Exit code 2 if failed tests ("bad")
    * Exit code 3 if interrupted
  * bpo-30764: regrtest: add --fail-env-changed option. If the option is set,
    mark a test as failed if it alters the environment, for example if it
    creates a file without removing it.

  (cherry picked from commit 63f54c6)

* bpo-30776: reduce regrtest -R false positives (#2422)

  * Change the regrtest --huntrleaks checker that decides whether a test file
    leaks or not: require that each run leaks at least 1 reference.
  * Warmup runs are now completely ignored: ignored in the checker test and
    no longer used to compute the sum.
  * Add a unit test for a reference leak.

  Example of reference differences previously considered a failure (leak) and
  now considered a success (no leak): [3, 0, 0], [0, 1, 0], [8, -8, 1]

  (cherry picked from commit 48b5c42)
Parent: eef254d · Commit: de1850b
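Illustration (not part of the commit): the new leak-decision rule in check_rc_deltas() only reports a leak when every post-warmup run leaks at least one reference, so the noisy sequences quoted in the commit message no longer count. A minimal standalone sketch, with looks_like_leak() as a hypothetical stand-in for the checker:

    def looks_like_leak(deltas):
        # Same rule as the new check_rc_deltas(): every run must leak at
        # least one reference before the test file is reported as leaking.
        return all(delta >= 1 for delta in deltas)

    # Sequences quoted in the commit message: noise that used to be flagged...
    for deltas in ([3, 0, 0], [0, 1, 0], [8, -8, 1]):
        print(deltas, looks_like_leak(deltas))    # False for all three

    # ...versus sequences that still count as genuine leaks.
    for deltas in ([5, 5, 6], [10, 1, 1]):
        print(deltas, looks_like_leak(deltas))    # True for both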

3 files changed, 135 insertions(+), 31 deletions(-)

Lib/test/regrtest.py

Lines changed: 35 additions & 5 deletions
@@ -343,6 +343,9 @@ def _create_parser():
                             ' , don\'t execute them')
     group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                        help='enable Profile Guided Optimization training')
+    group.add_argument('--fail-env-changed', action='store_true',
+                       help='if a test file alters the environment, mark '
+                            'the test as failed')
 
     return parser
 
@@ -944,11 +947,19 @@ def runtest_accumulate():
         result = "FAILURE"
     elif interrupted:
         result = "INTERRUPTED"
+    elif environment_changed and ns.fail_env_changed:
+        result = "ENV CHANGED"
     else:
         result = "SUCCESS"
     print("Tests result: %s" % result)
 
-    sys.exit(len(bad) > 0 or interrupted)
+    if bad:
+        sys.exit(2)
+    if interrupted:
+        sys.exit(130)
+    if ns.fail_env_changed and environment_changed:
+        sys.exit(3)
+    sys.exit(0)
 
 
 # small set of tests to determine if we have a basically functioning interpreter
@@ -1510,9 +1521,21 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         alloc_deltas[i] = alloc_after - alloc_before
         alloc_before, rc_before = alloc_after, rc_after
     print(file=sys.stderr)
+
     # These checkers return False on success, True on failure
     def check_rc_deltas(deltas):
-        return any(deltas)
+        # bpo-30776: Try to ignore false positives:
+        #
+        #   [3, 0, 0]
+        #   [0, 1, 0]
+        #   [8, -8, 1]
+        #
+        # Expected leaks:
+        #
+        #   [5, 5, 6]
+        #   [10, 1, 1]
+        return all(delta >= 1 for delta in deltas)
+
     def check_alloc_deltas(deltas):
         # At least 1/3rd of 0s
         if 3 * deltas.count(0) < len(deltas):
@@ -1524,10 +1547,13 @@ def check_alloc_deltas(deltas):
     failed = False
     for deltas, item_name, checker in [
         (rc_deltas, 'references', check_rc_deltas),
-        (alloc_deltas, 'memory blocks', check_alloc_deltas)]:
+        (alloc_deltas, 'memory blocks', check_alloc_deltas)
+    ]:
+        # ignore warmup runs
+        deltas = deltas[nwarmup:]
         if checker(deltas):
             msg = '%s leaked %s %s, sum=%s' % (
-                test, deltas[nwarmup:], item_name, sum(deltas))
+                test, deltas, item_name, sum(deltas))
             print(msg, file=sys.stderr)
             sys.stderr.flush()
             with open(fname, "a") as refrep:
@@ -1735,10 +1761,14 @@ def _list_cases(suite):
         if isinstance(test, unittest.TestSuite):
             _list_cases(test)
         elif isinstance(test, unittest.TestCase):
-            print(test.id())
+            if support._match_test(test):
+                print(test.id())
 
 
 def list_cases(ns, selected):
+    support.verbose = False
+    support.match_tests = ns.match_tests
+
     skipped = []
     for test in selected:
         abstest = get_abs_module(ns, test)
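A hedged sketch of how a caller might act on the new exit codes; this is not part of the commit, the interpreter path and test name are placeholders, and the mapping simply mirrors the sys.exit() calls added above:

    import subprocess

    # Placeholder command line: adjust the interpreter path and test name.
    proc = subprocess.run(
        ["./python", "-m", "test", "--fail-env-changed", "test_os"])

    # Mapping taken from the sys.exit() calls added in this commit:
    #   0   all tests passed
    #   2   at least one test failed ("bad")
    #   3   a test altered the environment (only with --fail-env-changed)
    #   130 the run was interrupted by SIGINT
    results = {0: "SUCCESS", 2: "FAILURE", 3: "ENV CHANGED", 130: "INTERRUPTED"}
    print(results.get(proc.returncode, "unexpected exit code"))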

Lib/test/support/__init__.py

Lines changed: 18 additions & 14 deletions
@@ -1866,6 +1866,23 @@ def _run_suite(suite):
             raise TestFailed(err)
 
 
+def _match_test(test):
+    global match_tests
+
+    if match_tests is None:
+        return True
+    test_id = test.id()
+
+    for match_test in match_tests:
+        if fnmatch.fnmatchcase(test_id, match_test):
+            return True
+
+        for name in test_id.split("."):
+            if fnmatch.fnmatchcase(name, match_test):
+                return True
+    return False
+
+
 def run_unittest(*classes):
     """Run tests from unittest.TestCase-derived classes."""
     valid_types = (unittest.TestSuite, unittest.TestCase)
@@ -1880,20 +1897,7 @@ def run_unittest(*classes):
             suite.addTest(cls)
         else:
             suite.addTest(unittest.makeSuite(cls))
-    def case_pred(test):
-        if match_tests is None:
-            return True
-        test_id = test.id()
-
-        for match_test in match_tests:
-            if fnmatch.fnmatchcase(test_id, match_test):
-                return True
-
-            for name in test_id.split("."):
-                if fnmatch.fnmatchcase(name, match_test):
-                    return True
-        return False
-    _filter_suite(suite, case_pred)
+    _filter_suite(suite, _match_test)
     _run_suite(suite)
 
 #=======================================================================
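For illustration only: the fnmatch-based rule in support._match_test() accepts a pattern that matches either the full test id or any dotted component of it, which is why --list-cases -m test_method1 selects <testname>.Tests.test_method1. A small standalone sketch (the matches() helper and the test id below are hypothetical, not code from this commit):

    import fnmatch

    def matches(test_id, patterns):
        # Mirrors the logic of support._match_test() for a bare test id string.
        if patterns is None:
            return True
        for pattern in patterns:
            if fnmatch.fnmatchcase(test_id, pattern):
                return True
            for name in test_id.split("."):
                if fnmatch.fnmatchcase(name, pattern):
                    return True
        return False

    # Hypothetical test id of the form produced by unittest.TestCase.id().
    test_id = "test_foo.Tests.test_method1"
    print(matches(test_id, ["test_method1"]))          # True: component match
    print(matches(test_id, ["*.Tests.test_method*"]))  # True: full-id glob
    print(matches(test_id, ["test_method2"]))          # False: no match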

Lib/test/test_regrtest.py

Lines changed: 82 additions & 12 deletions
@@ -377,19 +377,19 @@ def parse_executed_tests(self, output):
         return list(match.group(1) for match in parser)
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
-                             omitted=(), randomize=False, interrupted=False):
+                             env_changed=(), omitted=(),
+                             randomize=False, interrupted=False,
+                             fail_env_changed=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
         if isinstance(failed, str):
             failed = [failed]
+        if isinstance(env_changed, str):
+            env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        ntest = len(tests)
-        nskipped = len(skipped)
-        nfailed = len(failed)
-        nomitted = len(omitted)
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -415,11 +415,17 @@ def list_regex(line_format, tests):
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
 
+        if env_changed:
+            regex = list_regex('%s test%s altered the execution environment',
+                               env_changed)
+            self.check_line(output, regex)
+
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        good = ntest - nskipped - nfailed - nomitted
+        good = (len(tests) - len(skipped) - len(failed)
+                - len(omitted) - len(env_changed))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -429,10 +435,12 @@ def list_regex(line_format, tests):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
-        if nfailed:
+        if failed:
             result = 'FAILURE'
         elif interrupted:
             result = 'INTERRUPTED'
+        elif fail_env_changed and env_changed:
+            result = 'ENV CHANGED'
         else:
             result = 'SUCCESS'
         self.check_line(output, 'Tests result: %s' % result)
@@ -604,7 +612,7 @@ def test_failing(self):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
 
-        output = self.run_tests(*tests, exitcode=1)
+        output = self.run_tests(*tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
@@ -703,7 +711,7 @@ def test_fromfile(self):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=1)
+        output = self.run_tests(test, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -732,7 +740,7 @@ def test_slow_interrupted(self):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=1)
+        output = self.run_tests(*args, exitcode=130)
         self.check_executed_tests(output, test,
                                   omitted=test, interrupted=True)
 
@@ -772,9 +780,43 @@ def test_run(self):
                 builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=1)
+        output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)
 
+    def check_leak(self, code, what):
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks', '3:3:', test,
+                                exitcode=2,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+            self.assertIn(line2, reflog)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks(self):
+        # test --huntrleaks
+        code = textwrap.dedent("""
+            import unittest
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    GLOBAL_LIST.append(object())
+        """)
+        self.check_leak(code, 'references')
+
     def test_list_tests(self):
         # test --list-tests
         tests = [self.create_test() for i in range(5)]
@@ -794,19 +836,28 @@ def test_method2(self):
                 pass
         """)
         testname = self.create_test(code=code)
+
+        # Test --list-cases
         all_methods = ['%s.Tests.test_method1' % testname,
                        '%s.Tests.test_method2' % testname]
         output = self.run_tests('--list-cases', testname)
         self.assertEqual(output.splitlines(), all_methods)
 
+        # Test --list-cases with --match
+        all_methods = ['%s.Tests.test_method1' % testname]
+        output = self.run_tests('--list-cases',
+                                '-m', 'test_method1',
+                                testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
     def test_crashed(self):
         # Any code which causes a crash
         code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)
         ok_test = self.create_test(name="ok")
 
         tests = [crash_test, ok_test]
-        output = self.run_tests("-j2", *tests, exitcode=1)
+        output = self.run_tests("-j2", *tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)
 
@@ -855,6 +906,25 @@ def test_method4(self):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)
 
+    def test_env_changed(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_env_changed(self):
+                    open("env_changed", "w").close()
+        """)
+        testname = self.create_test(code=code)
+
+        # don't fail by default
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname], env_changed=testname)
+
+        # fail with --fail-env-changed
+        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        self.check_executed_tests(output, [testname], env_changed=testname,
+                                  fail_env_changed=True)
+
 
 if __name__ == '__main__':
     unittest.main()
