Skip to content

[2.7] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master #2444

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 27, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 34 additions & 6 deletions Lib/test/regrtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@
don't execute them
--list-cases -- only write the name of test cases that will be run,
don't execute them
--fail-env-changed -- if a test file alters the environment, mark the test
as failed


Additional Option Details:
Expand Down Expand Up @@ -327,7 +329,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo',
'failfast', 'match=', 'testdir=', 'list-tests', 'list-cases',
'coverage', 'matchfile='])
'coverage', 'matchfile=', 'fail-env-changed'])
except getopt.error, msg:
usage(2, msg)

Expand All @@ -339,6 +341,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
slaveargs = None
list_tests = False
list_cases_opt = False
fail_env_changed = False
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
Expand Down Expand Up @@ -439,6 +442,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
list_tests = True
elif o == '--list-cases':
list_cases_opt = True
elif o == '--fail-env-changed':
fail_env_changed = True
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
Expand Down Expand Up @@ -558,7 +563,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
sys.exit(0)

if list_cases_opt:
list_cases(testdir, selected)
list_cases(testdir, selected, match_tests)
sys.exit(0)

if trace:
Expand Down Expand Up @@ -908,11 +913,19 @@ def local_runtest():
result = "FAILURE"
elif interrupted:
result = "INTERRUPTED"
elif environment_changed and fail_env_changed:
result = "ENV CHANGED"
else:
result = "SUCCESS"
print("Tests result: %s" % result)

sys.exit(len(bad) > 0 or interrupted)
if bad:
sys.exit(2)
if interrupted:
sys.exit(130)
if fail_env_changed and environment_changed:
sys.exit(3)
sys.exit(0)


STDTESTS = [
Expand Down Expand Up @@ -1310,7 +1323,18 @@ def run_the_test():
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print >> sys.stderr
if any(deltas):

# bpo-30776: Try to ignore false positives:
#
# [3, 0, 0]
# [0, 1, 0]
# [8, -8, 1]
#
# Expected leaks:
#
# [5, 5, 6]
# [10, 1, 1]
if all(delta >= 1 for delta in deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print >> sys.stderr, msg
with open(fname, "a") as refrep:
Expand Down Expand Up @@ -1501,9 +1525,13 @@ def _list_cases(suite):
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
print(test.id())
if test_support._match_test(test):
print(test.id())

def list_cases(testdir, selected, match_tests):
test_support.verbose = False
test_support.match_tests = match_tests

def list_cases(testdir, selected):
skipped = []
for test in selected:
abstest = get_abs_module(testdir, test)
Expand Down
32 changes: 18 additions & 14 deletions Lib/test/support/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1542,6 +1542,23 @@ def _run_suite(suite):
raise TestFailed(err)


def _match_test(test):
    """Return True if *test* should be run according to the module-level
    ``match_tests`` pattern list (``None`` means "run everything").

    A test is selected when its full dotted id, or any single dotted
    component of that id, matches one of the fnmatch-style patterns
    case-sensitively.
    """
    global match_tests

    if match_tests is None:
        # No filter configured: every test case is selected.
        return True

    test_id = test.id()
    for pattern in match_tests:
        # Match against the complete dotted id first ...
        if fnmatch.fnmatchcase(test_id, pattern):
            return True
        # ... then against each individual component (module, class,
        # method name), so a bare method name like "test_foo" matches.
        if any(fnmatch.fnmatchcase(part, pattern)
               for part in test_id.split(".")):
            return True

    return False


def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
Expand All @@ -1556,20 +1573,7 @@ def run_unittest(*classes):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
test_id = test.id()

for match_test in match_tests:
if fnmatch.fnmatchcase(test_id, match_test):
return True

for name in test_id.split("."):
if fnmatch.fnmatchcase(name, match_test):
return True
return False
_filter_suite(suite, case_pred)
_filter_suite(suite, _match_test)
_run_suite(suite)

#=======================================================================
Expand Down
118 changes: 96 additions & 22 deletions Lib/test/test_regrtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,19 +91,19 @@ def parse_executed_tests(self, output):
return list(match.group(1) for match in parser)

def check_executed_tests(self, output, tests, skipped=(), failed=(),
omitted=(), randomize=False, interrupted=False):
env_changed=(), omitted=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
ntest = len(tests)
nskipped = len(skipped)
nfailed = len(failed)
nomitted = len(omitted)

executed = self.parse_executed_tests(output)
if randomize:
Expand All @@ -129,11 +129,17 @@ def list_regex(line_format, tests):
regex = list_regex('%s test%s failed', failed)
self.check_line(output, regex)

if env_changed:
regex = list_regex('%s test%s altered the execution environment',
env_changed)
self.check_line(output, regex)

if omitted:
regex = list_regex('%s test%s omitted', omitted)
self.check_line(output, regex)

good = ntest - nskipped - nfailed - nomitted
good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
Expand All @@ -143,10 +149,12 @@ def list_regex(line_format, tests):
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')

if nfailed:
if failed:
result = 'FAILURE'
elif interrupted:
result = 'INTERRUPTED'
elif fail_env_changed and env_changed:
result = 'ENV CHANGED'
else:
result = 'SUCCESS'
self.check_line(output, 'Tests result: %s' % result)
Expand Down Expand Up @@ -325,7 +333,7 @@ def test_main():
test_failing = self.create_test('failing', code=code)
tests = [test_ok, test_failing]

output = self.run_tests(*tests, exitcode=1)
output = self.run_tests(*tests, exitcode=2)
self.check_executed_tests(output, tests, failed=test_failing)

def test_resources(self):
Expand Down Expand Up @@ -394,7 +402,7 @@ def test_fromfile(self):
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
output = self.run_tests(test, exitcode=1)
output = self.run_tests(test, exitcode=130)
self.check_executed_tests(output, test, omitted=test,
interrupted=True)

Expand Down Expand Up @@ -423,7 +431,7 @@ def test_slow_interrupted(self):
args = ("--slowest", "-j2", test)
else:
args = ("--slowest", test)
output = self.run_tests(*args, exitcode=1)
output = self.run_tests(*args, exitcode=130)
self.check_executed_tests(output, test,
omitted=test, interrupted=True)

Expand Down Expand Up @@ -461,24 +469,88 @@ def test_main():
support.run_unittest(ForeverTester)
""")
test = self.create_test('forever', code=code)
output = self.run_tests('--forever', test, exitcode=1)
output = self.run_tests('--forever', test, exitcode=2)
self.check_executed_tests(output, [test]*3, failed=test)

    def check_leak(self, code, what):
        # Run *code* as a test file under "--huntrleaks 3:3:" and check that
        # regrtest reports a leak of one object per repetition ([1, 1, 1])
        # of kind *what* (e.g. "references"), both in the process output
        # and in the reflog file that --huntrleaks appends to.
        test = self.create_test('huntrleaks', code=code)

        # --huntrleaks appends its report to reflog.txt in the cwd.
        filename = 'reflog.txt'
        self.addCleanup(support.unlink, filename)
        # Merge stderr into stdout so the leak message can be matched in a
        # single stream; a detected leak makes regrtest exit with code 2.
        output = self.run_tests('--huntrleaks', '3:3:', test,
                                exitcode=2,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [test], failed=test)

        # "3:3:" means 3 warmup runs + 3 counted runs = 6 repetitions;
        # regrtest prints one digit/dot per run while hunting.
        line = 'beginning 6 repetitions\n123456\n......\n'
        self.check_line(output, re.escape(line))

        # One object leaked per counted run -> [1, 1, 1], sum=3.
        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
        self.assertIn(line2, output)

        # The same report line must also have been written to reflog.txt.
        with open(filename) as fp:
            reflog = fp.read()
            self.assertIn(line2, reflog)

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks(self):
        # test --huntrleaks
        # The embedded test deliberately leaks exactly one reference per
        # run by appending a fresh object to a module-level list, so
        # check_leak() expects a "[1, 1, 1]" reference-leak report.
        code = textwrap.dedent("""
            import unittest
            from test import support

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())

            def test_main():
                support.run_unittest(RefLeakTest)
        """)
        self.check_leak(code, 'references')

def test_list_tests(self):
# test --list-tests
tests = [self.create_test() for i in range(5)]
output = self.run_tests('--list-tests', *tests)
self.assertEqual(output.rstrip().splitlines(),
tests)

    def test_list_cases(self):
        # test --list-cases
        # --list-cases must print each test case id (module.Class.method),
        # one per line, without running the tests.
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Test --list-cases: both methods are listed.
        all_methods = ['%s.Tests.test_method1' % testname,
                       '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), all_methods)

        # Test --list-cases with --match: the -m pattern must also filter
        # the listed cases, not only executed ones.
        all_methods = ['%s.Tests.test_method1' % testname]
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(), all_methods)

def test_crashed(self):
# Any code which causes a crash
code = 'import test.support; test.support._crash_python()'
crash_test = self.create_test(name="crash", code=code)
ok_test = self.create_test(name="ok")

tests = [crash_test, ok_test]
output = self.run_tests("-j2", *tests, exitcode=1)
output = self.run_tests("-j2", *tests, exitcode=2)
self.check_executed_tests(output, tests, failed=crash_test,
randomize=True)

Expand Down Expand Up @@ -532,26 +604,28 @@ def test_main():
subset = ['test_method1', 'test_method3']
self.assertEqual(methods, subset)

def test_list_cases(self):
# test --list-cases
def test_env_changed(self):
code = textwrap.dedent("""
import unittest
from test import support

class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
def test_env_changed(self):
open("env_changed", "w").close()

def test_main():
support.run_unittest(Tests)
""")
testname = self.create_test(code=code)
all_methods = ['%s.Tests.test_method1' % testname,
'%s.Tests.test_method2' % testname]
output = self.run_tests('--list-cases', testname)
self.assertEqual(output.splitlines(), all_methods)

# don't fail by default
output = self.run_tests(testname)
self.check_executed_tests(output, [testname], env_changed=testname)

# fail with --fail-env-changed
output = self.run_tests("--fail-env-changed", testname, exitcode=3)
self.check_executed_tests(output, [testname], env_changed=testname,
fail_env_changed=True)


def test_main():
Expand Down