diff --git a/.travis.yml b/.travis.yml
index 4110653853bb..43498026bd0c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,7 +9,7 @@ python:
 #  - "pypy3"
 
 install:
-  - pip install -r test-requirements.txt
+  - pip install -r requirements-testing.txt
   - python setup.py install
 
 script:
diff --git a/README.md b/README.md
index d056fada1e35..b48e7e7f4f6d 100644
--- a/README.md
+++ b/README.md
@@ -193,6 +193,14 @@ pass inferior arguments via `-a`:
     $ PYTHONPATH=$PWD scripts/myunit -m mypy.test.testlex -v '*backslash*'
     $ ./runtests.py mypy.test.testlex -a -v -a '*backslash*'
 
+Mypy is currently in the process of converting its tests from myunit to pytest.
+Some of the tests, such as `test_check`, have already been converted. To run
+these individually, you can either use `./runtests.py`, passing options with
+`-t` instead of `-a`, or run `py.test` directly:
+
+    $ ./runtests.py pytest -t -k -t NestedListAssignment
+    $ py.test -k NestedListAssignment
+
 You can also run the type checker for manual testing without
 installing anything by setting up the Python module search path
 suitably (the lib-typing/3.2 path entry is not needed for Python 3.5
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 000000000000..a6f1824ef3d7
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,13 @@
+from mypy.test.helpers import PytestSuite
+import inspect
+import pytest
+
+def pytest_addoption(parser):
+    parser.addoption('--update-testcases', action='store_true',
+                     dest='UPDATE_TESTCASES')
+
+def pytest_pycollect_makeitem(collector, name, obj):
+    if (inspect.isclass(obj) and issubclass(obj, PytestSuite) and
+            obj is not PytestSuite):
+        print(name)
+        obj.collect_tests()
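
To make the collection hook concrete, here is a minimal sketch of a converted
suite as the hooks above would discover it. This example is illustrative only:
the suite name and `example.test` file are hypothetical, while `PytestSuite`,
the `test` fixture, and `parse_test_cases` are real and appear in the diffs
below.

    # Hypothetical converted suite, picked up via conftest.py's hooks.
    from typing import List

    from mypy.test.data import DataDrivenTestCase, parse_test_cases
    from mypy.test.helpers import PytestSuite, test  # 'test' is a fixture


    class TestExample(PytestSuite):
        @classmethod
        def cases(cls) -> List[DataDrivenTestCase]:
            # Each [case ...] block in the .test file becomes one case;
            # cls.run_test is stored unbound and later invoked as
            # perform(suite_instance, testcase).
            return parse_test_cases('example.test', cls.run_test)

        def run_test(self, testcase: DataDrivenTestCase) -> None:
            assert testcase.output == []  # real suites do suite-specific checks

During collection, `pytest_pycollect_makeitem` spots the `PytestSuite` subclass
and calls `collect_tests()`, which turns every `DataDrivenTestCase` into an
ordinary test method on the class.
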
diff --git a/mypy/test/data.py b/mypy/test/data.py
index bc5b7c40fc0d..ae9fd265518c 100644
--- a/mypy/test/data.py
+++ b/mypy/test/data.py
@@ -6,14 +6,14 @@ from os import remove, rmdir
 import shutil
 
-from typing import Callable, List, Tuple
+from typing import Callable, List, Tuple, Any
 
 from mypy.myunit import TestCase, SkipTestCaseException
 
 
 def parse_test_cases(
         path: str,
-        perform: Callable[['DataDrivenTestCase'], None],
+        perform: Callable[..., None],
         base_path: str = '.',
         optional_out: bool = False,
         include_path: str = None,
@@ -89,22 +89,27 @@ def parse_test_cases(
 
 class DataDrivenTestCase(TestCase):
+    name = None  # type: str
     input = None  # type: List[str]
     output = None  # type: List[str]
 
     file = ''
     line = 0
 
-    perform = None  # type: Callable[['DataDrivenTestCase'], None]
+    # NOTE: For info on the ..., see `run`.
+    perform = None  # type: Callable[..., None]
 
     # (file path, file content) tuples
     files = None  # type: List[Tuple[str, str]]
 
     clean_up = None  # type: List[Tuple[bool, str]]
 
+    marked_skip = False
+
     def __init__(self, name, input, output, file, line, lastline,
                  perform, files):
         super().__init__(name)
+        self.name = name
         self.input = input
         self.output = output
         self.lastline = lastline
@@ -112,6 +117,7 @@ def __init__(self, name, input, output, file, line, lastline,
         self.line = line
         self.perform = perform
         self.files = files
+        self.marked_skip = self.name.endswith('-skip')
 
     def set_up(self) -> None:
         super().set_up()
@@ -137,19 +143,34 @@ def add_dirs(self, dir: str) -> List[str]:
             os.mkdir(dir)
         return dirs
 
-    def run(self):
-        if self.name.endswith('-skip'):
-            raise SkipTestCaseException()
+    def run(self, obj: Any = None):
+        if obj is None:
+            # XXX: The unit tests are being converted over to pytest. Due to
+            # modifications required to make BOTH run at the moment, this
+            # branch is necessary. It should be removed once all the tests
+            # relying on DataDrivenTestCase are converted to pytest.
+            if self.marked_skip:
+                raise SkipTestCaseException()
+            else:
+                self.perform(self)
         else:
-            self.perform(self)
+            assert not self.marked_skip
+            # Because perform is an unbound method, it needs to be passed its
+            # own self, which is obj. In the future, after all tests are moved
+            # over to pytest, this whole class should probably be generic, to
+            # allow annotating obj. In the meantime, there isn't a cleaner way
+            # to handle this.
+            self.perform(obj, self)
 
     def tear_down(self) -> None:
         # First remove files.
         for is_dir, path in reversed(self.clean_up):
-            if not is_dir:
+            if not is_dir and os.path.exists(path):
                 remove(path)
         # Then remove directories.
         for is_dir, path in reversed(self.clean_up):
+            if not os.path.exists(path):
+                continue
             if is_dir:
                 pycache = os.path.join(path, '__pycache__')
                 if os.path.isdir(pycache):
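
The two branches of `run` reflect the two harnesses that must coexist during
the migration. Schematically (illustrative calls, not code from this diff):

    case = DataDrivenTestCase(...)   # built by parse_test_cases

    # myunit path: perform was a bound method, so the case passes itself.
    case.run()             # -> case.perform(case)

    # pytest path: perform is an unbound method (e.g. cls.run_test), so the
    # suite instance must be supplied by the generated test method.
    case.run(suite)        # -> case.perform(suite, case)
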
diff --git a/mypy/test/helpers.py b/mypy/test/helpers.py
index d1b5a65a02c9..559fe98510ac 100644
--- a/mypy/test/helpers.py
+++ b/mypy/test/helpers.py
@@ -1,6 +1,7 @@
 import sys
 import re
 import os
+import pytest  # type: ignore
 
 from typing import List, Dict, Tuple
 
@@ -263,18 +264,18 @@ def num_skipped_suffix_lines(a1: List[str], a2: List[str]) -> int:
     return max(0, num_eq - 4)
 
 
-def testfile_pyversion(path: str) -> Tuple[int, int]:
+def pyversion_testfile(path: str) -> Tuple[int, int]:
     if path.endswith('python2.test'):
         return defaults.PYTHON2_VERSION
     else:
         return defaults.PYTHON3_VERSION
 
 
-def testcase_pyversion(path: str, testcase_name: str) -> Tuple[int, int]:
+def pyversion_testcase(path: str, testcase_name: str) -> Tuple[int, int]:
     if testcase_name.endswith('python2'):
         return defaults.PYTHON2_VERSION
     else:
-        return testfile_pyversion(path)
+        return pyversion_testfile(path)
 
 
 def normalize_error_messages(messages: List[str]) -> List[str]:
@@ -284,3 +285,42 @@ def normalize_error_messages(messages: List[str]) -> List[str]:
     for m in messages:
         a.append(m.replace(os.sep, '/'))
     return a
+
+
+@pytest.fixture(scope='function')
+def test(request):
+    test = request.function.test
+    test.set_up()
+    request.addfinalizer(test.tear_down)
+    return test
+
+
+class PytestSuite:
+    """Assists in setting up data-driven test cases for pytest."""
+    @classmethod
+    def collect_tests(cls):
+        """
+        Set up the child class's test cases. The child must have a
+        classmethod `cases` that returns a list of `DataDrivenTestCase`s.
+        This method loads those data-driven test cases and uses setattr to
+        attach each one to the class, which allows pytest to recognize them
+        as tests.
+
+        This will be called during test collection (see conftest.py in the
+        root of the repository).
+        """
+        c = cls.cases()  # type: List[DataDrivenTestCase]
+        for test in c:
+            def func(self, test):
+                test.run(self)
+            if test.marked_skip:
+                func = pytest.mark.skip(reason='Test ends with -skip')(func)
+            if 'FastParse' in test.name and not test.marked_skip:
+                try:
+                    import mypy.fastparse
+                except SystemExit:
+                    func = pytest.mark.skip(
+                        reason='You must install the typed_ast package in '
+                               'order to run this test')(func)
+            func.test = test
+            setattr(cls, test.name, func)
+            # setattr(cls, test.name.replace('test', 'test_', 1), func)
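
Once `collect_tests` has run, each generated method behaves as if it had been
written out by hand. For a case named `testNestedListAssignment`, the effect is
roughly this (an illustrative expansion, not code from the diff):

    class TestTypeCheck(PytestSuite):
        def testNestedListAssignment(self, test):
            # 'test' is the function-scoped fixture above: pytest matches it
            # by parameter name, and the fixture fetches the case from
            # request.function.test (stored by collect_tests via func.test),
            # calling set_up() and registering tear_down() along the way.
            test.run(self)
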
diff --git a/mypy/test/testcheck.py b/mypy/test/test_check.py
similarity index 90%
rename from mypy/test/testcheck.py
rename to mypy/test/test_check.py
index d64f3a4dec36..09e625290ab9 100644
--- a/mypy/test/testcheck.py
+++ b/mypy/test/test_check.py
@@ -4,18 +4,18 @@
 import re
 import shutil
 import sys
+import mypy
+import pytest  # type: ignore
 
 from typing import Tuple, List, Dict, Set
 
 from mypy import build
-import mypy.myunit  # for mutable globals (ick!)
 from mypy.build import BuildSource, find_module_clear_caches
-from mypy.myunit import Suite, AssertionFailure
 from mypy.test.config import test_temp_dir, test_data_prefix
 from mypy.test.data import parse_test_cases, DataDrivenTestCase
 from mypy.test.helpers import (
-    assert_string_arrays_equal, normalize_error_messages,
-    testcase_pyversion, update_testcase_output,
+    normalize_error_messages, pyversion_testcase, update_testcase_output,
+    PytestSuite, test
 )
 from mypy.errors import CompileError
 
@@ -61,13 +61,13 @@
 ]
 
 
-class TypeCheckSuite(Suite):
-
-    def cases(self) -> List[DataDrivenTestCase]:
+class TestTypeCheck(PytestSuite):
+    @classmethod
+    def cases(cls):
         c = []  # type: List[DataDrivenTestCase]
         for f in files:
             c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  self.run_test, test_temp_dir, True)
+                                  cls.run_test, test_temp_dir, True)
         return c
 
     def run_test(self, testcase: DataDrivenTestCase) -> None:
@@ -88,7 +88,7 @@ def clear_cache(self) -> None:
 
     def run_test_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
         find_module_clear_caches()
-        pyversion = testcase_pyversion(testcase.file, testcase.name)
+        pyversion = pyversion_testcase(testcase.file, testcase.name)
         program_text = '\n'.join(testcase.input)
         module_name, program_name, program_text = self.parse_options(program_text)
         flags = self.parse_flags(program_text)
@@ -122,13 +122,12 @@ def run_test_once(self, testcase: DataDrivenTestCase, incremental=0) -> None:
                 a = e.messages
             a = normalize_error_messages(a)
 
-        if output != a and mypy.myunit.UPDATE_TESTCASES:
+        if output != a and pytest.config.getoption('UPDATE_TESTCASES'):
             update_testcase_output(testcase, a, mypy.myunit.APPEND_TESTCASES)
 
-        assert_string_arrays_equal(
-            output, a,
+        assert output == a, \
             'Invalid type checker output ({}, line {})'.format(
-                testcase.file, testcase.line))
+                testcase.file, testcase.line)
 
         if incremental and res:
             self.verify_cache(module_name, program_name, a, res.manager)
@@ -145,9 +144,7 @@ def verify_cache(self, module_name: str, program_name: str, a: List[str],
         modules = self.find_module_files()
         modules.update({module_name: program_name})
         missing_paths = self.find_missing_cache_files(modules, manager)
-        if missing_paths != error_paths:
-            raise AssertionFailure("cache data discrepancy %s != %s" %
-                                   (missing_paths, error_paths))
+        assert missing_paths == error_paths, 'cache data discrepancy'
 
     def find_error_paths(self, a: List[str]) -> Set[str]:
         hits = set()
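
With the `--update-testcases` option registered in conftest.py, stale expected
output in a `.test` file can be regenerated from the command line rather than
edited by hand. For example (the test name here is hypothetical):

    $ py.test mypy/test/test_check.py -k NestedListAssignment --update-testcases

When the flag is set and the actual output differs, `run_test_once` calls
`update_testcase_output` to rewrite the `.test` file before the usual
assertion runs.
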
diff --git a/mypy/test/testcmdline.py b/mypy/test/test_cmdline.py
similarity index 82%
rename from mypy/test/testcmdline.py
rename to mypy/test/test_cmdline.py
index a78cbe265a79..549561298ff3 100644
--- a/mypy/test/testcmdline.py
+++ b/mypy/test/test_cmdline.py
@@ -14,7 +14,7 @@
 from mypy.myunit import Suite, SkipTestCaseException
 from mypy.test.config import test_data_prefix, test_temp_dir
 from mypy.test.data import parse_test_cases, DataDrivenTestCase
-from mypy.test.helpers import assert_string_arrays_equal
+from mypy.test.helpers import PytestSuite, test
 
 # Path to Python 3 interpreter
 python3_path = sys.executable
@@ -23,20 +23,20 @@
 cmdline_files = ['cmdline.test']
 
 
-class PythonEvaluationSuite(Suite):
-
-    def cases(self) -> List[DataDrivenTestCase]:
+class TestPythonEvaluation(PytestSuite):
+    @classmethod
+    def cases(cls) -> List[DataDrivenTestCase]:
         c = []  # type: List[DataDrivenTestCase]
         for f in cmdline_files:
             c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  test_python_evaluation,
+                                  python_evaluation_test,
                                   base_path=test_temp_dir,
                                   optional_out=True,
                                   native_sep=True)
         return c
 
 
-def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
+def python_evaluation_test(obj: None, testcase: DataDrivenTestCase) -> None:
     # Write the program to a file.
     program = '_program.py'
     program_path = os.path.join(test_temp_dir, program)
@@ -58,9 +58,8 @@ def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
     # Remove temp file.
     os.remove(program_path)
     # Compare actual output to expected.
-    assert_string_arrays_equal(testcase.output, out,
-                               'Invalid output ({}, line {})'.format(
-                                   testcase.file, testcase.line))
+    assert testcase.output == out, 'Invalid output ({}, line {})'.format(
+        testcase.file, testcase.line)
 
 
 def parse_args(line: str) -> List[str]:
diff --git a/mypy/test/testsemanal.py b/mypy/test/test_semanal.py
similarity index 85%
rename from mypy/test/testsemanal.py
rename to mypy/test/test_semanal.py
index 9611284e793e..a1b6191a2a35 100644
--- a/mypy/test/testsemanal.py
+++ b/mypy/test/test_semanal.py
@@ -6,9 +6,8 @@
 
 from mypy import build
 from mypy.build import BuildSource
-from mypy.myunit import Suite
 from mypy.test.helpers import (
-    assert_string_arrays_equal, normalize_error_messages, testfile_pyversion,
+    normalize_error_messages, pyversion_testfile, PytestSuite, test
 )
 from mypy.test.data import parse_test_cases
 from mypy.test.config import test_data_prefix, test_temp_dir
@@ -31,19 +30,20 @@
                  'semanal-python2.test']
 
 
-class SemAnalSuite(Suite):
-    def cases(self):
+class TestSemAnal(PytestSuite):
+    @classmethod
+    def cases(cls):
         c = []
         for f in semanal_files:
             c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  test_semanal,
+                                  semanal_tests,
                                   base_path=test_temp_dir,
                                   optional_out=True,
                                   native_sep=True)
         return c
 
 
-def test_semanal(testcase):
+def semanal_tests(obj, testcase):
     """Perform a semantic analysis test case.
 
     The testcase argument contains a description of the test case
@@ -54,7 +54,7 @@ def test_semanal(testcase):
     src = '\n'.join(testcase.input)
     result = build.build(target=build.SEMANTIC_ANALYSIS,
                          sources=[BuildSource('main', None, src)],
-                         pyversion=testfile_pyversion(testcase.file),
+                         pyversion=pyversion_testfile(testcase.file),
                          flags=[build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
     a = result.errors
@@ -77,10 +77,9 @@ def test_semanal(testcase):
             a += str(f).split('\n')
     except CompileError as e:
         a = e.messages
-    assert_string_arrays_equal(
-        testcase.output, a,
+    assert testcase.output == a, \
         'Invalid semantic analyzer output ({}, line {})'.format(testcase.file,
-                                                                testcase.line))
+                                                                testcase.line)
 
 
 # Semantic analyzer error test cases
@@ -88,17 +87,19 @@
 semanal_error_files = ['semanal-errors.test']
 
 
-class SemAnalErrorSuite(Suite):
-    def cases(self):
+class TestSemAnalError(PytestSuite):
+    @classmethod
+    def cases(cls):
         # Read test cases from test case description files.
         c = []
         for f in semanal_error_files:
            c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  test_semanal_error, test_temp_dir, optional_out=True)
+                                  semanal_error_tests, test_temp_dir,
+                                  optional_out=True)
         return c
 
 
-def test_semanal_error(testcase):
+def semanal_error_tests(obj, testcase):
     """Perform a test case."""
 
     try:
@@ -113,9 +114,9 @@ def test_semanal_error(testcase):
         # Verify that there was a compile error and that the error messages
         # are equivalent.
         a = e.messages
-        assert_string_arrays_equal(
-            testcase.output, normalize_error_messages(a),
-            'Invalid compiler output ({}, line {})'.format(testcase.file, testcase.line))
+        assert testcase.output == normalize_error_messages(a), \
+            'Invalid compiler output ({}, line {})'.format(testcase.file,
+                                                           testcase.line)
 
 
 # SymbolNode table export test cases
@@ -124,12 +125,13 @@
 semanal_symtable_files = ['semanal-symtable.test']
 
 
-class SemAnalSymtableSuite(Suite):
-    def cases(self):
+class TestSemAnalSymtable(PytestSuite):
+    @classmethod
+    def cases(cls):
         c = []
         for f in semanal_symtable_files:
             c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  self.run_test, test_temp_dir)
+                                  cls.run_test, test_temp_dir)
         return c
 
     def run_test(self, testcase):
@@ -152,10 +154,9 @@ def run_test(self, testcase):
                     a.append('  ' + s)
         except CompileError as e:
             a = e.messages
-        assert_string_arrays_equal(
-            testcase.output, a,
+        assert testcase.output == a, \
             'Invalid semantic analyzer output ({}, line {})'.format(
-                testcase.file, testcase.line))
+                testcase.file, testcase.line)
 
 
 # Type info export test cases
@@ -163,13 +164,14 @@
 semanal_typeinfo_files = ['semanal-typeinfo.test']
 
 
-class SemAnalTypeInfoSuite(Suite):
-    def cases(self):
+class TestSemAnalTypeInfo(PytestSuite):
+    @classmethod
+    def cases(cls):
         """Test case descriptions"""
         c = []
         for f in semanal_typeinfo_files:
             c += parse_test_cases(os.path.join(test_data_prefix, f),
-                                  self.run_test, test_temp_dir)
+                                  cls.run_test, test_temp_dir)
         return c
 
     def run_test(self, testcase):
@@ -196,10 +198,9 @@ def run_test(self, testcase):
             a = str(typeinfos).split('\n')
         except CompileError as e:
             a = e.messages
-        assert_string_arrays_equal(
-            testcase.output, a,
+        assert testcase.output == a, \
             'Invalid semantic analyzer output ({}, line {})'.format(
-                testcase.file, testcase.line))
+                testcase.file, testcase.line)
 
 
 class TypeInfoMap(Dict[str, TypeInfo]):
diff --git a/mypy/test/testtransform.py b/mypy/test/testtransform.py
index 4b17d2ef8e59..b09929953a72 100644
--- a/mypy/test/testtransform.py
+++ b/mypy/test/testtransform.py
@@ -7,7 +7,7 @@
 from mypy import build
 from mypy.build import BuildSource
 from mypy.myunit import Suite
-from mypy.test.helpers import assert_string_arrays_equal, testfile_pyversion
+from mypy.test.helpers import assert_string_arrays_equal, pyversion_testfile
 from mypy.test.data import parse_test_cases
 from mypy.test.config import test_data_prefix, test_temp_dir
 from mypy.errors import CompileError
@@ -43,7 +43,7 @@ def test_transform(testcase):
     src = '\n'.join(testcase.input)
     result = build.build(target=build.SEMANTIC_ANALYSIS,
                          sources=[BuildSource('main', None, src)],
-                         pyversion=testfile_pyversion(testcase.file),
+                         pyversion=pyversion_testfile(testcase.file),
                          flags=[build.TEST_BUILTINS],
                          alt_lib_path=test_temp_dir)
     a = result.errors
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000000..5327f691d30a
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+norecursedirs=stdlib-samples lib-typing pinfer mypy/codec
+addopts=--ignore=mypy/test/helpers.py
diff --git a/test-requirements.txt b/requirements-testing.txt
similarity index 70%
rename from test-requirements.txt
rename to requirements-testing.txt
index 47744fe21aac..bb240784ac4d 100644
--- a/test-requirements.txt
+++ b/requirements-testing.txt
@@ -1,2 +1,3 @@
 flake8
 typed-ast
+pytest
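
With pytest added to the renamed requirements file, setting up and exercising
the converted suites on a fresh checkout looks like this (assuming the
README's PYTHONPATH setup or an installed package):

    $ pip install -r requirements-testing.txt
    $ py.test -k NestedListAssignment
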
diff --git a/runtests.py b/runtests.py
index 82b42c3f58a4..16ae6a8ddf28 100755
--- a/runtests.py
+++ b/runtests.py
@@ -39,11 +39,12 @@ def get_versions():  # type: () -> typing.List[str]
 
 class Driver:
 
     def __init__(self, whitelist: List[str], blacklist: List[str],
-                 arglist: List[str], verbosity: int, parallel_limit: int,
-                 xfail: List[str]) -> None:
+                 arglist: List[str], testarglist: List[str], verbosity: int,
+                 parallel_limit: int, xfail: List[str]) -> None:
         self.whitelist = whitelist
         self.blacklist = blacklist
         self.arglist = arglist
+        self.testarglist = testarglist
         self.verbosity = verbosity
         self.waiter = Waiter(verbosity=verbosity, limit=parallel_limit, xfail=xfail)
         self.versions = get_versions()
@@ -189,6 +190,10 @@ def add_imports(driver: Driver) -> None:
         driver.add_flake8('module %s' % mod, f)
 
 
+def add_pytest(driver: Driver) -> None:
+    driver.add_python_mod('pytest', 'pytest', *driver.testarglist)
+
+
 def add_myunit(driver: Driver) -> None:
     for f in find_files('mypy', prefix='test', suffix='.py'):
         mod = file_to_module(f)
@@ -196,12 +201,12 @@ def add_myunit(driver: Driver) -> None:
             # myunit is Python3 only.
             driver.add_python_mod('unittest %s' % mod, 'unittest', mod)
             driver.add_python2('unittest %s' % mod, '-m', 'unittest', mod)
-        elif mod in ('mypy.test.testpythoneval', 'mypy.test.testcmdline'):
-            # Run Python evaluation integration tests and command-line
-            # parsing tests separately since they are much slower than
-            # proper unit tests.
+        elif mod == 'mypy.test.testpythoneval':
+            # Run Python evaluation integration tests separately since they
+            # are much slower than proper unit tests.
             pass
-        else:
+        elif 'test_' not in mod:
+            # Modules containing `test_` have been ported to pytest.
             driver.add_python_mod('unit-test %s' % mod, 'mypy.myunit', '-m',
                                   mod, *driver.arglist)
 
@@ -210,11 +215,6 @@ def add_pythoneval(driver: Driver) -> None:
                           '-m', 'mypy.test.testpythoneval', *driver.arglist)
 
 
-def add_cmdline(driver: Driver) -> None:
-    driver.add_python_mod('cmdline-test', 'mypy.myunit',
-                          '-m', 'mypy.test.testcmdline', *driver.arglist)
-
-
 def add_stubs(driver: Driver) -> None:
     # We only test each module in the one version mypy prefers to find.
     # TODO: test stubs for other versions, especially Python 2 stubs.
@@ -265,7 +265,8 @@ def usage(status: int) -> None:
     print('Run mypy tests. If given no arguments, run all tests.')
     print()
     print('Examples:')
-    print('  %s unit-test  (run unit tests only)' % sys.argv[0])
+    print('  %s pytest  (run pytest only)' % sys.argv[0])
+    print('  %s unit-test  (run unit tests and pytest only)' % sys.argv[0])
     print('  %s unit-test -a "*tuple*"' % sys.argv[0])
     print('      (run all unit tests with "tuple" in test name)')
     print()
@@ -276,6 +277,7 @@ def usage(status: int) -> None:
     print('  -jN                    run N tasks at once (default: one per CPU)')
     print('  -a, --argument ARG     pass an argument to myunit tasks')
     print('                         (-v: verbose; glob pattern: filter by test name)')
+    print('  -t, --test-args ARG    pass an argument to pytest')
     print('  -l, --list             list included tasks (after filtering) and exit')
     print('  FILTER                 include tasks matching FILTER')
     print('  -x, --exclude FILTER   exclude tasks matching FILTER')
@@ -306,13 +308,15 @@ def main() -> None:
     whitelist = []  # type: List[str]
     blacklist = []  # type: List[str]
     arglist = []  # type: List[str]
+    testarglist = []  # type: List[str]
     list_only = False
     dirty_stubs = False
     allow_opts = True
     curlist = whitelist
     for a in sys.argv[1:]:
-        if curlist is not arglist and allow_opts and a.startswith('-'):
+        if (curlist is not arglist and curlist is not testarglist and
+                allow_opts and a.startswith('-')):
            if curlist is not whitelist:
                 break
             if a == '--':
@@ -330,6 +334,8 @@ def main() -> None:
                 curlist = blacklist
             elif a == '-a' or a == '--argument':
                 curlist = arglist
+            elif a == '-t' or a == '--test-args':
+                curlist = testarglist
             elif a == '-l' or a == '--list':
                 list_only = True
             elif a == '-f' or a == '--dirty-stubs':
@@ -349,8 +355,12 @@ def main() -> None:
     if not whitelist:
         whitelist.append('')
 
+    if 'unit-test' in whitelist and 'pytest' not in whitelist:
+        whitelist.append('pytest')
+
     driver = Driver(whitelist=whitelist, blacklist=blacklist, arglist=arglist,
-                    verbosity=verbosity, parallel_limit=parallel_limit, xfail=[])
+                    testarglist=testarglist, verbosity=verbosity,
+                    parallel_limit=parallel_limit, xfail=[])
 
     if not dirty_stubs:
         git.verify_git_integrity_or_abort(driver.cwd)
@@ -361,10 +371,10 @@ def main() -> None:
     driver.prepend_path('PYTHONPATH', [join(driver.cwd, 'lib-typing', v)
                                        for v in driver.versions])
 
     add_pythoneval(driver)
-    add_cmdline(driver)
     add_basic(driver)
     add_selftypecheck(driver)
     add_myunit(driver)
+    add_pytest(driver)
     add_imports(driver)
     add_stubs(driver)
     add_stdlibsamples(driver)
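
Arguments gathered by `-t`/`--test-args` flow through unchanged: `main`
collects them into `testarglist`, the `Driver` stores them, and `add_pytest`
splices them onto the pytest module invocation. So the README example

    $ ./runtests.py pytest -t -k -t NestedListAssignment

ends up running, approximately, `python -m pytest -k NestedListAssignment`
(the exact interpreter is whichever one the driver selects).
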