Skip to content

Run cmdline tests on AppVeyor #2815

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Feb 6, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions appveyor.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,5 @@ build: off

test_script:
# Ignore lint (it's run separately below)
# and cmdline (since one of its tests depend on lxml)
- "%PYTHON%\\python.exe runtests.py -x lint -x cmdline"
- "%PYTHON%\\python.exe runtests.py -x lint"
- ps: if ($env:PYTHON_VERSION -Match "3.6.x" -And $env:PYTHON_ARCH -Match "64") { iex "$env:PYTHON\\python.exe -m flake8" }
17 changes: 16 additions & 1 deletion mypy/test/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ def parse_test_cases(
tc = DataDrivenTestCase(p[i0].arg, input, tcout, tcout2, path,
p[i0].line, lastline, perform,
files, output_files, stale_modules,
rechecked_modules)
rechecked_modules, native_sep)
out.append(tc)
if not ok:
raise ValueError(
Expand Down Expand Up @@ -158,6 +158,7 @@ def __init__(self,
output_files: List[Tuple[str, str]],
expected_stale_modules: Optional[Set[str]],
expected_rechecked_modules: Optional[Set[str]],
native_sep: bool = False,
) -> None:
super().__init__(name)
self.input = input
Expand All @@ -171,6 +172,7 @@ def __init__(self,
self.output_files = output_files
self.expected_stale_modules = expected_stale_modules
self.expected_rechecked_modules = expected_rechecked_modules
self.native_sep = native_sep

def set_up(self) -> None:
super().set_up()
Expand Down Expand Up @@ -409,6 +411,19 @@ def fix_win_path(line: str) -> str:
lineno or '', message)


def fix_cobertura_filename(line: str) -> str:
    r"""Normalize Windows path separators in a Cobertura XML class entry.

    E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
    Lines that do not contain a <class ... filename="..."> entry are
    returned unchanged.
    """
    match = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
    if match is None:
        return line
    # Rewrite only the span covered by the filename attribute's value.
    fixed = match.group('filename').replace('\\', '/')
    return line[:match.start('filename')] + fixed + line[match.end('filename'):]


##
#
# pytest setup
Expand Down
7 changes: 5 additions & 2 deletions mypy/test/testcmdline.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from mypy.myunit import Suite, SkipTestCaseException, AssertionFailure
from mypy.test.config import test_data_prefix, test_temp_dir
from mypy.test.data import fix_cobertura_filename
from mypy.test.data import parse_test_cases, DataDrivenTestCase
from mypy.test.helpers import assert_string_arrays_equal
from mypy.version import __version__, base_version
Expand Down Expand Up @@ -66,9 +67,11 @@ def test_python_evaluation(testcase: DataDrivenTestCase) -> None:
'Expected file {} was not produced by test case'.format(path))
with open(path, 'r') as output_file:
actual_output_content = output_file.read().splitlines()
noramlized_output = normalize_file_output(actual_output_content,
normalized_output = normalize_file_output(actual_output_content,
os.path.abspath(test_temp_dir))
assert_string_arrays_equal(expected_content.splitlines(), noramlized_output,
if testcase.native_sep and os.path.sep == '\\':
normalized_output = [fix_cobertura_filename(line) for line in normalized_output]
assert_string_arrays_equal(expected_content.splitlines(), normalized_output,
'Output file {} did not match its expected output'.format(
path))
else:
Expand Down