Skip to content

Commit cf9e0d6

Browse files
committed
factor out into separate file
1 parent e76f087 commit cf9e0d6

File tree

3 files changed

+88
-85
lines changed

3 files changed

+88
-85
lines changed

mypy/test/testcheck.py

+3-85
Original file line numberDiff line numberDiff line change
@@ -5,24 +5,14 @@
55
import os
66
import re
77
import sys
8-
from collections import defaultdict
9-
from typing import Iterator
108

119
from mypy import build
1210
from mypy.build import Graph
1311
from mypy.errors import CompileError
1412
from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths
1513
from mypy.options import TYPE_VAR_TUPLE, UNPACK
1614
from mypy.test.config import test_data_prefix, test_temp_dir
17-
from mypy.test.data import (
18-
DataDrivenTestCase,
19-
DataFileCollector,
20-
DataFileFix,
21-
DataSuite,
22-
FileOperation,
23-
module_from_path,
24-
parse_test_data,
25-
)
15+
from mypy.test.data import DataDrivenTestCase, DataSuite, FileOperation, module_from_path
2616
from mypy.test.helpers import (
2717
assert_module_equivalence,
2818
assert_string_arrays_equal,
@@ -33,6 +23,7 @@
3323
parse_options,
3424
perform_file_operations,
3525
)
26+
from mypy.test.update_data import update_testcase_output
3627

3728
try:
3829
import lxml # type: ignore[import]
@@ -201,12 +192,7 @@ def run_case_once(
201192
output = testcase.output2.get(incremental_step, [])
202193

203194
if output != a and testcase.config.getoption("--update-data", False):
204-
collector = testcase.parent
205-
assert isinstance(collector, DataFileCollector)
206-
for fix in self.iter_data_file_fixes(
207-
testcase, actual=a, incremental_step=incremental_step
208-
):
209-
collector.enqueue_fix(fix)
195+
update_testcase_output(testcase, a, incremental_step=incremental_step)
210196

211197
assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
212198

@@ -241,74 +227,6 @@ def run_case_once(
241227
if testcase.output_files:
242228
check_test_output_files(testcase, incremental_step, strip_prefix="tmp/")
243229

244-
def iter_data_file_fixes(
245-
self, testcase: DataDrivenTestCase, *, actual: list[str], incremental_step: int
246-
) -> Iterator[DataFileFix]:
247-
reports_by_line: dict[tuple[str, int], list[tuple[str, str]]] = defaultdict(list)
248-
for error_line in actual:
249-
comment_match = re.match(
250-
r"^(?P<filename>[^:]+):(?P<lineno>\d+): (?P<severity>error|note|warning): (?P<msg>.+)$",
251-
error_line,
252-
)
253-
if comment_match:
254-
filename = comment_match.group("filename")
255-
lineno = int(comment_match.group("lineno"))
256-
severity = comment_match.group("severity")
257-
msg = comment_match.group("msg")
258-
reports_by_line[filename, lineno].append((severity, msg))
259-
260-
test_items = parse_test_data(testcase.data, testcase.name)
261-
262-
# If we have [out] and/or [outN], we update just those sections.
263-
if any(re.match(r"^out\d*$", test_item.id) for test_item in test_items):
264-
for test_item in test_items:
265-
if (incremental_step < 2 and test_item.id == "out") or (
266-
incremental_step >= 2 and test_item.id == f"out{incremental_step}"
267-
):
268-
yield DataFileFix(
269-
lineno=testcase.line + test_item.line - 1,
270-
end_lineno=testcase.line + test_item.end_line - 1,
271-
lines=actual + [""] * test_item.trimmed_newlines,
272-
)
273-
274-
return
275-
276-
# Update assertion comments within the sections
277-
for test_item in test_items:
278-
if test_item.id == "case":
279-
source_lines = test_item.data
280-
file_path = "main"
281-
elif test_item.id == "file":
282-
source_lines = test_item.data
283-
file_path = f"tmp/{test_item.arg}"
284-
else:
285-
continue # other sections we don't touch
286-
287-
fix_lines = []
288-
for lineno, source_line in enumerate(source_lines, start=1):
289-
reports = reports_by_line.get((file_path, lineno))
290-
comment_match = re.search(r"(?P<indent>\s+)(?P<comment># [EWN]: .+)$", source_line)
291-
if comment_match:
292-
source_line = source_line[: comment_match.start("indent")] # strip old comment
293-
if reports:
294-
indent = comment_match.group("indent") if comment_match else " "
295-
# multiline comments are on the first line and then on subsequent lines emtpy lines
296-
# with a continuation backslash
297-
for j, (severity, msg) in enumerate(reports):
298-
out_l = source_line if j == 0 else " " * len(source_line)
299-
is_last = j == len(reports) - 1
300-
severity_char = severity[0].upper()
301-
continuation = "" if is_last else " \\"
302-
fix_lines.append(f"{out_l}{indent}# {severity_char}: {msg}{continuation}")
303-
else:
304-
fix_lines.append(source_line)
305-
306-
yield DataFileFix(
307-
lineno=testcase.line + test_item.line - 1,
308-
end_lineno=testcase.line + test_item.end_line - 1,
309-
lines=fix_lines + [""] * test_item.trimmed_newlines,
310-
)
311-
312230
def verify_cache(
313231
self,
314232
module_data: list[tuple[str, str, str]],

mypy/test/update.py

Whitespace-only changes.

mypy/test/update_data.py

+85
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
from __future__ import annotations
2+
3+
import re
4+
from collections import defaultdict
5+
from typing import Iterator
6+
7+
from mypy.test.data import DataDrivenTestCase, DataFileCollector, DataFileFix, parse_test_data
8+
9+
10+
def update_testcase_output(
    testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
) -> None:
    """Queue edits that rewrite *testcase*'s stored expectations to match *actual*.

    The fixes are not applied here; they are enqueued on the test case's
    parent collector, which is responsible for writing them back to the
    .test data file.
    """
    parent = testcase.parent
    # Only a DataFileCollector knows how to persist fixes to the data file.
    assert isinstance(parent, DataFileCollector)
    pending = _iter_fixes(testcase, actual, incremental_step=incremental_step)
    for data_fix in pending:
        parent.enqueue_fix(data_fix)
17+
18+
19+
def _iter_fixes(
    testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
) -> Iterator[DataFileFix]:
    """Yield DataFileFix edits bringing the test data in line with *actual* output.

    Two update strategies, chosen from the test case's sections:
      * if the case has any ``[out]`` / ``[outN]`` section, replace the matching
        section's lines wholesale with *actual*;
      * otherwise, rewrite the ``# E:`` / ``# N:`` / ``# W:`` assertion comments
        inside the ``[case]`` and ``[file ...]`` sections line by line.

    Line numbers in the yielded fixes are absolute positions in the data file
    (the test item's line offset plus the case's starting line).
    """
    # Parse mypy's textual output into (filename, lineno) -> [(severity, msg), ...].
    reports_by_line: dict[tuple[str, int], list[tuple[str, str]]] = defaultdict(list)
    for error_line in actual:
        comment_match = re.match(
            r"^(?P<filename>[^:]+):(?P<lineno>\d+): (?P<severity>error|note|warning): (?P<msg>.+)$",
            error_line,
        )
        if comment_match:
            filename = comment_match.group("filename")
            lineno = int(comment_match.group("lineno"))
            severity = comment_match.group("severity")
            msg = comment_match.group("msg")
            reports_by_line[filename, lineno].append((severity, msg))

    test_items = parse_test_data(testcase.data, testcase.name)

    # If we have [out] and/or [outN], we update just those sections.
    if any(re.match(r"^out\d*$", test_item.id) for test_item in test_items):
        for test_item in test_items:
            # Step 1 uses plain [out]; steps >= 2 use the numbered [outN] section.
            if (incremental_step < 2 and test_item.id == "out") or (
                incremental_step >= 2 and test_item.id == f"out{incremental_step}"
            ):
                yield DataFileFix(
                    lineno=testcase.line + test_item.line - 1,
                    end_lineno=testcase.line + test_item.end_line - 1,
                    # Re-pad with the blank lines that parsing trimmed off.
                    lines=actual + [""] * test_item.trimmed_newlines,
                )

        return

    # Update assertion comments within the sections
    for test_item in test_items:
        if test_item.id == "case":
            source_lines = test_item.data
            file_path = "main"  # [case] body is checked as the "main" module
        elif test_item.id == "file":
            source_lines = test_item.data
            file_path = f"tmp/{test_item.arg}"  # auxiliary files live under tmp/
        else:
            continue  # other sections we don't touch

        fix_lines = []
        for lineno, source_line in enumerate(source_lines, start=1):
            reports = reports_by_line.get((file_path, lineno))
            # Existing assertion comment (e.g. "  # E: msg") at the end of the line.
            comment_match = re.search(r"(?P<indent>\s+)(?P<comment># [EWN]: .+)$", source_line)
            if comment_match:
                source_line = source_line[: comment_match.start("indent")]  # strip old comment
            if reports:
                # Preserve the original spacing before the comment if there was one.
                indent = comment_match.group("indent") if comment_match else " "
                # multiline comments are on the first line and then on subsequent lines empty lines
                # with a continuation backslash
                for j, (severity, msg) in enumerate(reports):
                    out_l = source_line if j == 0 else " " * len(source_line)
                    is_last = j == len(reports) - 1
                    severity_char = severity[0].upper()  # error/note/warning -> E/N/W
                    continuation = "" if is_last else " \\"
                    fix_lines.append(f"{out_l}{indent}# {severity_char}: {msg}{continuation}")
            else:
                fix_lines.append(source_line)

        yield DataFileFix(
            lineno=testcase.line + test_item.line - 1,
            end_lineno=testcase.line + test_item.end_line - 1,
            lines=fix_lines + [""] * test_item.trimmed_newlines,
        )

0 commit comments

Comments
 (0)