Commit 3e1baae
Optimize fine-grained update by using Graph as the cache (#4622)
Make fine-grained update use a Graph as the source of truth instead of a SavedCache. We modify load_graph to optionally use an existing graph as a starting point. This allows us to skip the expensive full load_graph and preserve_full_cache operations that essentially only convert between Graph and SavedCache.

This requires a nontrivial change to the loading of dependency information in build.py. Since we are no longer doing a full `load_graph()`, we can't rely on it fully populating `missing_modules` if it is cleared between updates. If `missing_modules` isn't fully populated, then semanal will spuriously reject `from x import y` where `x.y` is a missing module.

We can't just *not* clear `missing_modules`, however, because `parse_file` uses it to decide that modules are suppressed. But this is actually wrong! It can lead to an import failure message not being generated because some other module has already failed to import it (and perhaps even ignored the error).

`parse_file()` shouldn't actually *need* to compute `suppressed`, though. If it is called on a file with no cache, `load_graph()` will handle moving deps to `suppressed`, and if it is called on a file that has had cache info loaded, then the dependency information has all been loaded from the cache. So we refactor things so that dep information from the cache is used when available and `parse_file` doesn't need to deal with it.

I strongly suspect that this refactor obviates the need for `fix_suppressed_dependencies`, but I was not able to quickly produce a test case from the description in #2036, so I am not comfortable making that change.
1 parent 574744e commit 3e1baae

11 files changed: +335 -228 lines
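As a rough sketch of the new calling convention (not code from this commit; the reload_graph helper and previous_graph parameter are hypothetical names used only for illustration), a fine-grained update can now feed its existing graph straight back into load_graph instead of round-tripping through a SavedCache:

from typing import List, Optional

from mypy.build import BuildManager, BuildSource, Graph, load_graph


def reload_graph(sources: List[BuildSource],
                 manager: BuildManager,
                 previous_graph: Optional[Graph]) -> Graph:
    # First run: build the dependency graph from scratch.
    if previous_graph is None:
        return load_graph(sources, manager)
    # Later fine-grained updates: reuse the existing State objects.
    # load_graph mutates and returns the same Graph, so no
    # Graph -> SavedCache -> Graph conversion is needed anymore.
    return load_graph(sources, manager, old_graph=previous_graph)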

mypy/build.py

Lines changed: 40 additions & 32 deletions
@@ -389,7 +389,6 @@ def default_lib_path(data_dir: str,
 CacheMeta = NamedTuple('CacheMeta',
                        [('id', str),
                         ('path', str),
-                        ('memory_only', bool),  # no corresponding json files (fine-grained only)
                         ('mtime', int),
                         ('size', int),
                         ('hash', str),
@@ -415,7 +414,6 @@ def cache_meta_from_dict(meta: Dict[str, Any], data_json: str) -> CacheMeta:
     return CacheMeta(
         meta.get('id', sentinel),
         meta.get('path', sentinel),
-        meta.get('memory_only', False),
         int(meta['mtime']) if 'mtime' in meta else sentinel,
         meta.get('size', sentinel),
         meta.get('hash', sentinel),
@@ -569,7 +567,7 @@ class BuildManager:
       plugin: Active mypy plugin(s)
       errors: Used for reporting all errors
       flush_errors: A function for processing errors after each SCC
-      saved_cache: Dict with saved cache state for dmypy and fine-grained incremental mode
+      saved_cache: Dict with saved cache state for coarse-grained dmypy
                    (read-write!)
       stats: Dict with various instrumentation numbers
     """
@@ -626,6 +624,8 @@ def all_imported_modules_in_file(self,

         Return list of tuples (priority, module id, import line number)
         for all modules imported in file; lower numbers == higher priority.
+
+        Can generate blocking errors on bogus relative imports.
         """

         def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
@@ -640,6 +640,12 @@ def correct_rel_imp(imp: Union[ImportFrom, ImportAll]) -> str:
                 file_id = ".".join(file_id.split(".")[:-rel])
             new_id = file_id + "." + imp.id if imp.id else file_id

+            if not new_id:
+                self.errors.set_file(file.path, file.name())
+                self.errors.report(imp.line, 0,
+                                   "No parent module -- cannot perform relative import",
+                                   blocker=True)
+
             return new_id

         res = []  # type: List[Tuple[int, str, int]]
@@ -1129,12 +1135,6 @@ def validate_meta(meta: Optional[CacheMeta], id: str, path: Optional[str],
         manager.log('Metadata abandoned for {}: errors were previously ignored'.format(id))
         return None

-    if meta.memory_only:
-        # Special case for fine-grained incremental mode when the JSON file is missing but
-        # we want to cache the module anyway.
-        manager.log('Memory-only metadata for {}'.format(id))
-        return meta
-
     assert path is not None, "Internal error: meta was provided without a path"
     # Check data_json; assume if its mtime matches it's good.
     # TODO: stat() errors
@@ -1623,7 +1623,8 @@ def __init__(self,
                 self.ignore_all = True
             else:
                 # In 'error' mode, produce special error messages.
-                manager.log("Skipping %s (%s)" % (path, id))
+                if id not in manager.missing_modules:
+                    manager.log("Skipping %s (%s)" % (path, id))
                 if follow_imports == 'error':
                     if ancestor_for:
                         self.skipping_ancestor(id, path, ancestor_for)
@@ -1675,7 +1676,7 @@ def __init__(self,
         else:
             # Parse the file (and then some) to get the dependencies.
             self.parse_file()
-            self.suppressed = []
+            self.compute_dependencies()
         self.child_modules = set()

     def skipping_ancestor(self, id: str, path: str, ancestor_for: 'State') -> None:
@@ -1830,6 +1831,8 @@ def fix_suppressed_dependencies(self, graph: Graph) -> None:
         """
         # TODO: See if it's possible to move this check directly into parse_file in some way.
         # TODO: Find a way to write a test case for this fix.
+        # TODO: I suspect that splitting compute_dependencies() out from parse_file
+        # obviates the need for this but lacking a test case for the problem this fixed...
         silent_mode = (self.options.ignore_missing_imports or
                        self.options.follow_imports == 'skip')
         if not silent_mode:
@@ -1896,49 +1899,48 @@ def parse_file(self) -> None:
             # TODO: Why can't SemanticAnalyzerPass1 .analyze() do this?
             self.tree.names = manager.semantic_analyzer.globals

+        self.check_blockers()
+
+    def compute_dependencies(self) -> None:
+        """Compute a module's dependencies after parsing it.
+
+        This is used when we parse a file that we didn't have
+        up-to-date cache information for. When we have an up-to-date
+        cache, we just use the cached info.
+        """
+        manager = self.manager
+        assert self.tree is not None
+
         # Compute (direct) dependencies.
         # Add all direct imports (this is why we needed the first pass).
         # Also keep track of each dependency's source line.
         dependencies = []
-        suppressed = []
         priorities = {}  # type: Dict[str, int]  # id -> priority
         dep_line_map = {}  # type: Dict[str, int]  # id -> line
         for pri, id, line in manager.all_imported_modules_in_file(self.tree):
             priorities[id] = min(pri, priorities.get(id, PRI_ALL))
             if id == self.id:
                 continue
-            # Omit missing modules, as otherwise we could not type-check
-            # programs with missing modules.
-            if id in manager.missing_modules:
-                if id not in dep_line_map:
-                    suppressed.append(id)
-                    dep_line_map[id] = line
-                continue
-            if id == '':
-                # Must be from a relative import.
-                manager.errors.set_file(self.xpath, self.id)
-                manager.errors.report(line, 0,
-                                      "No parent module -- cannot perform relative import",
-                                      blocker=True)
-                continue
             if id not in dep_line_map:
                 dependencies.append(id)
                 dep_line_map[id] = line
         # Every module implicitly depends on builtins.
         if self.id != 'builtins' and 'builtins' not in dep_line_map:
             dependencies.append('builtins')

-        # If self.dependencies is already set, it was read from the
-        # cache, but for some reason we're re-parsing the file.
         # NOTE: What to do about race conditions (like editing the
         # file while mypy runs)? A previous version of this code
         # explicitly checked for this, but ran afoul of other reasons
         # for differences (e.g. silent mode).
+
+        # Missing dependencies will be moved from dependencies to
+        # suppressed when they fail to be loaded in load_graph.
         self.dependencies = dependencies
-        self.suppressed = suppressed
+        self.suppressed = []
         self.priorities = priorities
         self.dep_line_map = dep_line_map
-        self.check_blockers()
+
+        self.check_blockers()  # Can fail due to bogus relative imports

     def semantic_analysis(self) -> None:
         assert self.tree is not None, "Internal error: method must be called on parsed file only"
@@ -2195,13 +2197,19 @@ def dump_graph(graph: Graph) -> None:
     print("[" + ",\n ".join(node.dumps() for node in nodes) + "\n]")


-def load_graph(sources: List[BuildSource], manager: BuildManager) -> Graph:
+def load_graph(sources: List[BuildSource], manager: BuildManager,
+               old_graph: Optional[Graph] = None) -> Graph:
     """Given some source files, load the full dependency graph.

+    If an old_graph is passed in, it is used as the starting point and
+    modified during graph loading.
+
     As this may need to parse files, this can raise CompileError in case
     there are syntax errors.
     """
-    graph = {}  # type: Graph
+
+    graph = old_graph or {}  # type: Graph
+
     # The deque is used to implement breadth-first traversal.
     # TODO: Consider whether to go depth-first instead. This may
     # affect the order in which we process files within import cycles.
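In outline, the build.py changes leave dependency initialization looking roughly like this (a simplified sketch, not the actual State.__init__ code; the init_dependencies helper is invented here purely to illustrate the split):

from mypy.build import State


def init_dependencies(state: State) -> None:
    # Cached case: dependencies, suppressed and priorities were already
    # loaded from the cache meta, so there is nothing to recompute.
    if state.meta is not None:
        return
    # No usable cache: parse the file, then derive dependencies from the
    # imports discovered during parsing.
    state.parse_file()
    state.compute_dependencies()
    # compute_dependencies() leaves state.suppressed empty; load_graph()
    # later moves any dependency it fails to find into suppressed, instead
    # of parse_file() guessing from manager.missing_modules.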

mypy/checker.py

Lines changed: 18 additions & 0 deletions
@@ -211,6 +211,24 @@ def __init__(self, errors: Errors, modules: Dict[str, MypyFile], options: Option
         # for processing module top levels in fine-grained incremental mode.
         self.recurse_into_functions = True

+    def reset(self) -> None:
+        """Cleanup stale state that might be left over from a typechecking run.
+
+        This allows us to reuse TypeChecker objects in fine-grained
+        incremental mode.
+        """
+        # TODO: verify this is still actually worth it over creating new checkers
+        self.partial_reported.clear()
+        self.module_refs.clear()
+        self.binder = ConditionalTypeBinder()
+        self.type_map.clear()
+
+        assert self.inferred_attribute_types is None
+        assert self.partial_types == []
+        assert self.deferred_nodes == []
+        assert len(self.scope.stack) == 1
+        assert self.partial_types == []
+
     def check_first_pass(self) -> None:
         """Type check the entire file, but defer functions with unresolved references.
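A hedged sketch of how the new hook might be used when a checker object is reused across fine-grained updates (the recheck_module driver below is hypothetical; only reset() and check_first_pass() come from the TypeChecker API shown above):

from mypy.checker import TypeChecker


def recheck_module(checker: TypeChecker) -> None:
    # Clear the per-run state (binder, type map, partial-type bookkeeping)
    # left behind by the previous pass...
    checker.reset()
    # ...then type check the module top level again with the same object,
    # instead of constructing a fresh TypeChecker for every update.
    checker.check_first_pass()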

mypy/dmypy_server.py

Lines changed: 5 additions & 3 deletions
@@ -282,10 +282,12 @@ def initialize_fine_grained(self, sources: List[mypy.build.BuildSource]) -> Dict
         if self.options.use_fine_grained_cache:
             # Pull times and hashes out of the saved_cache and stick them into
             # the fswatcher, so we pick up the changes.
-            for meta, mypyfile, type_map in manager.saved_cache.values():
-                if meta.mtime is None: continue
+            for state in self.fine_grained_manager.graph.values():
+                meta = state.meta
+                if meta is None: continue
+                assert state.path is not None
                 self.fswatcher.set_file_data(
-                    mypyfile.path,
+                    state.path,
                     FileData(st_mtime=float(meta.mtime), st_size=meta.size, md5=meta.hash))

         # Run an update
