13
13
import dill
14
14
15
15
from pylint import reporters
16
- from pylint .lint .utils import _patch_sys_path
16
+ from pylint .config .find_default_config_files import find_default_config_files
17
+ from pylint .lint .utils import _patch_sys_path , extract_results_from_linter , insert_results_to_linter , _merge_mapreduce_data
17
18
from pylint .message import Message
18
19
from pylint .typing import FileItem
19
20
from pylint .utils import LinterStats , merge_stats
28
29
29
30
# PyLinter objects used by worker processes when checking files using
# multiprocessing; should only be used by the worker processes.
# Maps an rcfile identifier (as produced by the caller; presumably a path
# string — TODO confirm against the producer of `files`) to the PyLinter
# configured from it.  Populated by _worker_initialize; None until then.
_worker_linters: dict[str, PyLinter] | None = None
32
33
33
34
34
35
def _worker_initialize(
    linters: bytes, arguments: None | str | Sequence[str] = None
) -> None:
    """Function called to initialize a worker for a Process within a multiprocessing Pool.

    :param linters: A mapping of rcfile to linter-class (PyLinter) instances,
        pickled with dill
    :param arguments: File or module name(s) to lint and to be added to sys.path
    """
    global _worker_linters  # pylint: disable=global-statement
    _worker_linters = dill.loads(linters)
    assert _worker_linters

    # On the worker process side the messages are just collected and passed back to
    # parent process as _worker_check_file function's return value
    for worker_linter in _worker_linters.values():
        worker_linter.set_reporter(reporters.CollectingReporter())
        worker_linter.open()

    # Patch sys.path so that each argument is importable just like in single job mode
    _patch_sys_path(arguments or ())
@@ -65,66 +67,39 @@ def _worker_check_single_file(
65
67
int ,
66
68
defaultdict [str , list [Any ]],
67
69
]:
68
- if not _worker_linter :
70
+ rcfiles = file_item [0 ]
71
+ file_item = file_item [1 ]
72
+
73
+ if not _worker_linters [rcfiles ]:
69
74
raise Exception ("Worker linter not yet initialised" )
70
- _worker_linter .open ()
71
- _worker_linter .check_single_file_item (file_item )
72
- mapreduce_data = defaultdict (list )
73
- for checker in _worker_linter .get_checkers ():
74
- data = checker .get_map_data ()
75
- if data is not None :
76
- mapreduce_data [checker .name ].append (data )
77
- msgs = _worker_linter .reporter .messages
78
- assert isinstance (_worker_linter .reporter , reporters .CollectingReporter )
79
- _worker_linter .reporter .reset ()
80
- if _worker_linter .current_name is None :
81
- warnings .warn (
82
- (
83
- "In pylint 3.0 the current_name attribute of the linter object should be a string. "
84
- "If unknown it should be initialized as an empty string."
85
- ),
86
- DeprecationWarning ,
87
- )
75
+ _worker_linters [rcfiles ].open ()
76
+ _worker_linters [rcfiles ].check_single_file_item (file_item )
77
+ (
78
+ linter_current_name ,
79
+ _ ,
80
+ base_name ,
81
+ msgs ,
82
+ linter_stats ,
83
+ linter_msg_status ,
84
+ mapreduce_data ,
85
+ ) = extract_results_from_linter (_worker_linters [rcfiles ])
88
86
return (
89
87
id (multiprocessing .current_process ()),
90
- _worker_linter . current_name ,
88
+ linter_current_name ,
91
89
file_item .filepath ,
92
- _worker_linter . file_state . base_name ,
90
+ base_name ,
93
91
msgs ,
94
- _worker_linter . stats ,
95
- _worker_linter . msg_status ,
92
+ linter_stats ,
93
+ linter_msg_status ,
96
94
mapreduce_data ,
97
95
)
98
96
99
97
100
- def _merge_mapreduce_data (
101
- linter : PyLinter ,
102
- all_mapreduce_data : defaultdict [int , list [defaultdict [str , list [Any ]]]],
103
- ) -> None :
104
- """Merges map/reduce data across workers, invoking relevant APIs on checkers."""
105
- # First collate the data and prepare it, so we can send it to the checkers for
106
- # validation. The intent here is to collect all the mapreduce data for all checker-
107
- # runs across processes - that will then be passed to a static method on the
108
- # checkers to be reduced and further processed.
109
- collated_map_reduce_data : defaultdict [str , list [Any ]] = defaultdict (list )
110
- for linter_data in all_mapreduce_data .values ():
111
- for run_data in linter_data :
112
- for checker_name , data in run_data .items ():
113
- collated_map_reduce_data [checker_name ].extend (data )
114
-
115
- # Send the data to checkers that support/require consolidated data
116
- original_checkers = linter .get_checkers ()
117
- for checker in original_checkers :
118
- if checker .name in collated_map_reduce_data :
119
- # Assume that if the check has returned map/reduce data that it has the
120
- # reducer function
121
- checker .reduce_map_data (linter , collated_map_reduce_data [checker .name ])
122
-
123
-
124
98
def check_parallel (
125
99
linter : PyLinter ,
100
+ linters ,
126
101
jobs : int ,
127
- files : Iterable [ FileItem ],
102
+ files , #[(conf, FileItem) ],
128
103
arguments : None | str | Sequence [str ] = None ,
129
104
) -> None :
130
105
"""Use the given linter to lint the files with given amount of workers (jobs).
@@ -137,7 +112,7 @@ def check_parallel(
137
112
# a custom PyLinter object can be used.
138
113
initializer = functools .partial (_worker_initialize , arguments = arguments )
139
114
with multiprocessing .Pool (
140
- jobs , initializer = initializer , initargs = [dill .dumps (linter )]
115
+ jobs , initializer = initializer , initargs = [dill .dumps (linters )]
141
116
) as pool :
142
117
linter .open ()
143
118
all_stats = []
@@ -158,13 +133,11 @@ def check_parallel(
158
133
msg_status ,
159
134
mapreduce_data ,
160
135
) in pool .imap_unordered (_worker_check_single_file , files ):
161
- linter .file_state .base_name = base_name
162
- linter .set_current_module (module , file_path )
163
- for msg in messages :
164
- linter .reporter .handle_message (msg )
136
+ insert_results_to_linter (
137
+ linter , module , file_path , base_name , messages , msg_status
138
+ )
165
139
all_stats .append (stats )
166
140
all_mapreduce_data [worker_idx ].append (mapreduce_data )
167
- linter .msg_status |= msg_status
168
141
169
142
pool .close ()
170
143
pool .join ()
0 commit comments