# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Refactoring framework.

Used as a main program, this can refactor any number of files and/or
recursively descend into directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""

__author__ = "Guido van Rossum <guido@python.org>"


# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain

# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm


def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    fix_names = []
    for name in sorted(os.listdir(fixer_dir)):
        if name.startswith("fix_") and name.endswith(".py"):
            if remove_prefix:
                name = name[4:]
            fix_names.append(name[:-3])
    return fix_names
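
# Illustrative usage (a sketch, not executed here; assumes the standard
# "lib2to3.fixes" package is importable):
#
#   get_all_fix_names("lib2to3.fixes")        # -> ['apply', 'asserts', ...]
#   get_all_fix_names("lib2to3.fixes", remove_prefix=False)
#   # -> ['fix_apply', 'fix_asserts', ...]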


class _EveryNode(Exception):
    pass


def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content,
        # or a type and content -- so they don't recurse any further.
        # Leaf patterns are always returned directly.
        if pat.type is None:
            raise _EveryNode
        return {pat.type}

    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode  # Negated patterns don't have a type

    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r

    raise Exception("Oh no! I don't understand pattern %s" % (pat,))
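
# Sketch of how head types are computed for a compiled pattern
# (illustrative; PatternCompiler comes from lib2to3.patcomp):
#
#   from lib2to3.patcomp import PatternCompiler
#   pat = PatternCompiler().compile_pattern("power< 'print' trailer< any* > >")
#   _get_head_types(pat)
#   # -> roughly {pygram.python_grammar.symbol2number['power']}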


def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
    of head node type --> fixer list. """
    head_nodes = collections.defaultdict(list)
    every = []
    for fixer in fixer_list:
        if fixer.pattern:
            try:
                heads = _get_head_types(fixer.pattern)
            except _EveryNode:
                every.append(fixer)
            else:
                for node_type in heads:
                    head_nodes[node_type].append(fixer)
        else:
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                every.append(fixer)
    for node_type in chain(pygram.python_grammar.symbol2number.values(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)
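
# Minimal sketch of the resulting dispatch table (illustrative; the fixer
# instances would normally come from RefactoringTool.get_fixers()):
#
#   heads = _get_headnode_dict(fixer_instances)
#   # heads[node.type] lists every fixer whose pattern could possibly
#   # match a node of that type; fixers whose pattern can start anywhere
#   # appear under every type.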


def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    return [pkg_name + "." + fix_name
            for fix_name in get_all_fix_names(pkg_name, False)]
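
# Example (illustrative):
#
#   get_fixers_from_package("lib2to3.fixes")
#   # -> ['lib2to3.fixes.fix_apply', 'lib2to3.fixes.fix_asserts', ...]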

def _identity(obj):
    return obj

if sys.version_info < (3, 0):
    import codecs
    _open_with_encoding = codecs.open
    # Unfortunately, codecs.open doesn't translate newlines.
    def _from_system_newlines(input):
        return input.replace("\r\n", "\n")
    def _to_system_newlines(input):
        if os.linesep != "\n":
            return input.replace("\n", os.linesep)
        else:
            return input
else:
    _open_with_encoding = open
    _from_system_newlines = _identity
    _to_system_newlines = _identity


def _detect_future_features(source):
    have_docstring = False
    gen = tokenize.generate_tokens(io.StringIO(source).readline)
    def advance():
        tok = next(gen)
        return tok[0], tok[1]
    ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
    features = set()
    try:
        while True:
            tp, value = advance()
            if tp in ignore:
                continue
            elif tp == token.STRING:
                if have_docstring:
                    break
                have_docstring = True
            elif tp == token.NAME and value == "from":
                tp, value = advance()
                if tp != token.NAME or value != "__future__":
                    break
                tp, value = advance()
                if tp != token.NAME or value != "import":
                    break
                tp, value = advance()
                if tp == token.OP and value == "(":
                    tp, value = advance()
                while tp == token.NAME:
                    features.add(value)
                    tp, value = advance()
                    if tp != token.OP or value != ",":
                        break
                    tp, value = advance()
            else:
                break
    except StopIteration:
        pass
    return frozenset(features)
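
# Example (illustrative):
#
#   _detect_future_features(
#       "from __future__ import generator_stop, print_function\npass\n")
#   # -> frozenset({'generator_stop', 'print_function'})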


class FixerError(Exception):
    """A fixer could not be loaded."""


class RefactoringTool(object):

    _default_options = {"print_function": False,
                        "write_unchanged_files": False}

    CLASS_PREFIX = "Fix"  # The prefix for fixer classes
    FILE_PREFIX = "fix_"  # The prefix for modules with a fixer within

    def __init__(self, fixer_names, options=None, explicit=None):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked as
                explicit (i.e. not run by default).
        """
        self.fixers = fixer_names
        self.explicit = explicit or []
        self.options = self._default_options.copy()
        if options is not None:
            self.options.update(options)
        if self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        else:
            self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file()
        # even for files that were not changed during refactoring -- but
        # only if the refactor method's write parameter was also True.
        self.write_unchanged_files = self.options.get("write_unchanged_files")
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = False
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()

        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = []  # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        for fixer in chain(self.post_order, self.pre_order):
            if fixer.BM_compatible:
                self.BM.add_fixer(fixer)
            # Fixers that the bottom-up matcher cannot handle are kept
            # aside for the traditional node-to-node traversal.
            elif fixer in self.pre_order:
                self.bmi_pre_order.append(fixer)
            elif fixer in self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
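
    # Construction sketch (illustrative; "my_project" is a hypothetical
    # path, and fixer names are as returned by get_fixers_from_package()):
    #
    #   rt = RefactoringTool(get_fixers_from_package("lib2to3.fixes"),
    #                        options={"print_function": True})
    #   rt.refactor(["my_project"], write=False)   # dry run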

    def get_fixers(self):
        """Inspects the options to load the requested patterns and handlers.

        Returns:
          (pre_order, post_order), where pre_order is the list of fixers that
          want a pre-order AST traversal, and post_order is the list of those
          that want post-order traversal.
        """
        pre_order_fixers = []
        post_order_fixers = []
        for fix_mod_path in self.fixers:
            mod = __import__(fix_mod_path, {}, {}, ["*"])
            fix_name = fix_mod_path.rsplit(".", 1)[-1]
            if fix_name.startswith(self.FILE_PREFIX):
                fix_name = fix_name[len(self.FILE_PREFIX):]
            parts = fix_name.split("_")
            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
            try:
                fix_class = getattr(mod, class_name)
            except AttributeError:
                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
            fixer = fix_class(self.options, self.fixer_log)
            if fixer.explicit and self.explicit is not True and \
                    fix_mod_path not in self.explicit:
                self.log_message("Skipping optional fixer: %s", fix_name)
                continue

            self.log_debug("Adding transformation: %s", fix_name)
            if fixer.order == "pre":
                pre_order_fixers.append(fixer)
            elif fixer.order == "post":
                post_order_fixers.append(fixer)
            else:
                raise FixerError("Illegal fixer order: %r" % fixer.order)

        key_func = operator.attrgetter("run_order")
        pre_order_fixers.sort(key=key_func)
        post_order_fixers.sort(key=key_func)
        return (pre_order_fixers, post_order_fixers)

    def log_error(self, msg, *args, **kwds):
        """Called when an error occurs."""
        # By default just re-raise the exception currently being handled;
        # subclasses may override this to record the error instead.
        raise

    def log_message(self, msg, *args):
        """Hook to log a message."""
        if args:
            msg = msg % args
        self.logger.info(msg)

    def log_debug(self, msg, *args):
        if args:
            msg = msg % args
        self.logger.debug(msg)

    def print_output(self, old_text, new_text, filename, equal):
        """Called with the old version, new version, and filename of a
        refactored file."""
        pass

    def refactor(self, items, write=False, doctests_only=False):
        """Refactor a list of files and directories."""

        for dir_or_file in items:
            if os.path.isdir(dir_or_file):
                self.refactor_dir(dir_or_file, write, doctests_only)
            else:
                self.refactor_file(dir_or_file, write, doctests_only)

    def refactor_dir(self, dir_name, write=False, doctests_only=False):
        """Descends into a directory and refactors every Python file found.

        Python files are assumed to have a .py extension.

        Files and subdirectories starting with '.' are skipped.
        """
        py_ext = os.extsep + "py"
        for dirpath, dirnames, filenames in os.walk(dir_name):
            self.log_debug("Descending into %s", dirpath)
            dirnames.sort()
            filenames.sort()
            for name in filenames:
                if (not name.startswith(".") and
                        os.path.splitext(name)[1] == py_ext):
                    fullname = os.path.join(dirpath, name)
                    self.refactor_file(fullname, write, doctests_only)
            # Modify dirnames in-place to remove subdirs with leading dots
            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]

    def _read_python_source(self, filename):
        """
        Do our best to decode a Python source file correctly.
        """
        try:
            f = open(filename, "rb")
        except OSError as err:
            self.log_error("Can't open %s: %s", filename, err)
            return None, None
        try:
            encoding = tokenize.detect_encoding(f.readline)[0]
        finally:
            f.close()
        with _open_with_encoding(filename, "r", encoding=encoding) as f:
            return _from_system_newlines(f.read()), encoding

    def refactor_file(self, filename, write=False, doctests_only=False):
        """Refactors a file."""
        input, encoding = self._read_python_source(filename)
        if input is None:
            # Reading the file failed.
            return
        input += "\n"  # Silence certain parse errors
        if doctests_only:
            self.log_debug("Refactoring doctests in %s", filename)
            output = self.refactor_docstring(input, filename)
            if self.write_unchanged_files or output != input:
                self.processed_file(output, filename, input, write, encoding)
            else:
                self.log_debug("No doctest changes in %s", filename)
        else:
            tree = self.refactor_string(input, filename)
            if self.write_unchanged_files or (tree and tree.was_changed):
                # The [:-1] is to take off the \n we added earlier
                self.processed_file(str(tree)[:-1], filename,
                                    write=write, encoding=encoding)
            else:
                self.log_debug("No changes in %s", filename)

    def refactor_string(self, data, name):
        """Refactor a given input string.

        Args:
            data: a string holding the code to be refactored.
            name: a human-readable name for use in error/log messages.

        Returns:
            An AST corresponding to the refactored input stream; None if
            there were errors during the parse.
        """
        features = _detect_future_features(data)
        if "print_function" in features:
            self.driver.grammar = pygram.python_grammar_no_print_statement
        try:
            tree = self.driver.parse_string(data)
        except Exception as err:
            self.log_error("Can't parse %s: %s: %s",
                           name, err.__class__.__name__, err)
            return
        finally:
            self.driver.grammar = self.grammar
        tree.future_features = features
        self.log_debug("Refactoring %s", name)
        self.refactor_tree(tree, name)
        return tree
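
    # Example (illustrative; assumes the lib2to3 print fixer is loaded):
    #
    #   tree = rt.refactor_string("print 'hi'\n", "<example>")
    #   str(tree)   # -> "print('hi')\n"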

    def refactor_stdin(self, doctests_only=False):
        input = sys.stdin.read()
        if doctests_only:
            self.log_debug("Refactoring doctests in stdin")
            output = self.refactor_docstring(input, "<stdin>")
            if self.write_unchanged_files or output != input:
                self.processed_file(output, "<stdin>", input)
            else:
                self.log_debug("No doctest changes in stdin")
        else:
            tree = self.refactor_string(input, "<stdin>")
            if self.write_unchanged_files or (tree and tree.was_changed):
                self.processed_file(str(tree), "<stdin>", input)
            else:
                self.log_debug("No changes in stdin")

    def refactor_tree(self, tree, name):
        """Refactors a parse tree (modifying the tree in place).

        For compatible patterns the bottom matcher module is
        used. Otherwise the tree is traversed node-to-node for
        matches.

        Args:
            tree: a pytree.Node instance representing the root of the tree
                to be refactored.
            name: a human-readable name for this tree.

        Returns:
            True if the tree was modified, False otherwise.
        """

        for fixer in chain(self.pre_order, self.post_order):
            fixer.start_tree(tree, name)

        # Use traditional matching for the fixers that are incompatible
        # with the bottom matcher.
        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
        self.traverse_by(self.bmi_post_order_heads, tree.post_order())

        # Obtain a set of candidate nodes.
        match_set = self.BM.run(tree.leaves())

        while any(match_set.values()):
            for fixer in self.BM.fixers:
                if fixer in match_set and match_set[fixer]:
                    # Sort by depth; apply fixers from the bottom of the
                    # AST to the top.
                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)

                    if fixer.keep_line_order:
                        # Some fixers (e.g. fix_imports) must be applied in
                        # the original file's line order.
                        match_set[fixer].sort(key=pytree.Base.get_lineno)

                    for node in list(match_set[fixer]):
                        if node in match_set[fixer]:
                            match_set[fixer].remove(node)

                        try:
                            find_root(node)
                        except ValueError:
                            # This node has been cut off from the tree by a
                            # previous transformation; skip it.
                            continue

                        if node.fixers_applied and fixer in node.fixers_applied:
                            # Do not apply the same fixer again.
                            continue

                        results = fixer.match(node)

                        if results:
                            new = fixer.transform(node, results)
                            if new is not None:
                                node.replace(new)
                                #new.fixers_applied.append(fixer)
                                for node in new.post_order():
                                    # Do not apply the fixer again to
                                    # this node or any subnode.
                                    if not node.fixers_applied:
                                        node.fixers_applied = []
                                    node.fixers_applied.append(fixer)

                                # Update the original match set for
                                # the added code.
                                new_matches = self.BM.run(new.leaves())
                                for fxr in new_matches:
                                    if fxr not in match_set:
                                        match_set[fxr] = []

                                    match_set[fxr].extend(new_matches[fxr])

        for fixer in chain(self.pre_order, self.post_order):
            fixer.finish_tree(tree, name)
        return tree.was_changed
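
    # Direct-use sketch (illustrative; normally reached via
    # refactor_string(). `source` is a hypothetical string of code, and
    # the tree must carry a future_features attribute, as set there):
    #
    #   tree = rt.driver.parse_string(source)
    #   tree.future_features = frozenset()
    #   if rt.refactor_tree(tree, "<mem>"):
    #       print(str(tree), end="")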

    def traverse_by(self, fixers, traversal):
        """Traverse an AST, applying a set of fixers to each node.

        This is a helper method for refactor_tree().

        Args:
            fixers: a dictionary mapping node types to lists of fixer
                instances, as produced by _get_headnode_dict().
            traversal: a generator that yields AST nodes.

        Returns:
            None
        """
        if not fixers:
            return
        for node in traversal:
            for fixer in fixers[node.type]:
                results = fixer.match(node)
                if results:
                    new = fixer.transform(node, results)
                    if new is not None:
                        node.replace(new)
                        node = new

    def processed_file(self, new_text, filename, old_text=None, write=False,
                       encoding=None):
        """
        Called when a file has been refactored and there may be changes.
        """
        self.files.append(filename)
        if old_text is None:
            old_text = self._read_python_source(filename)[0]
            if old_text is None:
                return
        equal = old_text == new_text
        self.print_output(old_text, new_text, filename, equal)
        if equal:
            self.log_debug("No changes to %s", filename)
            if not self.write_unchanged_files:
                return
        if write:
            self.write_file(new_text, filename, old_text, encoding)
        else:
            self.log_debug("Not writing changes to %s", filename)

    def write_file(self, new_text, filename, old_text, encoding=None):
        """Writes a string to a file.

        This is only called by processed_file() when its write parameter
        is true; presenting a diff between the old and the new text is
        left to the print_output() hook.
        """
        try:
            f = _open_with_encoding(filename, "w", encoding=encoding)
        except OSError as err:
            self.log_error("Can't create %s: %s", filename, err)
            return
        try:
            f.write(_to_system_newlines(new_text))
        except OSError as err:
            self.log_error("Can't write %s: %s", filename, err)
        finally:
            f.close()
        self.log_debug("Wrote changes to %s", filename)
        self.wrote = True

    PS1 = ">>> "
    PS2 = "... "

    def refactor_docstring(self, input, filename):
        """Refactors a docstring, looking for doctests.

        This returns a modified version of the input string.  It looks
        for doctests, which start with a ">>>" prompt, and may be
        continued with "..." prompts, as long as the "..." is indented
        the same as the ">>>".

        (Unfortunately we can't use the doctest module's parser,
        since, like most parsers, it is not geared towards preserving
        the original source.)
        """
        result = []
        block = None
        block_lineno = None
        indent = None
        lineno = 0
        for line in input.splitlines(keepends=True):
            lineno += 1
            if line.lstrip().startswith(self.PS1):
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block_lineno = lineno
                block = [line]
                i = line.find(self.PS1)
                indent = line[:i]
            elif (indent is not None and
                  (line.startswith(indent + self.PS2) or
                   line == indent + self.PS2.rstrip() + "\n")):
                block.append(line)
            else:
                if block is not None:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block = None
                indent = None
                result.append(line)
        if block is not None:
            result.extend(self.refactor_doctest(block, block_lineno,
                                                indent, filename))
        return "".join(result)
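
    # Example (illustrative; assumes the print fixer is loaded):
    #
    #   rt.refactor_docstring(">>> print 'x'\n'x'\n", "<doc>")
    #   # -> ">>> print('x')\n'x'\n"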

    def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).
        """
        try:
            tree = self.parse_block(block, lineno, indent)
        except Exception as err:
            if self.logger.isEnabledFor(logging.DEBUG):
                for line in block:
                    self.log_debug("Source: %s", line.rstrip("\n"))
            self.log_error("Can't parse docstring in %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            return block
        if self.refactor_tree(tree, filename):
            new = str(tree).splitlines(keepends=True)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == ["\n"] * (lineno-1), clipped
            if not new[-1].endswith("\n"):
                new[-1] += "\n"
            block = [indent + self.PS1 + new.pop(0)]
            if new:
                block += [indent + self.PS2 + line for line in new]
        return block

    def summarize(self):
        if self.wrote:
            were = "were"
        else:
            were = "need to be"
        if not self.files:
            self.log_message("No files %s modified.", were)
        else:
            self.log_message("Files that %s modified:", were)
            for file in self.files:
                self.log_message(file)
        if self.fixer_log:
            self.log_message("Warnings/messages while refactoring:")
            for message in self.fixer_log:
                self.log_message(message)
        if self.errors:
            if len(self.errors) == 1:
                self.log_message("There was 1 error:")
            else:
                self.log_message("There were %d errors:", len(self.errors))
            for msg, args, kwds in self.errors:
                self.log_message(msg, *args, **kwds)

    def parse_block(self, block, lineno, indent):
        """Parses a block into a tree.

        This is necessary to get correct line number / offset information
        in the parser diagnostics and embedded into the parse tree.
        """
        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
        tree.future_features = frozenset()
        return tree

    def wrap_toks(self, block, lineno, indent):
        """Wraps a tokenize stream to systematically modify start/end."""
        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
        for type, value, (line0, col0), (line1, col1), line_text in tokens:
            line0 += lineno - 1
            line1 += lineno - 1
            # Don't bother updating the columns; this is too complicated
            # since line_text would also have to be updated and it would
            # still break for tokens spanning lines.  Let the user guess
            # that the column numbers for doctests are relative to the
            # end of the prompt string (PS1 or PS2).
            yield type, value, (line0, col0), (line1, col1), line_text

    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        for line in block:
            if line.startswith(prefix):
                yield line[len(prefix):]
            elif line == prefix.rstrip() + "\n":
                yield "\n"
            else:
                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
            prefix = prefix2
        while True:
            yield ""
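
    # Example (illustrative): gen_lines() re-feeds a doctest block to
    # tokenize as if it were ordinary source:
    #
    #   import itertools
    #   gen = self.gen_lines([">>> 1 +\n", "... 1\n"], "")
    #   list(itertools.islice(gen, 3))   # -> ['1 +\n', '1\n', '']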


class MultiprocessingUnsupported(Exception):
    pass


class MultiprocessRefactoringTool(RefactoringTool):

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        self.queue = None
        self.output_lock = None

    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None
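
    # Usage sketch (illustrative; "pkg" is a hypothetical path, and the
    # single-process path is taken when num_processes == 1):
    #
    #   mrt = MultiprocessRefactoringTool(
    #       get_fixers_from_package("lib2to3.fixes"))
    #   mrt.refactor(["pkg"], write=True, num_processes=4)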

    def _child(self):
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)