# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except that instead of generating tokens, tokeneater is a
callback function to which the 5 fields described above are passed as 5
arguments, each time a new token is found."""
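
# A minimal usage sketch (illustrative only; generate_tokens() is defined
# below):
#
#   import io
#   for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#       print(tok)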

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
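# For illustration: group('a', 'b') yields the pattern '(a|b)', any('x')
# yields '(x)*', and maybe('x') yields '(x)?'.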

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = list(map(
    re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None,
            'u': None, 'U': None,
            'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"', ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
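
# A hedged sketch of a custom tokeneater callback (the name and behavior are
# illustrative, not part of this module's API):
#
#   def print_names(type, token, start, end, line):
#       if type == NAME:
#           print("NAME %r at row %d, col %d" % (token, start[0], start[1]))
#
#   # import io; tokenize(io.StringIO("x = 1\n").readline, print_names)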

# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        row, col = start
        # Token positions must be non-decreasing as the stream is replayed.
        assert row >= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        for tok in iterable:
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
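
# For example, _get_normal_name("UTF_8") and _get_normal_name("utf-8-unix")
# both normalize to "utf-8" (illustrative inputs).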

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263. If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, a SyntaxError will also be raised. Note that if a UTF-8
    BOM is found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return bytes()

    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

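# A hedged usage sketch ("example.py" is a hypothetical file; note that
# detect_encoding() expects a bytes-producing readline):
#
#   with open("example.py", "rb") as f:
#       encoding, first_lines = detect_encoding(f.readline)
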
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two elements are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).__next__
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)

def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).__next__    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum = lnum + 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if stashed:
                yield stashed
                stashed = None

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    elif async_def:
                        async_def_nl = True
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (newline, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        if stashed:
                            yield stashed
                            stashed = None
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield (ASYNC if token == 'async' else AWAIT,
                                   token, spos, epos, line)
                            continue

                    tok = (NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                            and stashed[0] == NAME
                            and stashed[1] == 'async'):

                            async_def = True
                            async_def_indent = indents[-1]

                            yield (ASYNC, stashed[1],
                                   stashed[2], stashed[3],
                                   stashed[4])
                            stashed = None

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    if stashed:
        yield stashed
        stashed = None

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')

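# A hedged sketch of consuming the generator directly (the source string is
# made up for illustration):
#
#   import io
#   src = "def f():\n    return 1\n"
#   for type, token, start, end, line in generate_tokens(
#           io.StringIO(src).readline):
#       print(tok_name[type], repr(token))
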
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)