from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                      open as tokenize_open, Untokenizer)
from io import BytesIO
from unittest import TestCase, mock
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
                               INVALID_UNDERSCORE_LITERALS)
import os
import token


class TokenizeTest(TestCase):
    # Tests for the tokenize module.

    # The tests can be really simple. Given a small fragment of source
    # code, print out a table with tokens. The ENDMARKER is omitted for
    # brevity.

    def check_tokenize(self, s, expected):
        # Format the tokens in s as a table and compare with expected.
        # The ENDMARKER is omitted.
        result = []
        f = BytesIO(s.encode('utf-8'))
        for type, token, start, end, line in tokenize(f.readline):
            if type == ENDMARKER:
                break
            type = tok_name[type]
            result.append(f"    {type:10} {token!r:13} {start} {end}")
        self.assertEqual(result,
                         ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                         expected.rstrip().splitlines())
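
    # For reference: tokenize(readline) yields TokenInfo 5-tuples of
    # (type, string, start, end, line), and the first token is always
    # ENCODING at (0, 0); that is why check_tokenize() prepends the
    # ENCODING row to `expected`. A minimal standalone sketch of the same
    # loop (illustrative only, not used by the tests below):
    #
    #   for tok in tokenize(BytesIO(b"1 + 1").readline):
    #       print(tok_name[tok.type], tok.string, tok.start, tok.end)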

    def test_basic(self):
        self.check_tokenize("1 + 1", """\
    NUMBER     '1'           (1, 0) (1, 1)
    OP         '+'           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    """)
        self.check_tokenize("if False:\n"
                            "    # NL\n"
                            "    True = False # NEWLINE\n", """\
    NAME       'if'          (1, 0) (1, 2)
    NAME       'False'       (1, 3) (1, 8)
    OP         ':'           (1, 8) (1, 9)
    NEWLINE    '\\n'          (1, 9) (1, 10)
    COMMENT    '# NL'        (2, 4) (2, 8)
    NL         '\\n'          (2, 8) (2, 9)
    INDENT     '    '        (3, 0) (3, 4)
    NAME       'True'        (3, 4) (3, 8)
    OP         '='           (3, 9) (3, 10)
    NAME       'False'       (3, 11) (3, 16)
    COMMENT    '# NEWLINE'   (3, 17) (3, 26)
    NEWLINE    '\\n'          (3, 26) (3, 27)
    DEDENT     ''            (4, 0) (4, 0)
    """)
        indent_error_file = b"""\
def k(x):
    x += 2
  x += 5
"""
        readline = BytesIO(indent_error_file).readline
        with self.assertRaisesRegex(IndentationError,
                                    "unindent does not match any "
                                    "outer indentation level"):
            for tok in tokenize(readline):
                pass

    def test_int(self):
        # Ordinary integers and binary operators
        self.check_tokenize("0xff <= 255", """\
    NUMBER     '0xff'        (1, 0) (1, 4)
    OP         '<='          (1, 5) (1, 7)
    NUMBER     '255'         (1, 8) (1, 11)
    """)
        self.check_tokenize("0b10 <= 255", """\
    NUMBER     '0b10'        (1, 0) (1, 4)
    OP         '<='          (1, 5) (1, 7)
    NUMBER     '255'         (1, 8) (1, 11)
    """)
        self.check_tokenize("0o123 <= 0O123", """\
    NUMBER     '0o123'       (1, 0) (1, 5)
    OP         '<='          (1, 6) (1, 8)
    NUMBER     '0O123'       (1, 9) (1, 14)
    """)
        self.check_tokenize("1234567 > ~0x15", """\
    NUMBER     '1234567'     (1, 0) (1, 7)
    OP         '>'           (1, 8) (1, 9)
    OP         '~'           (1, 10) (1, 11)
    NUMBER     '0x15'        (1, 11) (1, 15)
    """)
        self.check_tokenize("2134568 != 1231515", """\
    NUMBER     '2134568'     (1, 0) (1, 7)
    OP         '!='          (1, 8) (1, 10)
    NUMBER     '1231515'     (1, 11) (1, 18)
    """)
        self.check_tokenize("(-124561-1) & 200000000", """\
    OP         '('           (1, 0) (1, 1)
    OP         '-'           (1, 1) (1, 2)
    NUMBER     '124561'      (1, 2) (1, 8)
    OP         '-'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         ')'           (1, 10) (1, 11)
    OP         '&'           (1, 12) (1, 13)
    NUMBER     '200000000'   (1, 14) (1, 23)
    """)
        self.check_tokenize("0xdeadbeef != -1", """\
    NUMBER     '0xdeadbeef'  (1, 0) (1, 10)
    OP         '!='          (1, 11) (1, 13)
    OP         '-'           (1, 14) (1, 15)
    NUMBER     '1'           (1, 15) (1, 16)
    """)
        self.check_tokenize("0xdeadc0de & 12345", """\
    NUMBER     '0xdeadc0de'  (1, 0) (1, 10)
    OP         '&'           (1, 11) (1, 12)
    NUMBER     '12345'       (1, 13) (1, 18)
    """)
        self.check_tokenize("0xFF & 0x15 | 1234", """\
    NUMBER     '0xFF'        (1, 0) (1, 4)
    OP         '&'           (1, 5) (1, 6)
    NUMBER     '0x15'        (1, 7) (1, 11)
    OP         '|'           (1, 12) (1, 13)
    NUMBER     '1234'        (1, 14) (1, 18)
    """)

    def test_long(self):
        # Long integers
        self.check_tokenize("x = 0", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '0'           (1, 4) (1, 5)
    """)
        self.check_tokenize("x = 0xfffffffffff", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '0xfffffffffff' (1, 4) (1, 17)
    """)
        self.check_tokenize("x = 123141242151251616110", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '123141242151251616110' (1, 4) (1, 25)
    """)
        self.check_tokenize("x = -15921590215012591", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    OP         '-'           (1, 4) (1, 5)
    NUMBER     '15921590215012591' (1, 5) (1, 22)
    """)

    def test_float(self):
        # Floating point numbers
        self.check_tokenize("x = 3.14159", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3.14159'     (1, 4) (1, 11)
    """)
        self.check_tokenize("x = 314159.", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '314159.'     (1, 4) (1, 11)
    """)
        self.check_tokenize("x = .314159", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '.314159'     (1, 4) (1, 11)
    """)
        self.check_tokenize("x = 3e14159", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3e14159'     (1, 4) (1, 11)
    """)
        self.check_tokenize("x = 3E123", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3E123'       (1, 4) (1, 9)
    """)
        self.check_tokenize("x+y = 3e-1230", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '+'           (1, 1) (1, 2)
    NAME       'y'           (1, 2) (1, 3)
    OP         '='           (1, 4) (1, 5)
    NUMBER     '3e-1230'     (1, 6) (1, 13)
    """)
        self.check_tokenize("x = 3.14e159", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3.14e159'    (1, 4) (1, 12)
    """)

    def test_underscore_literals(self):
        def number_token(s):
            f = BytesIO(s.encode('utf-8'))
            for toktype, token, start, end, line in tokenize(f.readline):
                if toktype == NUMBER:
                    return token
            return 'invalid token'
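        # number_token() returns the first NUMBER token's exact text, so a
        # valid literal should round-trip unchanged, e.g. (illustrative
        # value, not taken from the imported lists):
        #   number_token('1_000') == '1_000'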
        for lit in VALID_UNDERSCORE_LITERALS:
            if '(' in lit:
                # this won't work with compound complex inputs
                continue
            self.assertEqual(number_token(lit), lit)
        for lit in INVALID_UNDERSCORE_LITERALS:
            self.assertNotEqual(number_token(lit), lit)

    def test_string(self):
        # String literals
        self.check_tokenize("x = ''; y = \"\"", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "''"          (1, 4) (1, 6)
    OP         ';'           (1, 6) (1, 7)
    NAME       'y'           (1, 8) (1, 9)
    OP         '='           (1, 10) (1, 11)
    STRING     '""'          (1, 12) (1, 14)
    """)
        self.check_tokenize("x = '\"'; y = \"'\"", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '\\'"\\''       (1, 4) (1, 7)
    OP         ';'           (1, 7) (1, 8)
    NAME       'y'           (1, 9) (1, 10)
    OP         '='           (1, 11) (1, 12)
    STRING     '"\\'"'        (1, 13) (1, 16)
    """)
        self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '"doesn\\'t "' (1, 4) (1, 14)
    NAME       'shrink'      (1, 14) (1, 20)
    STRING     '", does it"' (1, 20) (1, 31)
    """)
        self.check_tokenize("x = 'abc' + 'ABC'", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "'abc'"       (1, 4) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    STRING     "'ABC'"       (1, 12) (1, 17)
    """)
        self.check_tokenize('y = "ABC" + "ABC"', """\
    NAME       'y'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '"ABC"'       (1, 4) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    STRING     '"ABC"'       (1, 12) (1, 17)
    """)
        self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "r'abc'"      (1, 4) (1, 10)
    OP         '+'           (1, 11) (1, 12)
    STRING     "r'ABC'"      (1, 13) (1, 19)
    OP         '+'           (1, 20) (1, 21)
    STRING     "R'ABC'"      (1, 22) (1, 28)
    OP         '+'           (1, 29) (1, 30)
    STRING     "R'ABC'"      (1, 31) (1, 37)
    """)
        self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\
    NAME       'y'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     'r"abc"'      (1, 4) (1, 10)
    OP         '+'           (1, 11) (1, 12)
    STRING     'r"ABC"'      (1, 13) (1, 19)
    OP         '+'           (1, 20) (1, 21)
    STRING     'R"ABC"'      (1, 22) (1, 28)
    OP         '+'           (1, 29) (1, 30)
    STRING     'R"ABC"'      (1, 31) (1, 37)
    """)

        self.check_tokenize("u'abc' + U'abc'", """\
    STRING     "u'abc'"      (1, 0) (1, 6)
    OP         '+'           (1, 7) (1, 8)
    STRING     "U'abc'"      (1, 9) (1, 15)
    """)
        self.check_tokenize('u"abc" + U"abc"', """\
    STRING     'u"abc"'      (1, 0) (1, 6)
    OP         '+'           (1, 7) (1, 8)
    STRING     'U"abc"'      (1, 9) (1, 15)
    """)

        self.check_tokenize("b'abc' + B'abc'", """\
    STRING     "b'abc'"      (1, 0) (1, 6)
    OP         '+'           (1, 7) (1, 8)
    STRING     "B'abc'"      (1, 9) (1, 15)
    """)
        self.check_tokenize('b"abc" + B"abc"', """\
    STRING     'b"abc"'      (1, 0) (1, 6)
    OP         '+'           (1, 7) (1, 8)
    STRING     'B"abc"'      (1, 9) (1, 15)
    """)
        self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\
    STRING     "br'abc'"     (1, 0) (1, 7)
    OP         '+'           (1, 8) (1, 9)
    STRING     "bR'abc'"     (1, 10) (1, 17)
    OP         '+'           (1, 18) (1, 19)
    STRING     "Br'abc'"     (1, 20) (1, 27)
    OP         '+'           (1, 28) (1, 29)
    STRING     "BR'abc'"     (1, 30) (1, 37)
    """)
        self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\
    STRING     'br"abc"'     (1, 0) (1, 7)
    OP         '+'           (1, 8) (1, 9)
    STRING     'bR"abc"'     (1, 10) (1, 17)
    OP         '+'           (1, 18) (1, 19)
    STRING     'Br"abc"'     (1, 20) (1, 27)
    OP         '+'           (1, 28) (1, 29)
    STRING     'BR"abc"'     (1, 30) (1, 37)
    """)
        self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\
    STRING     "rb'abc'"     (1, 0) (1, 7)
    OP         '+'           (1, 8) (1, 9)
    STRING     "rB'abc'"     (1, 10) (1, 17)
    OP         '+'           (1, 18) (1, 19)
    STRING     "Rb'abc'"     (1, 20) (1, 27)
    OP         '+'           (1, 28) (1, 29)
    STRING     "RB'abc'"     (1, 30) (1, 37)
    """)
        self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\
    STRING     'rb"abc"'     (1, 0) (1, 7)
    OP         '+'           (1, 8) (1, 9)
    STRING     'rB"abc"'     (1, 10) (1, 17)
    OP         '+'           (1, 18) (1, 19)
    STRING     'Rb"abc"'     (1, 20) (1, 27)
    OP         '+'           (1, 28) (1, 29)
    STRING     'RB"abc"'     (1, 30) (1, 37)
    """)
        # Check 0, 1, and 2 character string prefixes.
        self.check_tokenize(r'"a\
de\
fg"', """\
    STRING     '"a\\\\\\nde\\\\\\nfg"\' (1, 0) (3, 3)
    """)
        self.check_tokenize(r'u"a\
de"', """\
    STRING     'u"a\\\\\\nde"\'  (1, 0) (2, 3)
    """)
        self.check_tokenize(r'rb"a\
d"', """\
    STRING     'rb"a\\\\\\nd"\'  (1, 0) (2, 2)
    """)
        self.check_tokenize(r'"""a\
b"""', """\
    STRING     '\"\""a\\\\\\nb\"\""' (1, 0) (2, 4)
    """)
        self.check_tokenize(r'u"""a\
b"""', """\
    STRING     'u\"\""a\\\\\\nb\"\""' (1, 0) (2, 4)
    """)
        self.check_tokenize(r'rb"""a\
b\
c"""', """\
    STRING     'rb"\""a\\\\\\nb\\\\\\nc"\""' (1, 0) (3, 4)
    """)
        self.check_tokenize('f"abc"', """\
    STRING     'f"abc"'      (1, 0) (1, 6)
    """)
        self.check_tokenize('fR"a{b}c"', """\
    STRING     'fR"a{b}c"'   (1, 0) (1, 9)
    """)
        self.check_tokenize('f"""abc"""', """\
    STRING     'f\"\"\"abc\"\"\"'  (1, 0) (1, 10)
    """)
        self.check_tokenize(r'f"abc\
def"', """\
    STRING     'f"abc\\\\\\ndef"' (1, 0) (2, 4)
    """)
        self.check_tokenize(r'Rf"abc\
def"', """\
    STRING     'Rf"abc\\\\\\ndef"' (1, 0) (2, 4)
    """)

    def test_function(self):
        self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\
    NAME       'def'         (1, 0) (1, 3)
    NAME       'd22'         (1, 4) (1, 7)
    OP         '('           (1, 7) (1, 8)
    NAME       'a'           (1, 8) (1, 9)
    OP         ','           (1, 9) (1, 10)
    NAME       'b'           (1, 11) (1, 12)
    OP         ','           (1, 12) (1, 13)
    NAME       'c'           (1, 14) (1, 15)
    OP         '='           (1, 15) (1, 16)
    NUMBER     '2'           (1, 16) (1, 17)
    OP         ','           (1, 17) (1, 18)
    NAME       'd'           (1, 19) (1, 20)
    OP         '='           (1, 20) (1, 21)
    NUMBER     '2'           (1, 21) (1, 22)
    OP         ','           (1, 22) (1, 23)
    OP         '*'           (1, 24) (1, 25)
    NAME       'k'           (1, 25) (1, 26)
    OP         ')'           (1, 26) (1, 27)
    OP         ':'           (1, 27) (1, 28)
    NAME       'pass'        (1, 29) (1, 33)
    """)
        self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\
    NAME       'def'         (1, 0) (1, 3)
    NAME       'd01v_'       (1, 4) (1, 9)
    OP         '('           (1, 9) (1, 10)
    NAME       'a'           (1, 10) (1, 11)
    OP         '='           (1, 11) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         ','           (1, 13) (1, 14)
    OP         '*'           (1, 15) (1, 16)
    NAME       'k'           (1, 16) (1, 17)
    OP         ','           (1, 17) (1, 18)
    OP         '**'          (1, 19) (1, 21)
    NAME       'w'           (1, 21) (1, 22)
    OP         ')'           (1, 22) (1, 23)
    OP         ':'           (1, 23) (1, 24)
    NAME       'pass'        (1, 25) (1, 29)
    """)

    def test_comparison(self):
        # Comparison
        self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != "
                            "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\
    NAME       'if'          (1, 0) (1, 2)
    NUMBER     '1'           (1, 3) (1, 4)
    OP         '<'           (1, 5) (1, 6)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '>'           (1, 9) (1, 10)
    NUMBER     '1'           (1, 11) (1, 12)
    OP         '=='          (1, 13) (1, 15)
    NUMBER     '1'           (1, 16) (1, 17)
    OP         '>='          (1, 18) (1, 20)
    NUMBER     '5'           (1, 21) (1, 22)
    OP         '<='          (1, 23) (1, 25)
    NUMBER     '0x15'        (1, 26) (1, 30)
    OP         '<='          (1, 31) (1, 33)
    NUMBER     '0x12'        (1, 34) (1, 38)
    OP         '!='          (1, 39) (1, 41)
    NUMBER     '1'           (1, 42) (1, 43)
    NAME       'and'         (1, 44) (1, 47)
    NUMBER     '5'           (1, 48) (1, 49)
    NAME       'in'          (1, 50) (1, 52)
    NUMBER     '1'           (1, 53) (1, 54)
    NAME       'not'         (1, 55) (1, 58)
    NAME       'in'          (1, 59) (1, 61)
    NUMBER     '1'           (1, 62) (1, 63)
    NAME       'is'          (1, 64) (1, 66)
    NUMBER     '1'           (1, 67) (1, 68)
    NAME       'or'          (1, 69) (1, 71)
    NUMBER     '5'           (1, 72) (1, 73)
    NAME       'is'          (1, 74) (1, 76)
    NAME       'not'         (1, 77) (1, 80)
    NUMBER     '1'           (1, 81) (1, 82)
    OP         ':'           (1, 82) (1, 83)
    NAME       'pass'        (1, 84) (1, 88)
    """)

    def test_shift(self):
        # Shift
        self.check_tokenize("x = 1 << 1 >> 5", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '<<'          (1, 6) (1, 8)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '>>'          (1, 11) (1, 13)
    NUMBER     '5'           (1, 14) (1, 15)
    """)

    def test_additive(self):
        # Additive
        self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '-'           (1, 6) (1, 7)
    NAME       'y'           (1, 8) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    NUMBER     '15'          (1, 12) (1, 14)
    OP         '-'           (1, 15) (1, 16)
    NUMBER     '1'           (1, 17) (1, 18)
    OP         '+'           (1, 19) (1, 20)
    NUMBER     '0x124'       (1, 21) (1, 26)
    OP         '+'           (1, 27) (1, 28)
    NAME       'z'           (1, 29) (1, 30)
    OP         '+'           (1, 31) (1, 32)
    NAME       'a'           (1, 33) (1, 34)
    OP         '['           (1, 34) (1, 35)
    NUMBER     '5'           (1, 35) (1, 36)
    OP         ']'           (1, 36) (1, 37)
    """)

    def test_multiplicative(self):
        # Multiplicative
        self.check_tokenize("x = 1//1*1/5*12%0x12@42", """\
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '//'          (1, 5) (1, 7)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '*'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '/'           (1, 10) (1, 11)
    NUMBER     '5'           (1, 11) (1, 12)
    OP         '*'           (1, 12) (1, 13)
    NUMBER     '12'          (1, 13) (1, 15)
    OP         '%'           (1, 15) (1, 16)
    NUMBER     '0x12'        (1, 16) (1, 20)
    OP         '@'           (1, 20) (1, 21)
    NUMBER     '42'          (1, 21) (1, 23)
    """)

    def test_unary(self):
        # Unary
        self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\
    OP         '~'           (1, 0) (1, 1)
    NUMBER     '1'           (1, 1) (1, 2)
    OP         '^'           (1, 3) (1, 4)
    NUMBER     '1'           (1, 5) (1, 6)
    OP         '&'           (1, 7) (1, 8)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '|'           (1, 11) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         '^'           (1, 14) (1, 15)
    OP         '-'           (1, 16) (1, 17)
    NUMBER     '1'           (1, 17) (1, 18)
    """)
        self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\
    OP         '-'           (1, 0) (1, 1)
    NUMBER     '1'           (1, 1) (1, 2)
    OP         '*'           (1, 2) (1, 3)
    NUMBER     '1'           (1, 3) (1, 4)
    OP         '/'           (1, 4) (1, 5)
    NUMBER     '1'           (1, 5) (1, 6)
    OP         '+'           (1, 6) (1, 7)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '*'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '//'          (1, 10) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         '-'           (1, 14) (1, 15)
    OP         '-'           (1, 16) (1, 17)
    OP         '-'           (1, 17) (1, 18)
    OP         '-'           (1, 18) (1, 19)
    NUMBER     '1'           (1, 19) (1, 20)
    OP         '**'          (1, 20) (1, 22)
    NUMBER     '1'           (1, 22) (1, 23)
    """)

    def test_selector(self):
        # Selector
        self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\
    NAME       'import'      (1, 0) (1, 6)
    NAME       'sys'         (1, 7) (1, 10)
    OP         ','           (1, 10) (1, 11)
    NAME       'time'        (1, 12) (1, 16)
    NEWLINE    '\\n'          (1, 16) (1, 17)
    NAME       'x'           (2, 0) (2, 1)
    OP         '='           (2, 2) (2, 3)
    NAME       'sys'         (2, 4) (2, 7)
    OP         '.'           (2, 7) (2, 8)
    NAME       'modules'     (2, 8) (2, 15)
    OP         '['           (2, 15) (2, 16)
    STRING     "'time'"      (2, 16) (2, 22)
    OP         ']'           (2, 22) (2, 23)
    OP         '.'           (2, 23) (2, 24)
    NAME       'time'        (2, 24) (2, 28)
    OP         '('           (2, 28) (2, 29)
    OP         ')'           (2, 29) (2, 30)
    """)

    def test_method(self):
        # Methods
        self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\
    OP         '@'           (1, 0) (1, 1)
    NAME       'staticmethod' (1, 1) (1, 13)
    NEWLINE    '\\n'          (1, 13) (1, 14)
    NAME       'def'         (2, 0) (2, 3)
    NAME       'foo'         (2, 4) (2, 7)
    OP         '('           (2, 7) (2, 8)
    NAME       'x'           (2, 8) (2, 9)
    OP         ','           (2, 9) (2, 10)
    NAME       'y'           (2, 10) (2, 11)
    OP         ')'           (2, 11) (2, 12)
    OP         ':'           (2, 12) (2, 13)
    NAME       'pass'        (2, 14) (2, 18)
    """)

    def test_tabs(self):
        # Evil tabs
        self.check_tokenize("def f():\n"
                            "\tif x\n"
                            "        \tpass", """\
    NAME       'def'         (1, 0) (1, 3)
    NAME       'f'           (1, 4) (1, 5)
    OP         '('           (1, 5) (1, 6)
    OP         ')'           (1, 6) (1, 7)
    OP         ':'           (1, 7) (1, 8)
    NEWLINE    '\\n'          (1, 8) (1, 9)
    INDENT     '\\t'          (2, 0) (2, 1)
    NAME       'if'          (2, 1) (2, 3)
    NAME       'x'           (2, 4) (2, 5)
    NEWLINE    '\\n'          (2, 5) (2, 6)
    INDENT     '        \\t'  (3, 0) (3, 9)
    NAME       'pass'        (3, 9) (3, 13)
    DEDENT     ''            (4, 0) (4, 0)
    DEDENT     ''            (4, 0) (4, 0)
    """)

    def test_non_ascii_identifiers(self):
        # Non-ascii identifiers
| 604 | n/a | self.check_tokenize("รrter = 'places'\ngrรยผn = 'green'", """\ |
|---|
| 605 | n/a | NAME 'รrter' (1, 0) (1, 5) |
|---|
| 606 | n/a | OP '=' (1, 6) (1, 7) |
|---|
| 607 | n/a | STRING "'places'" (1, 8) (1, 16) |
|---|
| 608 | n/a | NEWLINE '\\n' (1, 16) (1, 17) |
|---|
| 609 | n/a | NAME 'grรยผn' (2, 0) (2, 4) |
|---|
| 610 | n/a | OP '=' (2, 5) (2, 6) |
|---|
| 611 | n/a | STRING "'green'" (2, 7) (2, 14) |
|---|
| 612 | n/a | """) |
|---|
| 613 | n/a | |
|---|
| 614 | n/a | def test_unicode(self): |
|---|
| 615 | n/a | # Legacy unicode literals: |
|---|
| 616 | n/a | self.check_tokenize("รrter = u'places'\ngrรยผn = U'green'", """\ |
|---|
| 617 | n/a | NAME 'รrter' (1, 0) (1, 5) |
|---|
| 618 | n/a | OP '=' (1, 6) (1, 7) |
|---|
| 619 | n/a | STRING "u'places'" (1, 8) (1, 17) |
|---|
| 620 | n/a | NEWLINE '\\n' (1, 17) (1, 18) |
|---|
| 621 | n/a | NAME 'grรยผn' (2, 0) (2, 4) |
|---|
| 622 | n/a | OP '=' (2, 5) (2, 6) |
|---|
| 623 | n/a | STRING "U'green'" (2, 7) (2, 15) |
|---|
| 624 | n/a | """) |
|---|

    def test_async(self):
        # Async/await extension:
        self.check_tokenize("async = 1", """\
    NAME       'async'       (1, 0) (1, 5)
    OP         '='           (1, 6) (1, 7)
    NUMBER     '1'           (1, 8) (1, 9)
    """)

        self.check_tokenize("a = (async = 1)", """\
    NAME       'a'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    OP         '('           (1, 4) (1, 5)
    NAME       'async'       (1, 5) (1, 10)
    OP         '='           (1, 11) (1, 12)
    NUMBER     '1'           (1, 13) (1, 14)
    OP         ')'           (1, 14) (1, 15)
    """)

        self.check_tokenize("async()", """\
    NAME       'async'       (1, 0) (1, 5)
    OP         '('           (1, 5) (1, 6)
    OP         ')'           (1, 6) (1, 7)
    """)

        self.check_tokenize("class async(Bar):pass", """\
    NAME       'class'       (1, 0) (1, 5)
    NAME       'async'       (1, 6) (1, 11)
    OP         '('           (1, 11) (1, 12)
    NAME       'Bar'         (1, 12) (1, 15)
    OP         ')'           (1, 15) (1, 16)
    OP         ':'           (1, 16) (1, 17)
    NAME       'pass'        (1, 17) (1, 21)
    """)

        self.check_tokenize("class async:pass", """\
    NAME       'class'       (1, 0) (1, 5)
    NAME       'async'       (1, 6) (1, 11)
    OP         ':'           (1, 11) (1, 12)
    NAME       'pass'        (1, 12) (1, 16)
    """)

        self.check_tokenize("await = 1", """\
    NAME       'await'       (1, 0) (1, 5)
    OP         '='           (1, 6) (1, 7)
    NUMBER     '1'           (1, 8) (1, 9)
    """)

        self.check_tokenize("foo.async", """\
    NAME       'foo'         (1, 0) (1, 3)
    OP         '.'           (1, 3) (1, 4)
    NAME       'async'       (1, 4) (1, 9)
    """)

        self.check_tokenize("async for a in b: pass", """\
    NAME       'async'       (1, 0) (1, 5)
    NAME       'for'         (1, 6) (1, 9)
    NAME       'a'           (1, 10) (1, 11)
    NAME       'in'          (1, 12) (1, 14)
    NAME       'b'           (1, 15) (1, 16)
    OP         ':'           (1, 16) (1, 17)
    NAME       'pass'        (1, 18) (1, 22)
    """)

        self.check_tokenize("async with a as b: pass", """\
    NAME       'async'       (1, 0) (1, 5)
    NAME       'with'        (1, 6) (1, 10)
    NAME       'a'           (1, 11) (1, 12)
    NAME       'as'          (1, 13) (1, 15)
    NAME       'b'           (1, 16) (1, 17)
    OP         ':'           (1, 17) (1, 18)
    NAME       'pass'        (1, 19) (1, 23)
    """)

        self.check_tokenize("async.foo", """\
    NAME       'async'       (1, 0) (1, 5)
    OP         '.'           (1, 5) (1, 6)
    NAME       'foo'         (1, 6) (1, 9)
    """)

        self.check_tokenize("async", """\
    NAME       'async'       (1, 0) (1, 5)
    """)

        self.check_tokenize("async\n#comment\nawait", """\
    NAME       'async'       (1, 0) (1, 5)
    NEWLINE    '\\n'          (1, 5) (1, 6)
    COMMENT    '#comment'    (2, 0) (2, 8)
    NL         '\\n'          (2, 8) (2, 9)
    NAME       'await'       (3, 0) (3, 5)
    """)

        self.check_tokenize("async\n...\nawait", """\
    NAME       'async'       (1, 0) (1, 5)
    NEWLINE    '\\n'          (1, 5) (1, 6)
    OP         '...'         (2, 0) (2, 3)
    NEWLINE    '\\n'          (2, 3) (2, 4)
    NAME       'await'       (3, 0) (3, 5)
    """)

        self.check_tokenize("async\nawait", """\
    NAME       'async'       (1, 0) (1, 5)
    NEWLINE    '\\n'          (1, 5) (1, 6)
    NAME       'await'       (2, 0) (2, 5)
    """)

        self.check_tokenize("foo.async + 1", """\
    NAME       'foo'         (1, 0) (1, 3)
    OP         '.'           (1, 3) (1, 4)
    NAME       'async'       (1, 4) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    NUMBER     '1'           (1, 12) (1, 13)
    """)

        self.check_tokenize("async def foo(): pass", """\
    ASYNC      'async'       (1, 0) (1, 5)
    NAME       'def'         (1, 6) (1, 9)
    NAME       'foo'         (1, 10) (1, 13)
    OP         '('           (1, 13) (1, 14)
    OP         ')'           (1, 14) (1, 15)
    OP         ':'           (1, 15) (1, 16)
    NAME       'pass'        (1, 17) (1, 21)
    """)

        self.check_tokenize('''\
async def foo():
  def foo(await):
    await = 1
  if 1:
    await
async += 1
''', """\
    ASYNC      'async'       (1, 0) (1, 5)
    NAME       'def'         (1, 6) (1, 9)
    NAME       'foo'         (1, 10) (1, 13)
    OP         '('           (1, 13) (1, 14)
    OP         ')'           (1, 14) (1, 15)
    OP         ':'           (1, 15) (1, 16)
    NEWLINE    '\\n'          (1, 16) (1, 17)
    INDENT     '  '          (2, 0) (2, 2)
    NAME       'def'         (2, 2) (2, 5)
    NAME       'foo'         (2, 6) (2, 9)
    OP         '('           (2, 9) (2, 10)
    AWAIT      'await'       (2, 10) (2, 15)
    OP         ')'           (2, 15) (2, 16)
    OP         ':'           (2, 16) (2, 17)
    NEWLINE    '\\n'          (2, 17) (2, 18)
    INDENT     '    '        (3, 0) (3, 4)
    AWAIT      'await'       (3, 4) (3, 9)
    OP         '='           (3, 10) (3, 11)
    NUMBER     '1'           (3, 12) (3, 13)
    NEWLINE    '\\n'          (3, 13) (3, 14)
    DEDENT     ''            (4, 2) (4, 2)
    NAME       'if'          (4, 2) (4, 4)
    NUMBER     '1'           (4, 5) (4, 6)
    OP         ':'           (4, 6) (4, 7)
    NEWLINE    '\\n'          (4, 7) (4, 8)
    INDENT     '    '        (5, 0) (5, 4)
    AWAIT      'await'       (5, 4) (5, 9)
    NEWLINE    '\\n'          (5, 9) (5, 10)
    DEDENT     ''            (6, 0) (6, 0)
    DEDENT     ''            (6, 0) (6, 0)
    NAME       'async'       (6, 0) (6, 5)
    OP         '+='          (6, 6) (6, 8)
    NUMBER     '1'           (6, 9) (6, 10)
    NEWLINE    '\\n'          (6, 10) (6, 11)
    """)

        self.check_tokenize('''\
async def foo():
  async for i in 1: pass''', """\
    ASYNC      'async'       (1, 0) (1, 5)
    NAME       'def'         (1, 6) (1, 9)
    NAME       'foo'         (1, 10) (1, 13)
    OP         '('           (1, 13) (1, 14)
    OP         ')'           (1, 14) (1, 15)
    OP         ':'           (1, 15) (1, 16)
    NEWLINE    '\\n'          (1, 16) (1, 17)
    INDENT     '  '          (2, 0) (2, 2)
    ASYNC      'async'       (2, 2) (2, 7)
    NAME       'for'         (2, 8) (2, 11)
    NAME       'i'           (2, 12) (2, 13)
    NAME       'in'          (2, 14) (2, 16)
    NUMBER     '1'           (2, 17) (2, 18)
    OP         ':'           (2, 18) (2, 19)
    NAME       'pass'        (2, 20) (2, 24)
    DEDENT     ''            (3, 0) (3, 0)
    """)

        self.check_tokenize('''async def foo(async): await''', """\
    ASYNC      'async'       (1, 0) (1, 5)
    NAME       'def'         (1, 6) (1, 9)
    NAME       'foo'         (1, 10) (1, 13)
    OP         '('           (1, 13) (1, 14)
    ASYNC      'async'       (1, 14) (1, 19)
    OP         ')'           (1, 19) (1, 20)
    OP         ':'           (1, 20) (1, 21)
    AWAIT      'await'       (1, 22) (1, 27)
    """)

        self.check_tokenize('''\
def f():

  def baz(): pass
  async def bar(): pass

  await = 2''', """\
    NAME       'def'         (1, 0) (1, 3)
    NAME       'f'           (1, 4) (1, 5)
    OP         '('           (1, 5) (1, 6)
    OP         ')'           (1, 6) (1, 7)
    OP         ':'           (1, 7) (1, 8)
    NEWLINE    '\\n'          (1, 8) (1, 9)
    NL         '\\n'          (2, 0) (2, 1)
    INDENT     '  '          (3, 0) (3, 2)
    NAME       'def'         (3, 2) (3, 5)
    NAME       'baz'         (3, 6) (3, 9)
    OP         '('           (3, 9) (3, 10)
    OP         ')'           (3, 10) (3, 11)
    OP         ':'           (3, 11) (3, 12)
    NAME       'pass'        (3, 13) (3, 17)
    NEWLINE    '\\n'          (3, 17) (3, 18)
    ASYNC      'async'       (4, 2) (4, 7)
    NAME       'def'         (4, 8) (4, 11)
    NAME       'bar'         (4, 12) (4, 15)
    OP         '('           (4, 15) (4, 16)
    OP         ')'           (4, 16) (4, 17)
    OP         ':'           (4, 17) (4, 18)
    NAME       'pass'        (4, 19) (4, 23)
    NEWLINE    '\\n'          (4, 23) (4, 24)
    NL         '\\n'          (5, 0) (5, 1)
    NAME       'await'       (6, 2) (6, 7)
    OP         '='           (6, 8) (6, 9)
    NUMBER     '2'           (6, 10) (6, 11)
    DEDENT     ''            (7, 0) (7, 0)
    """)

        self.check_tokenize('''\
async def f():

  def baz(): pass
  async def bar(): pass

  await = 2''', """\
    ASYNC      'async'       (1, 0) (1, 5)
    NAME       'def'         (1, 6) (1, 9)
    NAME       'f'           (1, 10) (1, 11)
    OP         '('           (1, 11) (1, 12)
    OP         ')'           (1, 12) (1, 13)
    OP         ':'           (1, 13) (1, 14)
    NEWLINE    '\\n'          (1, 14) (1, 15)
    NL         '\\n'          (2, 0) (2, 1)
    INDENT     '  '          (3, 0) (3, 2)
    NAME       'def'         (3, 2) (3, 5)
    NAME       'baz'         (3, 6) (3, 9)
    OP         '('           (3, 9) (3, 10)
    OP         ')'           (3, 10) (3, 11)
    OP         ':'           (3, 11) (3, 12)
    NAME       'pass'        (3, 13) (3, 17)
    NEWLINE    '\\n'          (3, 17) (3, 18)
    ASYNC      'async'       (4, 2) (4, 7)
    NAME       'def'         (4, 8) (4, 11)
    NAME       'bar'         (4, 12) (4, 15)
    OP         '('           (4, 15) (4, 16)
    OP         ')'           (4, 16) (4, 17)
    OP         ':'           (4, 17) (4, 18)
    NAME       'pass'        (4, 19) (4, 23)
    NEWLINE    '\\n'          (4, 23) (4, 24)
    NL         '\\n'          (5, 0) (5, 1)
    AWAIT      'await'       (6, 2) (6, 7)
    OP         '='           (6, 8) (6, 9)
    NUMBER     '2'           (6, 10) (6, 11)
    DEDENT     ''            (7, 0) (7, 0)
    """)


def decistmt(s):
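    """Substitute Decimals for floats in a string of statements.

    A sketch of the behaviour, mirroring TestMisc.test_decistmt below:
    decistmt('+21.3e-5*-.1234/81.7') returns
    "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"; the space
    after each 'Decimal' is added by untokenize() when it is fed plain
    (type, string) 2-tuples.
    """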
    result = []
    g = tokenize(BytesIO(s.encode('utf-8')).readline)  # tokenize the string
    for toknum, tokval, _, _, _ in g:
        if toknum == NUMBER and '.' in tokval:  # replace NUMBER tokens
            result.extend([
                (NAME, 'Decimal'),
                (OP, '('),
                (STRING, repr(tokval)),
                (OP, ')')
            ])
        else:
            result.append((toknum, tokval))
    return untokenize(result).decode('utf-8')

class TestMisc(TestCase):

    def test_decistmt(self):
        # Substitute Decimals for floats in a string of statements.
        # This is an example from the docs.

        from decimal import Decimal
        s = '+21.3e-5*-.1234/81.7'
        self.assertEqual(decistmt(s),
                         "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')")

        # The format of the exponent is inherited from the platform C library.
        # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
        # we're only showing 11 digits, and the 12th isn't close to 5, the
        # rest of the output should be platform-independent.
        self.assertRegex(repr(eval(s)), '-3.2171603427[0-9]*e-0+7')

        # Output from calculations with Decimal should be identical across all
        # platforms.
        self.assertEqual(eval(decistmt(s)),
                         Decimal('-3.217160342717258261933904529E-7'))


class TestTokenizerAdheresToPep0263(TestCase):
    """
    Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
    """

    def _testFile(self, filename):
        path = os.path.join(os.path.dirname(__file__), filename)
        TestRoundtrip.check_roundtrip(self, open(path, 'rb'))

    def test_utf8_coding_cookie_and_no_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt'
        self._testFile(f)

    def test_latin1_coding_cookie_and_utf8_bom(self):
        """
        As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
        allowed encoding for the comment is 'utf-8'. The text file used in
        this test starts with a BOM signature, but specifies latin1 as the
        coding, so verify that a SyntaxError is raised, which matches the
        behaviour of the interpreter when it encounters a similar condition.
        """
        f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
        self.assertRaises(SyntaxError, self._testFile, f)

    def test_no_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
        self._testFile(f)

    def test_utf8_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
        self._testFile(f)

    def test_bad_coding_cookie(self):
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py')
        self.assertRaises(SyntaxError, self._testFile, 'bad_coding2.py')


class Test_Tokenize(TestCase):

    def test__tokenize_decodes_with_specified_encoding(self):
        literal = '"ЉЊЈЁЂ"'
        line = literal.encode('utf-8')
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return line
            else:
                return b''

        # skip the initial encoding token and the end token
        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
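        # These plain tuples compare equal to the TokenInfo namedtuples
        # that _tokenize() yields; type 3 is token.STRING.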
        self.assertEqual(tokens, expected_tokens,
                         "bytes not decoded with encoding")

    def test__tokenize_does_not_decode_with_encoding_none(self):
        literal = '"ЉЊЈЁЂ"'
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return literal
            else:
                return b''

        # skip the end token
        tokens = list(_tokenize(readline, encoding=None))[:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "string not tokenized when encoding is None")


class TestDetectEncoding(TestCase):

    def get_readline(self, lines):
        index = 0
        def readline():
            nonlocal index
            if index == len(lines):
                raise StopIteration
            line = lines[index]
            index += 1
            return line
        return readline
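
    # detect_encoding() consumes at most two lines through readline and
    # returns (encoding, consumed_lines). A minimal sketch of a call
    # against this helper (illustrative only):
    #
    #   encoding, lines = detect_encoding(self.get_readline(
    #       (b'# -*- coding: latin-1 -*-\n',)))
    #   # encoding == 'iso-8859-1' (normalized), lines == [the cookie line]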
| 1026 | n/a | |
|---|
| 1027 | n/a | def test_no_bom_no_encoding_cookie(self): |
|---|
| 1028 | n/a | lines = ( |
|---|
| 1029 | n/a | b'# something\n', |
|---|
| 1030 | n/a | b'print(something)\n', |
|---|
| 1031 | n/a | b'do_something(else)\n' |
|---|
| 1032 | n/a | ) |
|---|
| 1033 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1034 | n/a | self.assertEqual(encoding, 'utf-8') |
|---|
| 1035 | n/a | self.assertEqual(consumed_lines, list(lines[:2])) |
|---|
| 1036 | n/a | |
|---|
| 1037 | n/a | def test_bom_no_cookie(self): |
|---|
| 1038 | n/a | lines = ( |
|---|
| 1039 | n/a | b'\xef\xbb\xbf# something\n', |
|---|
| 1040 | n/a | b'print(something)\n', |
|---|
| 1041 | n/a | b'do_something(else)\n' |
|---|
| 1042 | n/a | ) |
|---|
| 1043 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1044 | n/a | self.assertEqual(encoding, 'utf-8-sig') |
|---|
| 1045 | n/a | self.assertEqual(consumed_lines, |
|---|
| 1046 | n/a | [b'# something\n', b'print(something)\n']) |
|---|
| 1047 | n/a | |
|---|
| 1048 | n/a | def test_cookie_first_line_no_bom(self): |
|---|
| 1049 | n/a | lines = ( |
|---|
| 1050 | n/a | b'# -*- coding: latin-1 -*-\n', |
|---|
| 1051 | n/a | b'print(something)\n', |
|---|
| 1052 | n/a | b'do_something(else)\n' |
|---|
| 1053 | n/a | ) |
|---|
| 1054 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1055 | n/a | self.assertEqual(encoding, 'iso-8859-1') |
|---|
| 1056 | n/a | self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n']) |
|---|
| 1057 | n/a | |
|---|
| 1058 | n/a | def test_matched_bom_and_cookie_first_line(self): |
|---|
| 1059 | n/a | lines = ( |
|---|
| 1060 | n/a | b'\xef\xbb\xbf# coding=utf-8\n', |
|---|
| 1061 | n/a | b'print(something)\n', |
|---|
| 1062 | n/a | b'do_something(else)\n' |
|---|
| 1063 | n/a | ) |
|---|
| 1064 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1065 | n/a | self.assertEqual(encoding, 'utf-8-sig') |
|---|
| 1066 | n/a | self.assertEqual(consumed_lines, [b'# coding=utf-8\n']) |
|---|
| 1067 | n/a | |
|---|
| 1068 | n/a | def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self): |
|---|
| 1069 | n/a | lines = ( |
|---|
| 1070 | n/a | b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n', |
|---|
| 1071 | n/a | b'print(something)\n', |
|---|
| 1072 | n/a | b'do_something(else)\n' |
|---|
| 1073 | n/a | ) |
|---|
| 1074 | n/a | readline = self.get_readline(lines) |
|---|
| 1075 | n/a | self.assertRaises(SyntaxError, detect_encoding, readline) |
|---|
| 1076 | n/a | |
|---|
| 1077 | n/a | def test_cookie_second_line_no_bom(self): |
|---|
| 1078 | n/a | lines = ( |
|---|
| 1079 | n/a | b'#! something\n', |
|---|
| 1080 | n/a | b'# vim: set fileencoding=ascii :\n', |
|---|
| 1081 | n/a | b'print(something)\n', |
|---|
| 1082 | n/a | b'do_something(else)\n' |
|---|
| 1083 | n/a | ) |
|---|
| 1084 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1085 | n/a | self.assertEqual(encoding, 'ascii') |
|---|
| 1086 | n/a | expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n'] |
|---|
| 1087 | n/a | self.assertEqual(consumed_lines, expected) |
|---|
| 1088 | n/a | |
|---|
| 1089 | n/a | def test_matched_bom_and_cookie_second_line(self): |
|---|
| 1090 | n/a | lines = ( |
|---|
| 1091 | n/a | b'\xef\xbb\xbf#! something\n', |
|---|
| 1092 | n/a | b'f# coding=utf-8\n', |
|---|
| 1093 | n/a | b'print(something)\n', |
|---|
| 1094 | n/a | b'do_something(else)\n' |
|---|
| 1095 | n/a | ) |
|---|
| 1096 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1097 | n/a | self.assertEqual(encoding, 'utf-8-sig') |
|---|
| 1098 | n/a | self.assertEqual(consumed_lines, |
|---|
| 1099 | n/a | [b'#! something\n', b'f# coding=utf-8\n']) |
|---|
| 1100 | n/a | |
|---|
| 1101 | n/a | def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self): |
|---|
| 1102 | n/a | lines = ( |
|---|
| 1103 | n/a | b'\xef\xbb\xbf#! something\n', |
|---|
| 1104 | n/a | b'# vim: set fileencoding=ascii :\n', |
|---|
| 1105 | n/a | b'print(something)\n', |
|---|
| 1106 | n/a | b'do_something(else)\n' |
|---|
| 1107 | n/a | ) |
|---|
| 1108 | n/a | readline = self.get_readline(lines) |
|---|
| 1109 | n/a | self.assertRaises(SyntaxError, detect_encoding, readline) |
|---|
| 1110 | n/a | |
|---|
| 1111 | n/a | def test_cookie_second_line_noncommented_first_line(self): |
|---|
| 1112 | n/a | lines = ( |
|---|
| 1113 | n/a | b"print('\xc2\xa3')\n", |
|---|
| 1114 | n/a | b'# vim: set fileencoding=iso8859-15 :\n', |
|---|
| 1115 | n/a | b"print('\xe2\x82\xac')\n" |
|---|
| 1116 | n/a | ) |
|---|
| 1117 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1118 | n/a | self.assertEqual(encoding, 'utf-8') |
|---|
| 1119 | n/a | expected = [b"print('\xc2\xa3')\n"] |
|---|
| 1120 | n/a | self.assertEqual(consumed_lines, expected) |
|---|
| 1121 | n/a | |
|---|
| 1122 | n/a | def test_cookie_second_line_commented_first_line(self): |
|---|
| 1123 | n/a | lines = ( |
|---|
| 1124 | n/a | b"#print('\xc2\xa3')\n", |
|---|
| 1125 | n/a | b'# vim: set fileencoding=iso8859-15 :\n', |
|---|
| 1126 | n/a | b"print('\xe2\x82\xac')\n" |
|---|
| 1127 | n/a | ) |
|---|
| 1128 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1129 | n/a | self.assertEqual(encoding, 'iso8859-15') |
|---|
| 1130 | n/a | expected = [b"#print('\xc2\xa3')\n", b'# vim: set fileencoding=iso8859-15 :\n'] |
|---|
| 1131 | n/a | self.assertEqual(consumed_lines, expected) |
|---|
| 1132 | n/a | |
|---|
| 1133 | n/a | def test_cookie_second_line_empty_first_line(self): |
|---|
| 1134 | n/a | lines = ( |
|---|
| 1135 | n/a | b'\n', |
|---|
| 1136 | n/a | b'# vim: set fileencoding=iso8859-15 :\n', |
|---|
| 1137 | n/a | b"print('\xe2\x82\xac')\n" |
|---|
| 1138 | n/a | ) |
|---|
| 1139 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(lines)) |
|---|
| 1140 | n/a | self.assertEqual(encoding, 'iso8859-15') |
|---|
| 1141 | n/a | expected = [b'\n', b'# vim: set fileencoding=iso8859-15 :\n'] |
|---|
| 1142 | n/a | self.assertEqual(consumed_lines, expected) |
|---|
| 1143 | n/a | |
|---|
| 1144 | n/a | def test_latin1_normalization(self): |
|---|
| 1145 | n/a | # See get_normal_name() in tokenizer.c. |
|---|
| 1146 | n/a | encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix", |
|---|
| 1147 | n/a | "iso-8859-1-unix", "iso-latin-1-mac") |
|---|
| 1148 | n/a | for encoding in encodings: |
|---|
| 1149 | n/a | for rep in ("-", "_"): |
|---|
| 1150 | n/a | enc = encoding.replace("-", rep) |
|---|
| 1151 | n/a | lines = (b"#!/usr/bin/python\n", |
|---|
| 1152 | n/a | b"# coding: " + enc.encode("ascii") + b"\n", |
|---|
| 1153 | n/a | b"print(things)\n", |
|---|
| 1154 | n/a | b"do_something += 4\n") |
|---|
| 1155 | n/a | rl = self.get_readline(lines) |
|---|
| 1156 | n/a | found, consumed_lines = detect_encoding(rl) |
|---|
| 1157 | n/a | self.assertEqual(found, "iso-8859-1") |
|---|
| 1158 | n/a | |
|---|
| 1159 | n/a | def test_syntaxerror_latin1(self): |
|---|
| 1160 | n/a | # Issue 14629: need to raise SyntaxError if the first |
|---|
| 1161 | n/a | # line(s) have non-UTF-8 characters |
|---|
| 1162 | n/a | lines = ( |
|---|
| 1163 | n/a | b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S |
|---|
| 1164 | n/a | ) |
|---|
| 1165 | n/a | readline = self.get_readline(lines) |
|---|
| 1166 | n/a | self.assertRaises(SyntaxError, detect_encoding, readline) |
|---|
| 1167 | n/a | |
|---|
| 1168 | n/a | |
|---|
| 1169 | n/a | def test_utf8_normalization(self): |
|---|
| 1170 | n/a | # See get_normal_name() in tokenizer.c. |
|---|
| 1171 | n/a | encodings = ("utf-8", "utf-8-mac", "utf-8-unix") |
|---|
| 1172 | n/a | for encoding in encodings: |
|---|
| 1173 | n/a | for rep in ("-", "_"): |
|---|
| 1174 | n/a | enc = encoding.replace("-", rep) |
|---|
| 1175 | n/a | lines = (b"#!/usr/bin/python\n", |
|---|
| 1176 | n/a | b"# coding: " + enc.encode("ascii") + b"\n", |
|---|
| 1177 | n/a | b"1 + 3\n") |
|---|
| 1178 | n/a | rl = self.get_readline(lines) |
|---|
| 1179 | n/a | found, consumed_lines = detect_encoding(rl) |
|---|
| 1180 | n/a | self.assertEqual(found, "utf-8") |
|---|
| 1181 | n/a | |
|---|
| 1182 | n/a | def test_short_files(self): |
|---|
| 1183 | n/a | readline = self.get_readline((b'print(something)\n',)) |
|---|
| 1184 | n/a | encoding, consumed_lines = detect_encoding(readline) |
|---|
| 1185 | n/a | self.assertEqual(encoding, 'utf-8') |
|---|
| 1186 | n/a | self.assertEqual(consumed_lines, [b'print(something)\n']) |
|---|
| 1187 | n/a | |
|---|
| 1188 | n/a | encoding, consumed_lines = detect_encoding(self.get_readline(())) |
|---|
| 1189 | n/a | self.assertEqual(encoding, 'utf-8') |
|---|
| 1190 | n/a | self.assertEqual(consumed_lines, []) |
|---|
| 1191 | n/a | |
|---|
| 1192 | n/a | readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',)) |
|---|
| 1193 | n/a | encoding, consumed_lines = detect_encoding(readline) |
|---|
| 1194 | n/a | self.assertEqual(encoding, 'utf-8-sig') |
|---|
| 1195 | n/a | self.assertEqual(consumed_lines, [b'print(something)\n']) |
|---|
| 1196 | n/a | |
|---|
| 1197 | n/a | readline = self.get_readline((b'\xef\xbb\xbf',)) |
|---|
| 1198 | n/a | encoding, consumed_lines = detect_encoding(readline) |
|---|
| 1199 | n/a | self.assertEqual(encoding, 'utf-8-sig') |
|---|
| 1200 | n/a | self.assertEqual(consumed_lines, []) |
|---|
| 1201 | n/a | |
|---|
| 1202 | n/a | readline = self.get_readline((b'# coding: bad\n',)) |
|---|
| 1203 | n/a | self.assertRaises(SyntaxError, detect_encoding, readline) |
|---|
| 1204 | n/a | |
|---|
| 1205 | n/a | def test_false_encoding(self): |
|---|
| 1206 | n/a | # Issue 18873: "Encoding" detected in non-comment lines |
|---|
| 1207 | n/a | readline = self.get_readline((b'print("#coding=fake")',)) |
|---|
| 1208 | n/a | encoding, consumed_lines = detect_encoding(readline) |
|---|
| 1209 | n/a | self.assertEqual(encoding, 'utf-8') |
|---|
| 1210 | n/a | self.assertEqual(consumed_lines, [b'print("#coding=fake")']) |
|---|
| 1211 | n/a | |
|---|
| 1212 | n/a | def test_open(self): |
|---|
| 1213 | n/a | filename = support.TESTFN + '.py' |
|---|
| 1214 | n/a | self.addCleanup(support.unlink, filename) |
|---|
| 1215 | n/a | |
|---|
| 1216 | n/a | # test coding cookie |
|---|
| 1217 | n/a | for encoding in ('iso-8859-15', 'utf-8'): |
|---|
| 1218 | n/a | with open(filename, 'w', encoding=encoding) as fp: |
|---|
| 1219 | n/a | print("# coding: %s" % encoding, file=fp) |
|---|
| 1220 | n/a | print("print('euro:\u20ac')", file=fp) |
|---|
| 1221 | n/a | with tokenize_open(filename) as fp: |
|---|
| 1222 | n/a | self.assertEqual(fp.encoding, encoding) |
|---|
| 1223 | n/a | self.assertEqual(fp.mode, 'r') |
|---|
| 1224 | n/a | |
|---|
| 1225 | n/a | # test BOM (no coding cookie) |
|---|
| 1226 | n/a | with open(filename, 'w', encoding='utf-8-sig') as fp: |
|---|
| 1227 | n/a | print("print('euro:\u20ac')", file=fp) |
|---|
| 1228 | n/a | with tokenize_open(filename) as fp: |
|---|
| 1229 | n/a | self.assertEqual(fp.encoding, 'utf-8-sig') |
|---|
| 1230 | n/a | self.assertEqual(fp.mode, 'r') |
|---|
| 1231 | n/a | |
|---|
| 1232 | n/a | def test_filename_in_exception(self): |
|---|
| 1233 | n/a | # When possible, include the file name in the exception. |
|---|
| 1234 | n/a | path = 'some_file_path' |
|---|
| 1235 | n/a | lines = ( |
|---|
| 1236 | n/a | b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S |
|---|
| 1237 | n/a | ) |
|---|
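| n/a | n/a | # Bunk fakes just enough of a file-backed readline: detect_encoding() |
|---|
| n/a | n/a | # looks up .name on the object behind the bound readline to label the |
|---|
| n/a | n/a | # SyntaxError, and must also cope when that attribute is missing. |
|---|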
| 1238 | n/a | class Bunk: |
|---|
| 1239 | n/a | def __init__(self, lines, path): |
|---|
| 1240 | n/a | self.name = path |
|---|
| 1241 | n/a | self._lines = lines |
|---|
| 1242 | n/a | self._index = 0 |
|---|
| 1243 | n/a | |
|---|
| 1244 | n/a | def readline(self): |
|---|
| 1245 | n/a | if self._index == len(self._lines): |
|---|
| 1246 | n/a | raise StopIteration |
|---|
| 1247 | n/a | line = self._lines[self._index] |
|---|
| 1248 | n/a | self._index += 1 |
|---|
| 1249 | n/a | return line |
|---|
| 1250 | n/a | |
|---|
| 1251 | n/a | with self.assertRaises(SyntaxError): |
|---|
| 1252 | n/a | ins = Bunk(lines, path) |
|---|
| 1253 | n/a | # Make sure lacking a name isn't an issue. |
|---|
| 1254 | n/a | del ins.name |
|---|
| 1255 | n/a | detect_encoding(ins.readline) |
|---|
| 1256 | n/a | with self.assertRaisesRegex(SyntaxError, '.*{}'.format(path)): |
|---|
| 1257 | n/a | ins = Bunk(lines, path) |
|---|
| 1258 | n/a | detect_encoding(ins.readline) |
|---|
| 1259 | n/a | |
|---|
| 1260 | n/a | def test_open_error(self): |
|---|
| 1261 | n/a | # Issue #23840: open() must close the binary file on error |
|---|
| 1262 | n/a | m = BytesIO(b'#coding:xxx') |
|---|
| 1263 | n/a | with mock.patch('tokenize._builtin_open', return_value=m): |
|---|
| 1264 | n/a | self.assertRaises(SyntaxError, tokenize_open, 'foobar') |
|---|
| 1265 | n/a | self.assertTrue(m.closed) |
|---|
| 1266 | n/a | |
|---|
| 1267 | n/a | |
|---|
| 1268 | n/a | class TestTokenize(TestCase): |
|---|
| 1269 | n/a | |
|---|
| 1270 | n/a | def test_tokenize(self): |
|---|
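| n/a | n/a | # tokenize() first calls detect_encoding(), then must feed the lines |
|---|
| n/a | n/a | # that call consumed back through _tokenize() ahead of fresh readline() |
|---|
| n/a | n/a | # output. The mocks below record the encoding handed to _tokenize() and |
|---|
| n/a | n/a | # echo every line, making the splice visible in the asserted result. |
|---|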
| 1271 | n/a | import tokenize as tokenize_module |
|---|
| 1272 | n/a | encoding = object() |
|---|
| 1273 | n/a | encoding_used = None |
|---|
| 1274 | n/a | def mock_detect_encoding(readline): |
|---|
| 1275 | n/a | return encoding, [b'first', b'second'] |
|---|
| 1276 | n/a | |
|---|
| 1277 | n/a | def mock__tokenize(readline, encoding): |
|---|
| 1278 | n/a | nonlocal encoding_used |
|---|
| 1279 | n/a | encoding_used = encoding |
|---|
| 1280 | n/a | out = [] |
|---|
| 1281 | n/a | while True: |
|---|
| 1282 | n/a | next_line = readline() |
|---|
| 1283 | n/a | if next_line: |
|---|
| 1284 | n/a | out.append(next_line) |
|---|
| 1285 | n/a | continue |
|---|
| 1286 | n/a | return out |
|---|
| 1287 | n/a | |
|---|
| 1288 | n/a | counter = 0 |
|---|
| 1289 | n/a | def mock_readline(): |
|---|
| 1290 | n/a | nonlocal counter |
|---|
| 1291 | n/a | counter += 1 |
|---|
| 1292 | n/a | if counter == 5: |
|---|
| 1293 | n/a | return b'' |
|---|
| 1294 | n/a | return str(counter).encode() |
|---|
| 1295 | n/a | |
|---|
| 1296 | n/a | orig_detect_encoding = tokenize_module.detect_encoding |
|---|
| 1297 | n/a | orig__tokenize = tokenize_module._tokenize |
|---|
| 1298 | n/a | tokenize_module.detect_encoding = mock_detect_encoding |
|---|
| 1299 | n/a | tokenize_module._tokenize = mock__tokenize |
|---|
| 1300 | n/a | try: |
|---|
| 1301 | n/a | results = tokenize(mock_readline) |
|---|
| 1302 | n/a | self.assertEqual(list(results), |
|---|
| 1303 | n/a | [b'first', b'second', b'1', b'2', b'3', b'4']) |
|---|
| 1304 | n/a | finally: |
|---|
| 1305 | n/a | tokenize_module.detect_encoding = orig_detect_encoding |
|---|
| 1306 | n/a | tokenize_module._tokenize = orig__tokenize |
|---|
| 1307 | n/a | |
|---|
| 1308 | n/a | self.assertIs(encoding_used, encoding)  # assertTrue's 2nd arg is a message, not a comparison |
|---|
| 1309 | n/a | |
|---|
| 1310 | n/a | def test_oneline_defs(self): |
|---|
| 1311 | n/a | buf = [] |
|---|
| 1312 | n/a | for i in range(500): |
|---|
| 1313 | n/a | buf.append('def i{i}(): return {i}'.format(i=i)) |
|---|
| 1314 | n/a | buf.append('OK') |
|---|
| 1315 | n/a | buf = '\n'.join(buf) |
|---|
| 1316 | n/a | |
|---|
| 1317 | n/a | # Test that 500 consecutive one-line defs are OK |
|---|
| 1318 | n/a | toks = list(tokenize(BytesIO(buf.encode('utf-8')).readline)) |
|---|
| 1319 | n/a | self.assertEqual(toks[-2].string, 'OK') # [-1] is always ENDMARKER |
|---|
| 1320 | n/a | |
|---|
| 1321 | n/a | def assertExactTypeEqual(self, opstr, *optypes): |
|---|
| 1322 | n/a | tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline)) |
|---|
| 1323 | n/a | num_optypes = len(optypes) |
|---|
| 1324 | n/a | self.assertEqual(len(tokens), 2 + num_optypes) |
|---|
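| n/a | n/a | # Two bookkeeping tokens surround the operators: a leading ENCODING |
|---|
| n/a | n/a | # and a trailing ENDMARKER, each checked individually below. |
|---|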
| 1325 | n/a | self.assertEqual(token.tok_name[tokens[0].exact_type], |
|---|
| 1326 | n/a | token.tok_name[ENCODING]) |
|---|
| 1327 | n/a | for i in range(num_optypes): |
|---|
| 1328 | n/a | self.assertEqual(token.tok_name[tokens[i + 1].exact_type], |
|---|
| 1329 | n/a | token.tok_name[optypes[i]]) |
|---|
| 1330 | n/a | self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type], |
|---|
| 1331 | n/a | token.tok_name[token.ENDMARKER]) |
|---|
| 1332 | n/a | |
|---|
| 1333 | n/a | def test_exact_type(self): |
|---|
| 1334 | n/a | self.assertExactTypeEqual('()', token.LPAR, token.RPAR) |
|---|
| 1335 | n/a | self.assertExactTypeEqual('[]', token.LSQB, token.RSQB) |
|---|
| 1336 | n/a | self.assertExactTypeEqual(':', token.COLON) |
|---|
| 1337 | n/a | self.assertExactTypeEqual(',', token.COMMA) |
|---|
| 1338 | n/a | self.assertExactTypeEqual(';', token.SEMI) |
|---|
| 1339 | n/a | self.assertExactTypeEqual('+', token.PLUS) |
|---|
| 1340 | n/a | self.assertExactTypeEqual('-', token.MINUS) |
|---|
| 1341 | n/a | self.assertExactTypeEqual('*', token.STAR) |
|---|
| 1342 | n/a | self.assertExactTypeEqual('/', token.SLASH) |
|---|
| 1343 | n/a | self.assertExactTypeEqual('|', token.VBAR) |
|---|
| 1344 | n/a | self.assertExactTypeEqual('&', token.AMPER) |
|---|
| 1345 | n/a | self.assertExactTypeEqual('<', token.LESS) |
|---|
| 1346 | n/a | self.assertExactTypeEqual('>', token.GREATER) |
|---|
| 1347 | n/a | self.assertExactTypeEqual('=', token.EQUAL) |
|---|
| 1348 | n/a | self.assertExactTypeEqual('.', token.DOT) |
|---|
| 1349 | n/a | self.assertExactTypeEqual('%', token.PERCENT) |
|---|
| 1350 | n/a | self.assertExactTypeEqual('{}', token.LBRACE, token.RBRACE) |
|---|
| 1351 | n/a | self.assertExactTypeEqual('==', token.EQEQUAL) |
|---|
| 1352 | n/a | self.assertExactTypeEqual('!=', token.NOTEQUAL) |
|---|
| 1353 | n/a | self.assertExactTypeEqual('<=', token.LESSEQUAL) |
|---|
| 1354 | n/a | self.assertExactTypeEqual('>=', token.GREATEREQUAL) |
|---|
| 1355 | n/a | self.assertExactTypeEqual('~', token.TILDE) |
|---|
| 1356 | n/a | self.assertExactTypeEqual('^', token.CIRCUMFLEX) |
|---|
| 1357 | n/a | self.assertExactTypeEqual('<<', token.LEFTSHIFT) |
|---|
| 1358 | n/a | self.assertExactTypeEqual('>>', token.RIGHTSHIFT) |
|---|
| 1359 | n/a | self.assertExactTypeEqual('**', token.DOUBLESTAR) |
|---|
| 1360 | n/a | self.assertExactTypeEqual('+=', token.PLUSEQUAL) |
|---|
| 1361 | n/a | self.assertExactTypeEqual('-=', token.MINEQUAL) |
|---|
| 1362 | n/a | self.assertExactTypeEqual('*=', token.STAREQUAL) |
|---|
| 1363 | n/a | self.assertExactTypeEqual('/=', token.SLASHEQUAL) |
|---|
| 1364 | n/a | self.assertExactTypeEqual('%=', token.PERCENTEQUAL) |
|---|
| 1365 | n/a | self.assertExactTypeEqual('&=', token.AMPEREQUAL) |
|---|
| 1366 | n/a | self.assertExactTypeEqual('|=', token.VBAREQUAL) |
|---|
| 1367 | n/a | self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL) |
|---|
| 1369 | n/a | self.assertExactTypeEqual('<<=', token.LEFTSHIFTEQUAL) |
|---|
| 1370 | n/a | self.assertExactTypeEqual('>>=', token.RIGHTSHIFTEQUAL) |
|---|
| 1371 | n/a | self.assertExactTypeEqual('**=', token.DOUBLESTAREQUAL) |
|---|
| 1372 | n/a | self.assertExactTypeEqual('//', token.DOUBLESLASH) |
|---|
| 1373 | n/a | self.assertExactTypeEqual('//=', token.DOUBLESLASHEQUAL) |
|---|
| 1374 | n/a | self.assertExactTypeEqual('@', token.AT) |
|---|
| 1375 | n/a | self.assertExactTypeEqual('@=', token.ATEQUAL) |
|---|
| 1376 | n/a | |
|---|
| 1377 | n/a | self.assertExactTypeEqual('a**2+b**2==c**2', |
|---|
| 1378 | n/a | NAME, token.DOUBLESTAR, NUMBER, |
|---|
| 1379 | n/a | token.PLUS, |
|---|
| 1380 | n/a | NAME, token.DOUBLESTAR, NUMBER, |
|---|
| 1381 | n/a | token.EQEQUAL, |
|---|
| 1382 | n/a | NAME, token.DOUBLESTAR, NUMBER) |
|---|
| 1383 | n/a | self.assertExactTypeEqual('{1, 2, 3}', |
|---|
| 1384 | n/a | token.LBRACE, |
|---|
| 1385 | n/a | token.NUMBER, token.COMMA, |
|---|
| 1386 | n/a | token.NUMBER, token.COMMA, |
|---|
| 1387 | n/a | token.NUMBER, |
|---|
| 1388 | n/a | token.RBRACE) |
|---|
| 1389 | n/a | self.assertExactTypeEqual('^(x & 0x1)', |
|---|
| 1390 | n/a | token.CIRCUMFLEX, |
|---|
| 1391 | n/a | token.LPAR, |
|---|
| 1392 | n/a | token.NAME, token.AMPER, token.NUMBER, |
|---|
| 1393 | n/a | token.RPAR) |
|---|
| 1394 | n/a | |
|---|
| 1395 | n/a | def test_pathological_trailing_whitespace(self): |
|---|
| 1396 | n/a | # See http://bugs.python.org/issue16152 |
|---|
| 1397 | n/a | self.assertExactTypeEqual('@ ', token.AT) |
|---|
| 1398 | n/a | |
|---|
| 1399 | n/a | |
|---|
| 1400 | n/a | class UntokenizeTest(TestCase): |
|---|
| 1401 | n/a | |
|---|
| 1402 | n/a | def test_bad_input_order(self): |
|---|
| 1403 | n/a | # raise if previous row |
|---|
| 1404 | n/a | u = Untokenizer() |
|---|
| 1405 | n/a | u.prev_row = 2 |
|---|
| 1406 | n/a | u.prev_col = 2 |
|---|
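| n/a | n/a | # add_whitespace() may only move forward through the source; a start of |
|---|
| n/a | n/a | # (1, 3) lies before the recorded end (2, 2), so it must raise. |
|---|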
| 1407 | n/a | with self.assertRaises(ValueError) as cm: |
|---|
| 1408 | n/a | u.add_whitespace((1,3)) |
|---|
| 1409 | n/a | self.assertEqual(cm.exception.args[0], |
|---|
| 1410 | n/a | 'start (1,3) precedes previous end (2,2)') |
|---|
| 1411 | n/a | # raise if previous column in row |
|---|
| 1412 | n/a | self.assertRaises(ValueError, u.add_whitespace, (2,1)) |
|---|
| 1413 | n/a | |
|---|
| 1414 | n/a | def test_backslash_continuation(self): |
|---|
| 1415 | n/a | # The problem is that <whitespace>\<newline> leaves no token |
|---|
| 1416 | n/a | u = Untokenizer() |
|---|
| 1417 | n/a | u.prev_row = 1 |
|---|
| 1418 | n/a | u.prev_col = 1 |
|---|
| 1419 | n/a | u.tokens = [] |
|---|
| 1420 | n/a | u.add_whitespace((2, 0)) |
|---|
| 1421 | n/a | self.assertEqual(u.tokens, ['\\\n']) |
|---|
| 1422 | n/a | u.prev_row = 2 |
|---|
| 1423 | n/a | u.add_whitespace((4, 4)) |
|---|
| 1424 | n/a | self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' ']) |
|---|
| 1425 | n/a | TestRoundtrip.check_roundtrip(self, 'a\n b\n c\n \\\n c\n') |
|---|
| 1426 | n/a | |
|---|
| 1427 | n/a | def test_iter_compat(self): |
|---|
| 1428 | n/a | u = Untokenizer() |
|---|
| 1429 | n/a | token = (NAME, 'Hello') |
|---|
| 1430 | n/a | tokens = [(ENCODING, 'utf-8'), token] |
|---|
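| n/a | n/a | # compat() is the 2-tuple path of untokenize(); it re-spaces tokens |
|---|
| n/a | n/a | # itself, which is why NAME comes back as 'Hello' plus a trailing space. |
|---|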
| 1431 | n/a | u.compat(token, iter([])) |
|---|
| 1432 | n/a | self.assertEqual(u.tokens, ["Hello "]) |
|---|
| 1433 | n/a | u = Untokenizer() |
|---|
| 1434 | n/a | self.assertEqual(u.untokenize(iter([token])), 'Hello ') |
|---|
| 1435 | n/a | u = Untokenizer() |
|---|
| 1436 | n/a | self.assertEqual(u.untokenize(iter(tokens)), 'Hello ') |
|---|
| 1437 | n/a | self.assertEqual(u.encoding, 'utf-8') |
|---|
| 1438 | n/a | self.assertEqual(untokenize(iter(tokens)), b'Hello ') |
|---|
| 1439 | n/a | |
|---|
| 1440 | n/a | |
|---|
| 1441 | n/a | class TestRoundtrip(TestCase): |
|---|
| 1442 | n/a | |
|---|
| 1443 | n/a | def check_roundtrip(self, f): |
|---|
| 1444 | n/a | """ |
|---|
| 1445 | n/a | Test roundtrip for `untokenize`. `f` is an open file or a string. |
|---|
| 1446 | n/a | The source code in f is tokenized to both 5- and 2-tuples. |
|---|
| 1447 | n/a | Both sequences are converted back to source code via |
|---|
| 1448 | n/a | tokenize.untokenize(), and each result is tokenized again to 2-tuples. |
|---|
| 1449 | n/a | The test fails if any of the three 2-tuple sequences differ. |
|---|
| 1450 | n/a | |
|---|
| 1451 | n/a | When the remaining untokenize bugs are fixed, untokenizing 5-tuples |
|---|
| 1452 | n/a | should reproduce source that contains no backslash continuation |
|---|
| 1453 | n/a | following spaces; a dedicated test should then verify this. |
|---|
| 1454 | n/a | """ |
|---|
| 1455 | n/a | # Get source code and original tokenizations |
|---|
| 1456 | n/a | if isinstance(f, str): |
|---|
| 1457 | n/a | code = f.encode('utf-8') |
|---|
| 1458 | n/a | else: |
|---|
| 1459 | n/a | code = f.read() |
|---|
| 1460 | n/a | f.close() |
|---|
| 1461 | n/a | readline = iter(code.splitlines(keepends=True)).__next__ |
|---|
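| n/a | n/a | # Emulate a file's readline() over in-memory bytes: splitlines(keepends=True) |
|---|
| n/a | n/a | # preserves line endings, and the iterator's __next__ plays the readline role. |
|---|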
| 1462 | n/a | tokens5 = list(tokenize(readline)) |
|---|
| 1463 | n/a | tokens2 = [tok[:2] for tok in tokens5] |
|---|
| 1464 | n/a | # Reproduce tokens2 from pairs |
|---|
| 1465 | n/a | bytes_from2 = untokenize(tokens2) |
|---|
| 1466 | n/a | readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ |
|---|
| 1467 | n/a | tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] |
|---|
| 1468 | n/a | self.assertEqual(tokens2_from2, tokens2) |
|---|
| 1469 | n/a | # Reproduce tokens2 from 5-tuples |
|---|
| 1470 | n/a | bytes_from5 = untokenize(tokens5) |
|---|
| 1471 | n/a | readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ |
|---|
| 1472 | n/a | tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] |
|---|
| 1473 | n/a | self.assertEqual(tokens2_from5, tokens2) |
|---|
| 1474 | n/a | |
|---|
| 1475 | n/a | def test_roundtrip(self): |
|---|
| 1476 | n/a | # There are some standard formatting practices that are easy to get right. |
|---|
| 1477 | n/a | |
|---|
| 1478 | n/a | self.check_roundtrip("if x == 1:\n" |
|---|
| 1479 | n/a | " print(x)\n") |
|---|
| 1480 | n/a | self.check_roundtrip("# This is a comment\n" |
|---|
| 1481 | n/a | "# This also") |
|---|
| 1482 | n/a | |
|---|
| 1483 | n/a | # Some people use different formatting conventions, which makes |
|---|
| 1484 | n/a | # untokenize a little trickier. Note that this test involves trailing |
|---|
| 1485 | n/a | # whitespace after the colon, which is easy to miss when reading |
|---|
| 1486 | n/a | # the source. |
|---|
| 1487 | n/a | |
|---|
| 1488 | n/a | self.check_roundtrip("if x == 1 : \n" |
|---|
| 1489 | n/a | " print(x)\n") |
|---|
| 1490 | n/a | fn = support.findfile("tokenize_tests.txt") |
|---|
| 1491 | n/a | with open(fn, 'rb') as f: |
|---|
| 1492 | n/a | self.check_roundtrip(f) |
|---|
| 1493 | n/a | self.check_roundtrip("if x == 1:\n" |
|---|
| 1494 | n/a | " # A comment by itself.\n" |
|---|
| 1495 | n/a | " print(x) # Comment here, too.\n" |
|---|
| 1496 | n/a | " # Another comment.\n" |
|---|
| 1497 | n/a | "after_if = True\n") |
|---|
| 1498 | n/a | self.check_roundtrip("if (x # The comments need to go in the right place\n" |
|---|
| 1499 | n/a | " == 1):\n" |
|---|
| 1500 | n/a | " print('x==1')\n") |
|---|
| 1501 | n/a | self.check_roundtrip("class Test: # A comment here\n" |
|---|
| 1502 | n/a | " # A comment with weird indent\n" |
|---|
| 1503 | n/a | " after_com = 5\n" |
|---|
| 1504 | n/a | " def x(m): return m*5 # a one liner\n" |
|---|
| 1505 | n/a | " def y(m): # A whitespace after the colon\n" |
|---|
| 1506 | n/a | " return y*4 # 3-space indent\n") |
|---|
| 1507 | n/a | |
|---|
| 1508 | n/a | # Some error-handling code |
|---|
| 1509 | n/a | self.check_roundtrip("try: import somemodule\n" |
|---|
| 1510 | n/a | "except ImportError: # comment\n" |
|---|
| 1511 | n/a | " print('Can not import' # comment2\n)" |
|---|
| 1512 | n/a | "else: print('Loaded')\n") |
|---|
| 1513 | n/a | |
|---|
| 1514 | n/a | def test_continuation(self): |
|---|
| 1515 | n/a | # Balancing continuation |
|---|
| 1516 | n/a | self.check_roundtrip("a = (3,4, \n" |
|---|
| 1517 | n/a | "5,6)\n" |
|---|
| 1518 | n/a | "y = [3, 4,\n" |
|---|
| 1519 | n/a | "5]\n" |
|---|
| 1520 | n/a | "z = {'a': 5,\n" |
|---|
| 1521 | n/a | "'b':15, 'c':True}\n" |
|---|
| 1522 | n/a | "x = len(y) + 5 - a[\n" |
|---|
| 1523 | n/a | "3] - a[2]\n" |
|---|
| 1524 | n/a | "+ len(z) - z[\n" |
|---|
| 1525 | n/a | "'b']\n") |
|---|
| 1526 | n/a | |
|---|
| 1527 | n/a | def test_backslash_continuation(self): |
|---|
| 1528 | n/a | # Backslash means line continuation, except for comments |
|---|
| 1529 | n/a | self.check_roundtrip("x=1+\\\n" |
|---|
| 1530 | n/a | "1\n" |
|---|
| 1531 | n/a | "# This is a comment\\\n" |
|---|
| 1532 | n/a | "# This also\n") |
|---|
| 1533 | n/a | self.check_roundtrip("# Comment \\\n" |
|---|
| 1534 | n/a | "x = 0") |
|---|
| 1535 | n/a | |
|---|
| 1536 | n/a | def test_string_concatenation(self): |
|---|
| 1537 | n/a | # Two string literals on the same line |
|---|
| 1538 | n/a | self.check_roundtrip("'' ''") |
|---|
| 1539 | n/a | |
|---|
| 1540 | n/a | def test_random_files(self): |
|---|
| 1541 | n/a | # Test roundtrip on random python modules. |
|---|
| 1542 | n/a | # Pass the '-ucpu' option to process the full directory. |
|---|
| 1543 | n/a | |
|---|
| 1544 | n/a | import glob, random |
|---|
| 1545 | n/a | fn = support.findfile("tokenize_tests.txt") |
|---|
| 1546 | n/a | tempdir = os.path.dirname(fn) or os.curdir |
|---|
| 1547 | n/a | testfiles = glob.glob(os.path.join(tempdir, "test*.py")) |
|---|
| 1548 | n/a | |
|---|
| 1549 | n/a | # Tokenize is broken on test_unicode_identifiers.py because regular |
|---|
| 1550 | n/a | # expressions are broken on the obscure unicode identifiers in it. *sigh* |
|---|
| 1551 | n/a | # With roundtrip extended to test the 5-tuple mode of untokenize, |
|---|
| 1552 | n/a | # 7 more testfiles fail. Remove them also until the failure is diagnosed. |
|---|
| 1553 | n/a | |
|---|
| 1554 | n/a | testfiles.remove(os.path.join(tempdir, "test_unicode_identifiers.py")) |
|---|
| 1555 | n/a | for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): |
|---|
| 1556 | n/a | testfiles.remove(os.path.join(tempdir, "test_%s.py" % f)) |
|---|
| 1557 | n/a | |
|---|
| 1558 | n/a | if not support.is_resource_enabled("cpu"): |
|---|
| 1559 | n/a | testfiles = random.sample(testfiles, 10) |
|---|
| 1560 | n/a | |
|---|
| 1561 | n/a | for testfile in testfiles: |
|---|
| 1562 | n/a | with open(testfile, 'rb') as f: |
|---|
| 1563 | n/a | with self.subTest(file=testfile): |
|---|
| 1564 | n/a | self.check_roundtrip(f) |
|---|
| 1565 | n/a | |
|---|
| 1567 | n/a | def roundtrip(self, code): |
|---|
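| n/a | n/a | # Full-fidelity helper: tokenize from in-memory bytes and untokenize |
|---|
| n/a | n/a | # straight back. untokenize() returns bytes here because the stream |
|---|
| n/a | n/a | # begins with an ENCODING token, hence the final decode. |
|---|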
| 1568 | n/a | if isinstance(code, str): |
|---|
| 1569 | n/a | code = code.encode('utf-8') |
|---|
| 1570 | n/a | return untokenize(tokenize(BytesIO(code).readline)).decode('utf-8') |
|---|
| 1571 | n/a | |
|---|
| 1572 | n/a | def test_indentation_semantics_retained(self): |
|---|
| 1573 | n/a | """ |
|---|
| 1574 | n/a | Ensure that although whitespace might be mutated in a roundtrip, |
|---|
| 1575 | n/a | the semantic meaning of the indentation remains consistent. |
|---|
| 1576 | n/a | """ |
|---|
| 1577 | n/a | code = "if False:\n\tx=3\n\tx=3\n" |
|---|
| 1578 | n/a | codelines = self.roundtrip(code).split('\n') |
|---|
| 1579 | n/a | self.assertEqual(codelines[1], codelines[2]) |
|---|
| 1580 | n/a | self.check_roundtrip(code) |
|---|
| 1581 | n/a | |
|---|
| 1582 | n/a | |
|---|
| 1583 | n/a | if __name__ == "__main__": |
|---|
| 1584 | n/a | import unittest  # bind the module when running this file directly |
|---|
| n/a | n/a | unittest.main() |
|---|