# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Pgen imports
from . import grammar, token, tokenize

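# pgen reads a grammar file in an EBNF-like notation, builds an NFA per
# rule, turns each NFA into a DFA, simplifies the DFAs, and packs the
# result into a PgenGrammar (a grammar.Grammar subclass).
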
class PgenGrammar(grammar.Grammar):
    pass

class ParserGenerator(object):

    def __init__(self, filename, stream=None):
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken()  # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {}  # map from symbol name to set of tokens
        self.addfirstsets()

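    # Nonterminals are numbered from 256 up so they can never collide
    # with token numbers, which stay below 256; the start symbol is
    # moved to the front of the name list so it receives the first
    # number.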
    def make_grammar(self):
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.items()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
        rawfirst = self.first[name]
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

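    # A label in the grammar file takes one of four forms, e.g.:
    #   expr    - a nonterminal (a key of c.symbol2number)
    #   NAME    - a named token type from the token module
    #   'if'    - a quoted keyword (its value starts with a letter)
    #   '+'     - a quoted operator (looked up in grammar.opmap)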
    def make_label(self, c, label):
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value]  # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

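    # For example, given a rule like
    #     atom: '(' expr ')' | NAME
    # the computed first set is {"'('": 1, "NAME": 1}: the labels on
    # the arcs leaving the rule's initial DFA state, with nonterminal
    # labels expanded recursively into their own first sets.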
    def calcfirst(self, name):
        dfa = self.dfas[name]
        self.first[name] = None  # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

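    # The grammar file itself is read with a small recursive-descent
    # parser (parse_rhs / parse_alt / parse_item / parse_atom below);
    # each rule occupies one logical line, e.g.:
    #     expr: term (('+' | '-') term)*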
    def parse(self):
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol

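    # Classic subset construction: every DFA state stands for the
    # epsilon-closure of a set of NFA states, so e.g. an epsilon arc
    # out of the NFA start state pulls its target into the initial DFA
    # state, whose outgoing arcs are the union of the labeled arcs of
    # all the NFA states it contains.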
    def make_dfa(self, start, finish):
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states:  # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.items()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states  # List of DFAState instances; first one is start

    def dump_nfa(self, name, start, finish):
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name, dfa):
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in sorted(state.arcs.items()):
                print("    %s -> %d" % (label, dfa.index(next)))

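    # Note: this is a conservative merging pass rather than full DFA
    # minimization (e.g. Hopcroft's algorithm); it only unifies states
    # whose arcs are already identical, which suffices for the small
    # machines pgen builds.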
    def simplify_dfa(self, dfa):
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

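    # NFA construction in the style of Thompson's construction: each
    # parse_* method returns a (start, end) pair of NFAStates, and
    # alternatives are joined by epsilon arcs (addarc with the default
    # label=None) from a fresh shared start to a fresh shared end.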
    def parse_rhs(self):
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

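    # Repetition is a back arc: for ATOM '+' the end state loops back
    # to the start (one or more passes required), while for ATOM '*'
    # the start state doubles as the end, so the item may be skipped.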
    def parse_item(self):
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))

class NFAState(object):

    def __init__(self):
        self.arcs = []  # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))

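# A DFAState represents a *set* of NFA states (an epsilon-closure);
# it is final iff the NFA's finish state is a member of that set.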
class DFAState(object):

    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {}  # map from label to DFAState

    def addarc(self, next, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None  # For Py3 compatibility.

def generate_grammar(filename="Grammar.txt"):
    p = ParserGenerator(filename)
    return p.make_grammar()
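
# Example usage (illustrative; assumes a pgen-format grammar file such
# as CPython's Grammar/Grammar is available on disk):
#
#     from lib2to3.pgen2 import pgen
#     g = pgen.generate_grammar("Grammar.txt")
#     print(g.start)   # numeric code of the start symbol
#     g.report()       # grammar.Grammar.report() dumps the tables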