| 1 | n/a | """A parser for HTML and XHTML.""" |
|---|
| 2 | n/a | |
|---|
| 3 | n/a | # This file is based on sgmllib.py, but the API is slightly different. |
|---|
| 4 | n/a | |
|---|
| 5 | n/a | # XXX There should be a way to distinguish between PCDATA (parsed |
|---|
| 6 | n/a | # character data -- the normal case), RCDATA (replaceable character |
|---|
| 7 | n/a | # data -- only char and entity references and end tags are special) |
|---|
| 8 | n/a | # and CDATA (character data -- only end tags are special). |
|---|
| 9 | n/a | |
|---|
| 10 | n/a | |
|---|
| 11 | n/a | import re |
|---|
| 12 | n/a | import warnings |
|---|
| 13 | n/a | import _markupbase |
|---|
| 14 | n/a | |
|---|
| 15 | n/a | from html import unescape |
|---|
| 16 | n/a | |
|---|
| 17 | n/a | |
|---|
| 18 | n/a | __all__ = ['HTMLParser'] |
|---|
| 19 | n/a | |
|---|
| 20 | n/a | # Regular expressions used for parsing |
|---|
| 21 | n/a | |
|---|
| 22 | n/a | interesting_normal = re.compile('[&<]') |
|---|
| 23 | n/a | incomplete = re.compile('&[a-zA-Z#]') |
|---|
| 24 | n/a | |
|---|
| 25 | n/a | entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]') |
|---|
| 26 | n/a | charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') |
|---|
| 27 | n/a | |
|---|
| 28 | n/a | starttagopen = re.compile('<[a-zA-Z]') |
|---|
| 29 | n/a | piclose = re.compile('>') |
|---|
| 30 | n/a | commentclose = re.compile(r'--\s*>') |
|---|
| 31 | n/a | # Note: |
|---|
| 32 | n/a | # 1) if you change tagfind/attrfind remember to update locatestarttagend too; |
|---|
| 33 | n/a | # 2) if you change tagfind/attrfind and/or locatestarttagend the parser will |
|---|
| 34 | n/a | # explode, so don't do it. |
|---|
| 35 | n/a | # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state |
|---|
| 36 | n/a | # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state |
|---|
| 37 | n/a | tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') |
|---|
| 38 | n/a | attrfind_tolerant = re.compile( |
|---|
| 39 | n/a | r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' |
|---|
| 40 | n/a | r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') |
|---|
| 41 | n/a | locatestarttagend_tolerant = re.compile(r""" |
|---|
| 42 | n/a | <[a-zA-Z][^\t\n\r\f />\x00]* # tag name |
|---|
| 43 | n/a | (?:[\s/]* # optional whitespace before attribute name |
|---|
| 44 | n/a | (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name |
|---|
| 45 | n/a | (?:\s*=+\s* # value indicator |
|---|
| 46 | n/a | (?:'[^']*' # LITA-enclosed value |
|---|
| 47 | n/a | |"[^"]*" # LIT-enclosed value |
|---|
| 48 | n/a | |(?!['"])[^>\s]* # bare value |
|---|
| 49 | n/a | ) |
|---|
| 50 | n/a | (?:\s*,)* # possibly followed by a comma |
|---|
| 51 | n/a | )?(?:\s|/(?!>))* |
|---|
| 52 | n/a | )* |
|---|
| 53 | n/a | )? |
|---|
| 54 | n/a | \s* # trailing whitespace |
|---|
| 55 | n/a | """, re.VERBOSE) |
|---|
| 56 | n/a | endendtag = re.compile('>') |
|---|
| 57 | n/a | # the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between |
|---|
| 58 | n/a | # </ and the tag name, so maybe this should be fixed |
|---|
| 59 | n/a | endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>') |
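
# A rough sketch of how the tolerant patterns above carve up a start tag
# (informal illustration, not a doctest; the sample string is hypothetical):
#
#     >>> s = '<a href="x" disabled>'
#     >>> m = tagfind_tolerant.match(s, 1)        # tag name begins after '<'
#     >>> m.group(1), m.end()
#     ('a', 3)
#     >>> attrfind_tolerant.match(s, 3).group(1, 3)
#     ('href', '"x"')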


class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  If convert_charrefs is
    True, the character references are automatically converted to the
    corresponding Unicode characters (and self.handle_data() is no
    longer split in chunks); otherwise they are passed by calling
    self.handle_entityref() or self.handle_charref() with the string
    containing respectively the named or numeric reference as the
    argument.
    """

    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self, *, convert_charrefs=True):
        """Initialize and reset this instance.

        If convert_charrefs is True (the default), all character references
        are automatically converted to the corresponding Unicode characters.
        """
        self.convert_charrefs = convert_charrefs
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)
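
    # Note: feed() accepts arbitrarily split input; for example (illustration
    # only), p.feed('<di') followed by p.feed('v>') reports the same events as
    # p.feed('<div>'), because incomplete constructs stay buffered in
    # self.rawdata until more data or close() arrives.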

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None
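
    # In CDATA mode the content of a <script>/<style> element is not scanned
    # for markup or character references; e.g. feeding '<script>1 < 2</script>'
    # results (roughly) in handle_starttag('script', []), handle_data('1 < 2')
    # and handle_endtag('script').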

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.convert_charrefs and not self.cdata_elem:
                j = rawdata.find('<', i)
                if j < 0:
                    # if we can't find the next <, either we are at the end
                    # or there's more text incoming.  If the latter is True,
                    # we can't pass the text to handle_data in case we have
                    # a charref cut in half at end.  Try to determine if
                    # this is the case before proceeding by looking for an
                    # & near the end and see if it's followed by a space or ;.
                    amppos = rawdata.rfind('&', max(i, n-34))
                    if (amppos >= 0 and
                        not re.compile(r'[\s;]').search(rawdata, amppos)):
                        break  # wait till we get all the text
                    j = n
            else:
                match = self.interesting.search(rawdata, i)  # < or &
                if match:
                    j = match.start()
                else:
                    if self.cdata_elem:
                        break
                    j = n
            if i < j:
                if self.convert_charrefs and not self.cdata_elem:
                    self.handle_data(unescape(rawdata[i:j]))
                else:
                    self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if not end:
                        break
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    if self.convert_charrefs and not self.cdata_elem:
                        self.handle_data(unescape(rawdata[i:k]))
                    else:
                        self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming &#
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        k = match.end()
                        if k <= i:
                            k = n
                        i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            if self.convert_charrefs and not self.cdata_elem:
                self.handle_data(unescape(rawdata[i:n]))
            else:
                self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]
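
    # With convert_charrefs enabled (the default), a chunk such as 'a &gt; b'
    # reaches handle_data() already unescaped as 'a > b'; with it disabled the
    # same input is reported as handle_data('a '), handle_entityref('gt'),
    # handle_data(' b').  (Informal sketch, not an exhaustive description.)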

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                        'parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)
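
    # For instance, feeding '<!DOCTYPE html>' ends up here and calls
    # handle_decl('DOCTYPE html'); an unrecognized '<!...>' construct falls
    # through to parse_bogus_comment() instead.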

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1
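
    # Example (illustrative): '<! not a real decl >' is reported via
    # handle_comment(' not a real decl ') rather than handle_decl().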

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j
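
    # Example (illustrative): '<?target data>' calls handle_pi('target data').
    # Note that the PI ends at the first '>', not at '?>' as in XML.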

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind_tolerant.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos
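
    # Example (illustrative): parsing '<a href="x">' reports
    # handle_starttag('a', [('href', 'x')]) -- tag and attribute names are
    # lowercased, quotes are stripped and attribute values are unescaped.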

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1)  # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group(1).lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1

        elem = match.group(1).lower()  # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos
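
    # Example (illustrative): '</p >' is accepted and calls handle_endtag('p').
    # While in CDATA mode (inside <script>/<style>), end tags that do not
    # match the open element are treated as ordinary text data.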

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        pass

    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        warnings.warn('The unescape method is deprecated and will be removed '
                      'in 3.5, use html.unescape() instead.',
                      DeprecationWarning, stacklevel=2)
        return unescape(s)