Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit d1c7b1a

Browse files
committed
#13993: merge with 3.2.
2 parents 3dc74c0 + 5211ffe commit d1c7b1a

3 files changed

Lines changed: 71 additions & 17 deletions

File tree

Lib/html/parser.py

Lines changed: 27 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,9 @@
2323
piclose = re.compile('>')
2424
commentclose = re.compile(r'--\s*>')
2525
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
26+
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
27+
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
28+
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
2629
# Note, the strict one of this pair isn't really strict, but we can't
2730
# make it correctly strict without breaking backward compatibility.
2831
attrfind = re.compile(
@@ -270,7 +273,7 @@ def goahead(self, end):
270273
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
271274
def parse_bogus_comment(self, i, report=1):
272275
rawdata = self.rawdata
273-
if rawdata[i:i+2] != '<!':
276+
if rawdata[i:i+2] not in ('<!', '</'):
274277
self.error('unexpected call to parse_comment()')
275278
pos = rawdata.find('>', i+2)
276279
if pos == -1:
@@ -398,31 +401,40 @@ def parse_endtag(self, i):
398401
match = endendtag.search(rawdata, i+1) # >
399402
if not match:
400403
return -1
401-
j = match.end()
404+
gtpos = match.end()
402405
match = endtagfind.match(rawdata, i) # </ + tag + >
403406
if not match:
404407
if self.cdata_elem is not None:
405-
self.handle_data(rawdata[i:j])
406-
return j
408+
self.handle_data(rawdata[i:gtpos])
409+
return gtpos
407410
if self.strict:
408-
self.error("bad end tag: %r" % (rawdata[i:j],))
409-
k = rawdata.find('<', i + 1, j)
410-
if k > i:
411-
j = k
412-
if j <= i:
413-
j = i + 1
414-
self.handle_data(rawdata[i:j])
415-
return j
411+
self.error("bad end tag: %r" % (rawdata[i:gtpos],))
412+
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
413+
namematch = tagfind_tolerant.match(rawdata, i+2)
414+
if not namematch:
415+
# w3.org/TR/html5/tokenization.html#end-tag-open-state
416+
if rawdata[i:i+3] == '</>':
417+
return i+3
418+
else:
419+
return self.parse_bogus_comment(i)
420+
tagname = namematch.group().lower()
421+
# consume and ignore other stuff between the name and the >
422+
# Note: this is not 100% correct, since we might have things like
423+
# </tag attr=">">, but looking for > after the name should cover
424+
# most of the cases and is much simpler
425+
gtpos = rawdata.find('>', namematch.end())
426+
self.handle_endtag(tagname)
427+
return gtpos+1
416428

417429
elem = match.group(1).lower() # script or style
418430
if self.cdata_elem is not None:
419431
if elem != self.cdata_elem:
420-
self.handle_data(rawdata[i:j])
421-
return j
432+
self.handle_data(rawdata[i:gtpos])
433+
return gtpos
422434

423435
self.handle_endtag(elem.lower())
424436
self.clear_cdata_mode()
425-
return j
437+
return gtpos
426438

427439
# Overridable -- finish processing of start+end tag: <tag.../>
428440
def handle_startendtag(self, tag, attrs):

Lib/test/test_htmlparser.py

Lines changed: 41 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -364,8 +364,9 @@ def test_tolerant_parsing(self):
364364
('data', '<<bc'),
365365
('endtag', 'a'),
366366
('endtag', 'html'),
367-
('data', '\n<img src="URL><//img></html'),
368-
('endtag', 'html')])
367+
('data', '\n<img src="URL>'),
368+
('comment', '/img'),
369+
('endtag', 'html<')])
369370

370371
def test_with_unquoted_attributes(self):
371372
# see #12008
@@ -403,6 +404,44 @@ def test_weird_chars_in_unquoted_attribute_values(self):
403404
('starttag', 'form',
404405
[('action', 'bogus|&#()value')])])
405406

407+
def test_invalid_end_tags(self):
408+
# A collection of broken end tags. <br> is used as separator.
409+
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
410+
# and #13993
411+
html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
412+
'</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
413+
expected = [('starttag', 'br', []),
414+
# < is part of the name, / is discarded, p is an attribute
415+
('endtag', 'label<'),
416+
('starttag', 'br', []),
417+
# text and attributes are discarded
418+
('endtag', 'div'),
419+
('starttag', 'br', []),
420+
# comment because the first char after </ is not a-zA-Z
421+
('comment', '<h4'),
422+
('starttag', 'br', []),
423+
# attributes are discarded
424+
('endtag', 'li'),
425+
('starttag', 'br', []),
426+
# everything till ul (included) is discarded
427+
('endtag', 'li'),
428+
('starttag', 'br', []),
429+
# </> is ignored
430+
('starttag', 'br', [])]
431+
self._run_check(html, expected)
432+
433+
def test_broken_invalid_end_tag(self):
434+
# This is technically wrong (the "> shouldn't be included in the 'data')
435+
# but is probably not worth fixing it (in addition to all the cases of
436+
# the previous test, it would require a full attribute parsing).
437+
# see #13993
438+
html = '<b>This</b attr=">"> confuses the parser'
439+
expected = [('starttag', 'b', []),
440+
('data', 'This'),
441+
('endtag', 'b'),
442+
('data', '"> confuses the parser')]
443+
self._run_check(html, expected)
444+
406445
def test_correct_detection_of_start_tags(self):
407446
# see #13273
408447
html = ('<div style="" ><b>The <a href="some_url">rain</a> '

Misc/NEWS

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -466,6 +466,9 @@ Core and Builtins
466466
Library
467467
-------
468468

469+
- Issue #13993: HTMLParser is now able to handle broken end tags when
470+
strict=False.
471+
469472
- Issue #13930: lib2to3 now supports writing converted output files to another
470473
directory tree as well as copying unchanged files and altering the file
471474
suffix.

0 commit comments

Comments
 (0)