Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit e4d4b15

Browse files
committed
Move where we concatenate tokens to handle ignoreErrorOrder
This was causing one of the tokenizer test failures.
1 parent d9b1a9f commit e4d4b15

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

html5lib/tests/test_tokenizer.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@ def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
109109
token.pop()
110110

111111
if not ignoreErrorOrder and not ignoreErrors:
112+
expectedTokens = concatenateCharacterTokens(expectedTokens)
112113
return expectedTokens == receivedTokens
113114
else:
114115
# Sort the tokens into two groups; non-parse errors and parse errors
@@ -121,6 +122,7 @@ def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
121122
else:
122123
if not ignoreErrors:
123124
tokens[tokenType][1].append(token)
125+
tokens[tokenType][0] = concatenateCharacterTokens(tokens[tokenType][0])
124126
return tokens["expected"] == tokens["received"]
125127

126128

@@ -174,7 +176,7 @@ def runTokenizerTest(test):
174176
warnings.resetwarnings()
175177
warnings.simplefilter("error")
176178

177-
expected = concatenateCharacterTokens(test['output'])
179+
expected = test['output']
178180
if 'lastStartTag' not in test:
179181
test['lastStartTag'] = None
180182
parser = TokenizerTestParser(test['initialState'],

0 commit comments

Comments (0)