Revert breakage of test_tokenizer.py from accidental checkin of local modifications · html5lib/html5lib-python@ae0e9ea · GitHub
  • Commit ae0e9ea

    Revert breakage of test_tokenizer.py from accidental checkin of local modifications

    --HG--
    extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40889
    1 parent 16c86c2 commit ae0e9ea

    File tree

    1 file changed: +8 -7 lines changed


    tests/test_tokenizer.py

    Lines changed: 8 additions & 7 deletions
    @@ -37,6 +37,8 @@ def processEmptyTag(self, token):
             self.outputTokens.append([u"StartTag", token["name"], token["data"]])
     
         def processEndTag(self, token):
    +        if token["data"]:
    +            self.processParseError(None)
             self.outputTokens.append([u"EndTag", token["name"]])
     
         def processComment(self, token):
    @@ -53,7 +55,7 @@ def processEOF(self, token):
             pass
     
         def processParseError(self, token):
    -        self.outputTokens.append([u"ParseError", token["data"]])
    +        self.outputTokens.append(u"ParseError")
     
     def concatenateCharacterTokens(tokens):
         outputTokens = []
    @@ -71,10 +73,9 @@ def concatenateCharacterTokens(tokens):
     def normalizeTokens(tokens):
         """ convert array of attributes to a dictionary """
         # TODO: convert tests to reflect arrays
    -    for i, token in enumerate(tokens):
    -        if token[0] == u'ParseError':
    -            tokens[i] = token[0]
    -            #token[2] = dict(token[2][::-1])
    +    for token in tokens:
    +        if token[0] == 'StartTag':
    +            token[2] = dict(token[2][::-1])
         return tokens
     
     def tokensMatch(expectedTokens, recievedTokens):
    @@ -101,14 +102,14 @@ def runTokenizerTest(self, test):
             test['lastStartTag'] = None
             parser = TokenizerTestParser(test['contentModelFlag'],
                                          test['lastStartTag'])
    -        tokens = parser.parse(test['input'])
    +
    +        tokens = normalizeTokens(parser.parse(test['input']))
             tokens = concatenateCharacterTokens(tokens)
             errorMsg = "\n".join(["\n\nContent Model Flag:",
                                   test['contentModelFlag'] ,
                                   "\nInput:", str(test['input']),
                                   "\nExpected:", str(output),
                                   "\nRecieved:", str(tokens)])
    -        tokens = normalizeTokens(tokens)
             self.assertEquals(tokensMatch(tokens, output), True, errorMsg)
     
     def buildTestSuite():
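    A note on the restored normalizeTokens logic: Python's dict() keeps the last value for a repeated key, so reversing the attribute list first (token[2][::-1]) makes the first occurrence of a duplicate attribute win, which is how an HTML tokenizer treats repeated attributes. A minimal standalone sketch of just that step follows; the sample token is made up for illustration and is not taken from the test data.

    # Minimal sketch of the attribute normalization restored in this commit.
    # normalize_start_tag and the sample token below are illustrative only.

    def normalize_start_tag(token):
        """Convert a [u"StartTag", name, [(attr, value), ...]] token's
        attribute list into a dict, with the first occurrence winning."""
        if token[0] == 'StartTag':
            # dict() keeps the *last* value for a repeated key, so the
            # list is reversed first so the *first* occurrence wins.
            token[2] = dict(token[2][::-1])
        return token

    token = [u"StartTag", u"a", [(u"href", u"first"), (u"href", u"second")]]
    print(normalize_start_tag(token))
    # -> the attribute dict maps u"href" to u"first"

    In the test harness this runs over the whole token stream before comparison, which is why runTokenizerTest now normalizes immediately after parsing rather than after building the error message.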
