diff --git a/hgdemandimport/py3tokenize.py b/hgdemandimport/py3tokenize.py
--- a/hgdemandimport/py3tokenize.py
+++ b/hgdemandimport/py3tokenize.py
@@ -62,6 +62,7 @@
 # * Adjusted for relative imports.
 # * absolute_import added.
 # * Removed re.ASCII.
+# * Various backports to work on Python 2.7.
 
 from __future__ import absolute_import
 
@@ -256,7 +257,7 @@
 class StopTokenizing(Exception):
     pass
 
-class Untokenizer:
+class Untokenizer(object):
 
     def __init__(self):
         self.tokens = []
@@ -503,11 +504,22 @@
     """
     # This import is here to avoid problems when the itertools module is not
     # built yet and tokenize is imported.
-    from itertools import chain, repeat
+    from itertools import repeat
     encoding, consumed = detect_encoding(readline)
-    rl_gen = iter(readline, b"")
-    empty = repeat(b"")
-    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+
+    def lines():
+        for line in consumed:
+            yield line
+
+        while True:
+            try:
+                yield readline()
+            except StopIteration:
+                break
+
+        for line in repeat(b''): yield line  # pad EOF with b'' forever, like chain(..., repeat(b''))
+
+    return _tokenize(lines(), encoding)
 
 
 def _tokenize(readline, encoding):
@@ -531,7 +543,7 @@
             # hence `line` itself will always be overwritten at the end
             # of this loop.
             last_line = line
-            line = readline()
+            line = next(readline)
         except StopIteration:
             line = b''