SILENT KILLER Panel

Current Path: /opt/alt/python35/lib64/python3.5/


Operating System : Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software         : Apache
Server IP        : 162.0.232.56 | Your IP: 216.73.216.111
Domains          : 1034 Domain(s)
Permission       : [ 0755 ]

Files and Folders in: /opt/alt/python35/lib64/python3.5/

Name    Type    Size    Last Modified    Actions
__pycache__ Directory - -
asyncio Directory - -
collections Directory - -
concurrent Directory - -
config-3.5m Directory - -
ctypes Directory - -
curses Directory - -
dbm Directory - -
distutils Directory - -
email Directory - -
encodings Directory - -
ensurepip Directory - -
html Directory - -
http Directory - -
idlelib Directory - -
importlib Directory - -
json Directory - -
lib-dynload Directory - -
lib2to3 Directory - -
logging Directory - -
multiprocessing Directory - -
plat-linux Directory - -
pydoc_data Directory - -
site-packages Directory - -
sqlite3 Directory - -
test Directory - -
unittest Directory - -
urllib Directory - -
venv Directory - -
wsgiref Directory - -
xml Directory - -
xmlrpc Directory - -
__future__.py File 4841 bytes May 31 2024 13:51:46.
__phello__.foo.py File 64 bytes May 31 2024 13:51:44.
_bootlocale.py File 1301 bytes May 31 2024 13:51:40.
_collections_abc.py File 24794 bytes May 31 2024 13:51:40.
_compat_pickle.py File 8556 bytes May 31 2024 13:51:43.
_compression.py File 5340 bytes May 31 2024 13:51:41.
_dummy_thread.py File 5118 bytes May 31 2024 13:51:44.
_markupbase.py File 14598 bytes May 31 2024 13:51:40.
_osx_support.py File 19115 bytes May 31 2024 13:51:44.
_pydecimal.py File 228628 bytes May 31 2024 13:51:40.
_pyio.py File 87968 bytes May 31 2024 13:51:41.
_sitebuiltins.py File 3115 bytes May 31 2024 13:51:41.
_strptime.py File 22185 bytes May 31 2024 13:51:45.
_sysconfigdata.py File 26951 bytes May 31 2024 13:51:44.
_threading_local.py File 7410 bytes May 31 2024 13:51:40.
_weakrefset.py File 5705 bytes May 31 2024 13:51:40.
abc.py File 8628 bytes May 31 2024 13:51:40.
aifc.py File 31967 bytes May 31 2024 13:51:45.
antigravity.py File 476 bytes May 31 2024 13:51:40.
argparse.py File 90138 bytes May 31 2024 13:51:44.
ast.py File 12001 bytes May 31 2024 13:51:44.
asynchat.py File 11971 bytes May 31 2024 13:51:44.
asyncore.py File 20104 bytes May 31 2024 13:51:46.
base64.py File 20522 bytes May 31 2024 13:51:40.
bdb.py File 23354 bytes May 31 2024 13:51:43.
binhex.py File 13954 bytes May 31 2024 13:51:40.
bisect.py File 2595 bytes May 31 2024 13:51:40.
bz2.py File 12424 bytes May 31 2024 13:51:44.
cProfile.py File 5324 bytes May 31 2024 13:51:40.
calendar.py File 22998 bytes May 31 2024 13:51:44.
cgi.py File 36046 bytes May 31 2024 13:51:44.
cgitb.py File 12023 bytes May 31 2024 13:51:46.
chunk.py File 5425 bytes May 31 2024 13:51:41.
cmd.py File 14860 bytes May 31 2024 13:51:40.
code.py File 10118 bytes May 31 2024 13:51:40.
codecs.py File 36231 bytes May 31 2024 13:51:40.
codeop.py File 5994 bytes May 31 2024 13:51:40.
colorsys.py File 4064 bytes May 31 2024 13:51:40.
compileall.py File 11711 bytes May 31 2024 13:51:40.
configparser.py File 53452 bytes May 31 2024 13:51:40.
contextlib.py File 12451 bytes May 31 2024 13:51:40.
copy.py File 8946 bytes May 31 2024 13:51:40.
copyreg.py File 6833 bytes May 31 2024 13:51:44.
crypt.py File 1879 bytes May 31 2024 13:51:40.
csv.py File 16128 bytes May 31 2024 13:51:40.
datetime.py File 75899 bytes May 31 2024 13:51:46.
decimal.py File 320 bytes May 31 2024 13:51:44.
difflib.py File 84204 bytes May 31 2024 13:51:41.
dis.py File 17350 bytes May 31 2024 13:51:40.
doctest.py File 104036 bytes May 31 2024 13:51:40.
dummy_threading.py File 2815 bytes May 31 2024 13:51:40.
enum.py File 22226 bytes May 31 2024 13:51:40.
filecmp.py File 9830 bytes May 31 2024 13:51:40.
fileinput.py File 14259 bytes May 31 2024 13:51:41.
fnmatch.py File 3163 bytes May 31 2024 13:51:40.
formatter.py File 15143 bytes May 31 2024 13:51:40.
fractions.py File 24612 bytes May 31 2024 13:51:40.
ftplib.py File 34951 bytes May 31 2024 13:51:40.
functools.py File 28944 bytes May 31 2024 13:51:46.
genericpath.py File 4364 bytes May 31 2024 13:51:46.
getopt.py File 7489 bytes May 31 2024 13:51:44.
getpass.py File 5994 bytes May 31 2024 13:51:40.
gettext.py File 21530 bytes May 31 2024 13:51:44.
glob.py File 5072 bytes May 31 2024 13:51:40.
gzip.py File 20260 bytes May 31 2024 13:51:44.
hashlib.py File 7979 bytes May 31 2024 13:51:46.
heapq.py File 22929 bytes May 31 2024 13:51:40.
hmac.py File 5063 bytes May 31 2024 13:51:41.
imaplib.py File 52183 bytes May 31 2024 13:51:44.
imghdr.py File 3758 bytes May 31 2024 13:51:44.
imp.py File 10676 bytes May 31 2024 13:51:40.
inspect.py File 114199 bytes May 31 2024 13:51:43.
io.py File 3396 bytes May 31 2024 13:51:40.
ipaddress.py File 75733 bytes May 31 2024 13:51:44.
keyword.py File 2222 bytes May 31 2024 13:51:44.
linecache.py File 5312 bytes May 31 2024 13:51:40.
locale.py File 74713 bytes May 31 2024 13:51:44.
lzma.py File 12925 bytes May 31 2024 13:51:45.
macpath.py File 5907 bytes May 31 2024 13:51:40.
macurl2path.py File 2732 bytes May 31 2024 13:51:40.
mailbox.py File 78418 bytes May 31 2024 13:51:43.
mailcap.py File 8104 bytes May 31 2024 13:51:40.
mimetypes.py File 20847 bytes May 31 2024 13:51:44.
modulefinder.py File 23085 bytes May 31 2024 13:51:40.
netrc.py File 5748 bytes May 31 2024 13:51:41.
nntplib.py File 43078 bytes May 31 2024 13:51:40.
ntpath.py File 22793 bytes May 31 2024 13:51:40.
nturl2path.py File 2444 bytes May 31 2024 13:51:44.
numbers.py File 10243 bytes May 31 2024 13:51:45.
opcode.py File 5885 bytes May 31 2024 13:51:46.
operator.py File 10863 bytes May 31 2024 13:51:44.
optparse.py File 60344 bytes May 31 2024 13:51:44.
os.py File 37033 bytes May 31 2024 13:51:40.
pathlib.py File 47001 bytes May 31 2024 13:51:44.
pdb.py File 61149 bytes May 31 2024 13:51:40.
pickle.py File 56176 bytes May 31 2024 13:51:41.
pickletools.py File 91761 bytes May 31 2024 13:51:40.
pipes.py File 8916 bytes May 31 2024 13:51:44.
pkgutil.py File 21355 bytes May 31 2024 13:51:40.
platform.py File 46147 bytes May 31 2024 13:51:40.
plistlib.py File 31810 bytes May 31 2024 13:51:40.
poplib.py File 14717 bytes May 31 2024 13:51:40.
posixpath.py File 14911 bytes May 31 2024 13:51:40.
pprint.py File 20860 bytes May 31 2024 13:51:40.
profile.py File 22032 bytes May 31 2024 13:51:41.
pstats.py File 26564 bytes May 31 2024 13:51:40.
pty.py File 4763 bytes May 31 2024 13:51:40.
py_compile.py File 7181 bytes May 31 2024 13:51:44.
pyclbr.py File 13564 bytes May 31 2024 13:51:39.
pydoc.py File 103652 bytes May 31 2024 13:51:40.
queue.py File 8780 bytes May 31 2024 13:51:44.
quopri.py File 7265 bytes May 31 2024 13:51:44.
random.py File 26463 bytes May 31 2024 13:51:40.
re.py File 15501 bytes May 31 2024 13:51:40.
reprlib.py File 5336 bytes May 31 2024 13:51:40.
rlcompleter.py File 6307 bytes May 31 2024 13:51:46.
runpy.py File 11959 bytes May 31 2024 13:51:40.
sched.py File 6216 bytes May 31 2024 13:51:44.
selectors.py File 19438 bytes May 31 2024 13:51:40.
shelve.py File 8528 bytes May 31 2024 13:51:44.
shlex.py File 11448 bytes May 31 2024 13:51:46.
shutil.py File 40048 bytes May 31 2024 13:51:44.
signal.py File 2123 bytes May 31 2024 13:51:44.
site.py File 21509 bytes May 31 2024 13:51:43.
smtpd.py File 35373 bytes May 31 2024 13:51:40.
smtplib.py File 43635 bytes May 31 2024 13:51:40.
sndhdr.py File 6418 bytes May 31 2024 13:51:44.
socket.py File 27135 bytes May 31 2024 13:51:45.
socketserver.py File 24666 bytes May 31 2024 13:51:46.
sre_compile.py File 18410 bytes May 31 2024 13:51:40.
sre_constants.py File 6821 bytes May 31 2024 13:51:40.
sre_parse.py File 35117 bytes May 31 2024 13:51:40.
ssl.py File 42352 bytes May 31 2024 13:51:44.
stat.py File 5038 bytes May 31 2024 13:51:44.
statistics.py File 19533 bytes May 31 2024 13:51:40.
string.py File 11854 bytes May 31 2024 13:51:44.
stringprep.py File 12917 bytes May 31 2024 13:51:41.
struct.py File 257 bytes May 31 2024 13:51:40.
subprocess.py File 58912 bytes May 31 2024 13:51:40.
sunau.py File 18095 bytes May 31 2024 13:51:40.
symbol.py File 2106 bytes May 31 2024 13:51:40.
symtable.py File 7191 bytes May 31 2024 13:51:44.
sysconfig.py File 24462 bytes May 31 2024 13:51:44.
tabnanny.py File 11414 bytes May 31 2024 13:51:44.
tarfile.py File 93070 bytes May 31 2024 13:51:40.
telnetlib.py File 23016 bytes May 31 2024 13:51:40.
tempfile.py File 26636 bytes May 31 2024 13:51:40.
textwrap.py File 19558 bytes May 31 2024 13:51:40.
this.py File 1003 bytes May 31 2024 13:51:41.
threading.py File 49027 bytes May 31 2024 13:51:44.
timeit.py File 12403 bytes May 31 2024 13:51:40.
token.py File 3075 bytes May 31 2024 13:51:40.
tokenize.py File 27790 bytes May 31 2024 13:51:44.
trace.py File 31553 bytes May 31 2024 13:51:40.
traceback.py File 22175 bytes May 31 2024 13:51:44.
tracemalloc.py File 15641 bytes May 31 2024 13:51:44.
tty.py File 879 bytes May 31 2024 13:51:40.
types.py File 8799 bytes May 31 2024 13:51:40.
typing.py File 80135 bytes May 31 2024 13:51:44.
uu.py File 6766 bytes May 31 2024 13:51:40.
uuid.py File 23261 bytes May 31 2024 13:51:40.
warnings.py File 15850 bytes May 31 2024 13:51:40.
wave.py File 17682 bytes May 31 2024 13:51:40.
weakref.py File 20466 bytes May 31 2024 13:51:44.
webbrowser.py File 21828 bytes May 31 2024 13:51:44.
xdrlib.py File 5913 bytes May 31 2024 13:51:46.
zipapp.py File 7157 bytes May 31 2024 13:51:40.
zipfile.py File 73672 bytes May 31 2024 13:51:46.

Reading File: /opt/alt/python35/lib64/python3.5/tokenize.py

"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@':   AT,
    '@=':  ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
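# Editor's note: an illustrative sketch, not part of the original file.
# exact_type refines the generic OP type into the specific operator token:
#
#     from io import BytesIO
#     plus = [t for t in tokenize(BytesIO(b"a + b\n").readline)
#             if t.type == OP][0]
#     tok_name[plus.type]        # 'OP'
#     tok_name[plus.exact_type]  # 'PLUS'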

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)

endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "rb'''", 'rb"""', "rB'''", 'rB"""',
          "Rb'''", 'Rb"""', "RB'''", 'RB"""',
          "u'''", 'u"""', "U'''", 'U"""',
          ):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ,
          "rb'", 'rb"', "rB'", 'rB"',
          "Rb'", 'Rb"', "RB'", 'RB"' ,
          "u'", 'u"', "U'", 'U"',
          ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
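# Editor's note: an illustrative sketch, not part of the original file.
# detect_encoding() honors a PEP 263 coding cookie and normalizes the name,
# so 'latin-1' comes back as 'iso-8859-1':
#
#     from io import BytesIO
#     buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
#     enc, consumed = detect_encoding(buf.readline)
#     # enc == 'iso-8859-1'; consumed holds the raw line(s) already read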


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if stashed:
                        yield stashed
                        stashed = None
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                        if async_def:
                            async_def_nl = True

                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield TokenInfo(
                                ASYNC if token == 'async' else AWAIT,
                                token, spos, epos, line)
                            continue

                    tok = TokenInfo(NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                                and stashed.type == NAME
                                and stashed.string == 'async'):

                            async_def = True
                            async_def_indent = indents[-1]

                            yield TokenInfo(ASYNC, stashed.string,
                                            stashed.start, stashed.end,
                                            stashed.line)
                            stashed = None

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    if stashed:
        yield stashed
        stashed = None

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
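# Editor's note: an illustrative sketch, not part of the original file.
# Unlike tokenize(), generate_tokens() consumes str lines and skips encoding
# detection, so io.StringIO is enough:
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)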

def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
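# Editor's note: an illustrative sketch, not part of the original file; the
# path below is hypothetical.  The module doubles as a command-line tool, and
# -e prints exact operator token names:
#
#     python3.5 -m tokenize -e /path/to/some_module.py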

SILENT KILLER Tool