SILENT KILLER Panel

Current Path: / > opt > alt > python36 > lib64 > python3.6


Operating System : Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software     : Apache
Server IP    : 162.0.232.56 | Your IP: 216.73.216.111
Domains      : 1034 Domain(s)
Permission   : [ 0755 ]

Files and Folders in: /opt/alt/python36/lib64/python3.6

Name    Type    Size    Last Modified    Actions
__pycache__ Directory - -
asyncio Directory - -
collections Directory - -
concurrent Directory - -
config-3.6m Directory - -
ctypes Directory - -
curses Directory - -
dbm Directory - -
distutils Directory - -
email Directory - -
encodings Directory - -
ensurepip Directory - -
html Directory - -
http Directory - -
idlelib Directory - -
importlib Directory - -
json Directory - -
lib-dynload Directory - -
lib2to3 Directory - -
logging Directory - -
multiprocessing Directory - -
pydoc_data Directory - -
site-packages Directory - -
sqlite3 Directory - -
test Directory - -
unittest Directory - -
urllib Directory - -
venv Directory - -
wsgiref Directory - -
xml Directory - -
xmlrpc Directory - -
__future__.py File 4841 bytes April 17 2024 17:19:39.
__phello__.foo.py File 64 bytes April 17 2024 17:19:38.
_bootlocale.py File 1301 bytes April 17 2024 17:19:35.
_collections_abc.py File 26392 bytes April 17 2024 17:19:35.
_compat_pickle.py File 8749 bytes April 17 2024 17:19:38.
_compression.py File 5340 bytes April 17 2024 17:19:35.
_dummy_thread.py File 5118 bytes April 17 2024 17:19:38.
_markupbase.py File 14598 bytes April 17 2024 17:19:34.
_osx_support.py File 19138 bytes April 17 2024 17:19:38.
_pydecimal.py File 230228 bytes April 17 2024 17:19:34.
_pyio.py File 88097 bytes April 17 2024 17:19:35.
_sitebuiltins.py File 3115 bytes April 17 2024 17:19:35.
_strptime.py File 24747 bytes April 17 2024 17:19:39.
_sysconfigdata_dm_linux_x86_64-linux-gnu.py File 27823 bytes April 17 2024 17:19:34.
_sysconfigdata_m_linux_x86_64-linux-gnu.py File 26288 bytes April 17 2024 17:19:39.
_threading_local.py File 7214 bytes April 17 2024 17:19:34.
_weakrefset.py File 5705 bytes April 17 2024 17:19:34.
abc.py File 8727 bytes April 17 2024 17:19:34.
aifc.py File 32454 bytes April 17 2024 17:19:39.
antigravity.py File 477 bytes April 17 2024 17:19:35.
argparse.py File 90372 bytes April 17 2024 17:19:38.
ast.py File 12166 bytes April 17 2024 17:19:38.
asynchat.py File 11328 bytes April 17 2024 17:19:38.
asyncore.py File 20159 bytes April 17 2024 17:19:39.
base64.py File 20391 bytes April 17 2024 17:19:35.
bdb.py File 23556 bytes April 17 2024 17:19:38.
binhex.py File 13954 bytes April 17 2024 17:19:34.
bisect.py File 2595 bytes April 17 2024 17:19:34.
bz2.py File 12478 bytes April 17 2024 17:19:38.
cProfile.py File 5383 bytes April 17 2024 17:19:34.
calendar.py File 23213 bytes April 17 2024 17:19:38.
cgi.py File 37074 bytes April 17 2024 17:19:38.
cgitb.py File 12018 bytes April 17 2024 17:19:39.
chunk.py File 5425 bytes April 17 2024 17:19:35.
cmd.py File 14860 bytes April 17 2024 17:19:34.
code.py File 10614 bytes April 17 2024 17:19:35.
codecs.py File 36276 bytes April 17 2024 17:19:34.
codeop.py File 5994 bytes April 17 2024 17:19:34.
colorsys.py File 4064 bytes April 17 2024 17:19:34.
compileall.py File 12125 bytes April 17 2024 17:19:34.
configparser.py File 53592 bytes April 17 2024 17:19:34.
contextlib.py File 13162 bytes April 17 2024 17:19:34.
copy.py File 8815 bytes April 17 2024 17:19:34.
copyreg.py File 7007 bytes April 17 2024 17:19:38.
crypt.py File 1864 bytes April 17 2024 17:19:34.
csv.py File 16180 bytes April 17 2024 17:19:35.
datetime.py File 82034 bytes April 17 2024 17:19:39.
decimal.py File 320 bytes April 17 2024 17:19:38.
difflib.py File 84377 bytes April 17 2024 17:19:35.
dis.py File 18132 bytes April 17 2024 17:19:34.
doctest.py File 104391 bytes April 17 2024 17:19:35.
dummy_threading.py File 2815 bytes April 17 2024 17:19:34.
enum.py File 33606 bytes April 17 2024 17:19:35.
filecmp.py File 9830 bytes April 17 2024 17:19:34.
fileinput.py File 14471 bytes April 17 2024 17:19:35.
fnmatch.py File 3166 bytes April 17 2024 17:19:34.
formatter.py File 15143 bytes April 17 2024 17:19:35.
fractions.py File 23639 bytes April 17 2024 17:19:34.
ftplib.py File 35617 bytes April 17 2024 17:19:34.
functools.py File 31346 bytes April 17 2024 17:19:39.
genericpath.py File 4756 bytes April 17 2024 17:19:39.
getopt.py File 7489 bytes April 17 2024 17:19:38.
getpass.py File 5994 bytes April 17 2024 17:19:34.
gettext.py File 21530 bytes April 17 2024 17:19:38.
glob.py File 5638 bytes April 17 2024 17:19:34.
gzip.py File 20334 bytes April 17 2024 17:19:38.
hashlib.py File 9533 bytes April 17 2024 17:19:39.
heapq.py File 22929 bytes April 17 2024 17:19:34.
hmac.py File 5057 bytes April 17 2024 17:19:35.
imaplib.py File 53295 bytes April 17 2024 17:19:38.
imghdr.py File 3795 bytes April 17 2024 17:19:38.
imp.py File 10669 bytes April 17 2024 17:19:35.
inspect.py File 116958 bytes April 17 2024 17:19:38.
io.py File 3517 bytes April 17 2024 17:19:35.
ipaddress.py File 74563 bytes April 17 2024 17:19:38.
keyword.py File 2222 bytes April 17 2024 17:19:38.
linecache.py File 5312 bytes April 17 2024 17:19:35.
locale.py File 77300 bytes April 17 2024 17:19:38.
lzma.py File 12983 bytes April 17 2024 17:19:39.
macpath.py File 5971 bytes April 17 2024 17:19:35.
macurl2path.py File 2732 bytes April 17 2024 17:19:34.
mailbox.py File 78624 bytes April 17 2024 17:19:38.
mailcap.py File 8104 bytes April 17 2024 17:19:34.
mimetypes.py File 21042 bytes April 17 2024 17:19:38.
modulefinder.py File 23027 bytes April 17 2024 17:19:34.
netrc.py File 5684 bytes April 17 2024 17:19:35.
nntplib.py File 43078 bytes April 17 2024 17:19:34.
ntpath.py File 23094 bytes April 17 2024 17:19:34.
nturl2path.py File 2444 bytes April 17 2024 17:19:38.
numbers.py File 10243 bytes April 17 2024 17:19:39.
opcode.py File 5822 bytes April 17 2024 17:19:39.
operator.py File 10863 bytes April 17 2024 17:19:38.
optparse.py File 60371 bytes April 17 2024 17:19:38.
os.py File 37526 bytes April 17 2024 17:19:34.
pathlib.py File 48982 bytes April 17 2024 17:19:38.
pdb.py File 61323 bytes April 17 2024 17:19:35.
pickle.py File 55691 bytes April 17 2024 17:19:35.
pickletools.py File 91775 bytes April 17 2024 17:19:34.
pipes.py File 8916 bytes April 17 2024 17:19:38.
pkgutil.py File 21315 bytes April 17 2024 17:19:35.
platform.py File 47204 bytes April 17 2024 17:19:34.
plistlib.py File 32787 bytes April 17 2024 17:19:34.
poplib.py File 14964 bytes April 17 2024 17:19:34.
posixpath.py File 15772 bytes April 17 2024 17:19:35.
pprint.py File 20860 bytes April 17 2024 17:19:35.
profile.py File 22032 bytes April 17 2024 17:19:35.
pstats.py File 26564 bytes April 17 2024 17:19:34.
pty.py File 4763 bytes April 17 2024 17:19:34.
py_compile.py File 7181 bytes April 17 2024 17:19:38.
pyclbr.py File 13558 bytes April 17 2024 17:19:34.
pydoc.py File 103501 bytes April 17 2024 17:19:34.
queue.py File 8780 bytes April 17 2024 17:19:38.
quopri.py File 7265 bytes April 17 2024 17:19:38.
random.py File 27442 bytes April 17 2024 17:19:34.
re.py File 15552 bytes April 17 2024 17:19:35.
reprlib.py File 5336 bytes April 17 2024 17:19:35.
rlcompleter.py File 7097 bytes April 17 2024 17:19:39.
runpy.py File 11959 bytes April 17 2024 17:19:34.
sched.py File 6511 bytes April 17 2024 17:19:38.
secrets.py File 2038 bytes April 17 2024 17:19:35.
selectors.py File 19438 bytes April 17 2024 17:19:34.
shelve.py File 8515 bytes April 17 2024 17:19:38.
shlex.py File 12956 bytes April 17 2024 17:19:39.
shutil.py File 40540 bytes April 17 2024 17:19:38.
signal.py File 2123 bytes April 17 2024 17:19:38.
site.py File 21027 bytes April 17 2024 17:19:38.
smtpd.py File 34722 bytes April 17 2024 17:19:35.
smtplib.py File 44418 bytes April 17 2024 17:19:34.
sndhdr.py File 7088 bytes April 17 2024 17:19:38.
socket.py File 27443 bytes April 17 2024 17:19:39.
socketserver.py File 27010 bytes April 17 2024 17:19:39.
sre_compile.py File 19338 bytes April 17 2024 17:19:34.
sre_constants.py File 6821 bytes April 17 2024 17:19:34.
sre_parse.py File 36536 bytes April 17 2024 17:19:35.
ssl.py File 44793 bytes April 17 2024 17:19:38.
stat.py File 5038 bytes April 17 2024 17:19:38.
statistics.py File 20673 bytes April 17 2024 17:19:34.
string.py File 11795 bytes April 17 2024 17:19:38.
stringprep.py File 12917 bytes April 17 2024 17:19:35.
struct.py File 257 bytes April 17 2024 17:19:35.
subprocess.py File 62339 bytes April 17 2024 17:19:34.
sunau.py File 18095 bytes April 17 2024 17:19:34.
symbol.py File 2122 bytes April 17 2024 17:19:34.
symtable.py File 7277 bytes April 17 2024 17:19:38.
sysconfig.py File 24984 bytes April 17 2024 17:19:38.
tabnanny.py File 11414 bytes April 17 2024 17:19:38.
tarfile.py File 93316 bytes April 17 2024 17:19:34.
telnetlib.py File 23136 bytes April 17 2024 17:19:34.
tempfile.py File 26776 bytes April 17 2024 17:19:34.
textwrap.py File 19558 bytes April 17 2024 17:19:34.
this.py File 1003 bytes April 17 2024 17:19:35.
threading.py File 49029 bytes April 17 2024 17:19:38.
timeit.py File 13345 bytes April 17 2024 17:19:34.
token.py File 3075 bytes April 17 2024 17:19:34.
tokenize.py File 29496 bytes April 17 2024 17:19:38.
trace.py File 28736 bytes April 17 2024 17:19:34.
traceback.py File 23458 bytes April 17 2024 17:19:38.
tracemalloc.py File 16658 bytes April 17 2024 17:19:38.
tty.py File 879 bytes April 17 2024 17:19:34.
types.py File 8870 bytes April 17 2024 17:19:34.
typing.py File 80274 bytes April 17 2024 17:19:38.
uu.py File 6909 bytes April 17 2024 17:19:34.
uuid.py File 23971 bytes April 17 2024 17:19:35.
warnings.py File 18488 bytes April 17 2024 17:19:35.
wave.py File 17709 bytes April 17 2024 17:19:34.
weakref.py File 20466 bytes April 17 2024 17:19:38.
webbrowser.py File 21770 bytes April 17 2024 17:19:38.
xdrlib.py File 5913 bytes April 17 2024 17:19:39.
zipapp.py File 7157 bytes April 17 2024 17:19:35.
zipfile.py File 76282 bytes April 17 2024 17:19:39.

Reading File: /opt/alt/python36/lib64/python3.6/tokenize.py

"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@':   AT,
    '@=':  ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type

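# Illustrative sketch (not part of the original tokenize.py): exact_type lets
# a consumer distinguish '+=' from '-' even though tokenize() reports both as
# OP.  The helper below is never called here; it only demonstrates the lookup.
def _example_exact_type():
    tok = TokenInfo(OP, '+=', (1, 0), (1, 2), '+=\n')
    assert tok.type == OP
    assert tok.exact_type == PLUSEQUAL
    return tok_name[tok.exact_type]            # 'PLUSEQUAL'
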
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    #  and don't contain any permutations (include 'fr', but not
    #  'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = set([''])
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

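# Illustrative sketch (not part of the original tokenize.py): the tables built
# above are keyed by prefix + opening quote, so a scanner that has just seen
# rb''' can look up the matching "tail" regex directly.  Never called here;
# demonstration only.
def _example_prefix_tables():
    assert "rb'" in single_quoted
    assert 'f"""' in triple_quoted
    # endpats maps the same keys to the regex for the rest of the string.
    return endpats["rb'"] == Single and endpats['f"""'] == Double3
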
tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out

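# Illustrative round-trip sketch (not part of the original tokenize.py).  It
# uses an in-memory bytes buffer in place of a real file, mirroring the
# limited-input invariant documented above.  Never called here.
def _example_untokenize_roundtrip(source=b"x = 1\n"):
    from io import BytesIO
    t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
    rebuilt = untokenize(t1)                   # bytes, via the ENCODING token
    t2 = [tok[:2] for tok in tokenize(BytesIO(rebuilt).readline)]
    return t1 == t2                            # holds per the docstring above
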

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

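# Illustrative sketch (not part of the original tokenize.py): detect_encoding()
# only needs a readline callable, so an in-memory buffer works.  The coding
# cookie is the PEP 263 example; the helper is never called here.
def _example_detect_encoding():
    from io import BytesIO
    src = b"# -*- coding: latin-1 -*-\nx = 1\n"
    enc, lines = detect_encoding(BytesIO(src).readline)
    return enc, lines                          # ('iso-8859-1', [first line])
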

def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise

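# Illustrative sketch (not part of the original tokenize.py): this module's
# open() is the encoding-aware counterpart of the builtin.  "example.py" is a
# hypothetical filename; the helper is never called here.
def _example_open(path="example.py"):
    with open(path) as f:                      # tokenize.open(), not the builtin
        return f.read()
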

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)

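# Illustrative sketch (not part of the original tokenize.py): iterate the token
# stream of an in-memory source.  The first token is always ENCODING, as the
# docstring above notes.  Never called at import time; demonstration only.
def _example_tokenize(source=b"def f():\n    return 42\n"):
    from io import BytesIO
    for tok in tokenize(BytesIO(source).readline):
        print(tok_name[tok.type], repr(tok.string))
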

def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if stashed:
                        yield stashed
                        stashed = None
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                        if async_def:
                            async_def_nl = True

                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield TokenInfo(
                                ASYNC if token == 'async' else AWAIT,
                                token, spos, epos, line)
                            continue

                    tok = TokenInfo(NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                                and stashed.type == NAME
                                and stashed.string == 'async'):

                            async_def = True
                            async_def_indent = indents[-1]

                            yield TokenInfo(ASYNC, stashed.string,
                                            stashed.start, stashed.end,
                                            stashed.line)
                            stashed = None

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    if stashed:
        yield stashed
        stashed = None

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)

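# Illustrative sketch (not part of the original tokenize.py): generate_tokens()
# takes a readline that yields str rather than bytes, so io.StringIO works and
# no ENCODING token is produced.  Never called here; demonstration only.
def _example_generate_tokens(source="x = 1\n"):
    from io import StringIO
    return [tok.string for tok in generate_tokens(StringIO(source).readline)]
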
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()

SILENT KILLER Tool