Current Path: /opt/alt/python34/lib64/python3.4
Operating System: Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software: Apache
Server IP: 162.0.232.56 | Your IP: 216.73.216.111
Domains: 1034 domain(s)
Permission: [ 0755 ]
Name | Type | Size | Last Modified
---|---|---|---
__pycache__ | Directory | - | -
asyncio | Directory | - | -
collections | Directory | - | -
concurrent | Directory | - | -
config-3.4m | Directory | - | -
ctypes | Directory | - | -
curses | Directory | - | -
dbm | Directory | - | -
distutils | Directory | - | -
email | Directory | - | -
encodings | Directory | - | -
ensurepip | Directory | - | -
html | Directory | - | -
http | Directory | - | -
idlelib | Directory | - | -
importlib | Directory | - | -
json | Directory | - | -
lib-dynload | Directory | - | -
lib2to3 | Directory | - | -
logging | Directory | - | -
multiprocessing | Directory | - | -
plat-linux | Directory | - | -
pydoc_data | Directory | - | -
site-packages | Directory | - | -
sqlite3 | Directory | - | -
test | Directory | - | -
unittest | Directory | - | -
urllib | Directory | - | -
venv | Directory | - | -
wsgiref | Directory | - | -
xml | Directory | - | -
xmlrpc | Directory | - | -
__future__.py | File | 4584 bytes | April 17 2024 17:10:02
__phello__.foo.py | File | 64 bytes | April 17 2024 17:10:01
_bootlocale.py | File | 1301 bytes | April 17 2024 17:09:57
_collections_abc.py | File | 19898 bytes | April 17 2024 17:09:57
_compat_pickle.py | File | 8318 bytes | April 17 2024 17:10:00
_dummy_thread.py | File | 4872 bytes | April 17 2024 17:10:01
_markupbase.py | File | 14598 bytes | April 17 2024 17:09:57
_osx_support.py | File | 19101 bytes | April 17 2024 17:10:01
_pyio.py | File | 73893 bytes | April 17 2024 17:09:58
_sitebuiltins.py | File | 3115 bytes | April 17 2024 17:09:58
_strptime.py | File | 22053 bytes | April 17 2024 17:10:02
_sysconfigdata.py | File | 28728 bytes | April 17 2024 17:10:01
_threading_local.py | File | 7410 bytes | April 17 2024 17:09:57
_weakrefset.py | File | 5705 bytes | April 17 2024 17:09:57
abc.py | File | 8624 bytes | April 17 2024 17:09:57
aifc.py | File | 31578 bytes | April 17 2024 17:10:02
antigravity.py | File | 475 bytes | April 17 2024 17:09:57
argparse.py | File | 90027 bytes | April 17 2024 17:10:01
ast.py | File | 12034 bytes | April 17 2024 17:10:01
asynchat.py | File | 11825 bytes | April 17 2024 17:10:00
asyncore.py | File | 20998 bytes | April 17 2024 17:10:02
base64.py | File | 20180 bytes | April 17 2024 17:09:57
bdb.py | File | 23354 bytes | April 17 2024 17:10:00
binhex.py | File | 13928 bytes | April 17 2024 17:09:57
bisect.py | File | 2595 bytes | April 17 2024 17:09:57
bz2.py | File | 18860 bytes | April 17 2024 17:10:01
cProfile.py | File | 5324 bytes | April 17 2024 17:09:57
calendar.py | File | 22941 bytes | April 17 2024 17:10:01
cgi.py | File | 35941 bytes | April 17 2024 17:10:01
cgitb.py | File | 12041 bytes | April 17 2024 17:10:02
chunk.py | File | 5425 bytes | April 17 2024 17:09:58
cmd.py | File | 14860 bytes | April 17 2024 17:09:57
code.py | File | 10037 bytes | April 17 2024 17:09:57
codecs.py | File | 35910 bytes | April 17 2024 17:09:57
codeop.py | File | 5994 bytes | April 17 2024 17:09:57
colorsys.py | File | 4064 bytes | April 17 2024 17:09:57
compileall.py | File | 9618 bytes | April 17 2024 17:09:57
configparser.py | File | 49698 bytes | April 17 2024 17:09:57
contextlib.py | File | 11639 bytes | April 17 2024 17:09:57
copy.py | File | 9005 bytes | April 17 2024 17:09:57
copyreg.py | File | 6833 bytes | April 17 2024 17:10:01
crypt.py | File | 1879 bytes | April 17 2024 17:09:57
csv.py | File | 16185 bytes | April 17 2024 17:09:57
datetime.py | File | 75804 bytes | April 17 2024 17:10:02
decimal.py | File | 228688 bytes | April 17 2024 17:10:00
difflib.py | File | 81684 bytes | April 17 2024 17:09:57
dis.py | File | 17160 bytes | April 17 2024 17:09:57
doctest.py | File | 104492 bytes | April 17 2024 17:09:57
dummy_threading.py | File | 2815 bytes | April 17 2024 17:09:57
enum.py | File | 21538 bytes | April 17 2024 17:09:57
filecmp.py | File | 9830 bytes | April 17 2024 17:09:57
fileinput.py | File | 14865 bytes | April 17 2024 17:09:57
fnmatch.py | File | 3163 bytes | April 17 2024 17:09:57
formatter.py | File | 15173 bytes | April 17 2024 17:09:57
fractions.py | File | 23203 bytes | April 17 2024 17:09:57
ftplib.py | File | 38532 bytes | April 17 2024 17:09:57
functools.py | File | 28511 bytes | April 17 2024 17:10:02
genericpath.py | File | 3882 bytes | April 17 2024 17:10:02
getopt.py | File | 7489 bytes | April 17 2024 17:10:01
getpass.py | File | 6069 bytes | April 17 2024 17:09:57
gettext.py | File | 20767 bytes | April 17 2024 17:10:01
glob.py | File | 3461 bytes | April 17 2024 17:09:57
gzip.py | File | 24314 bytes | April 17 2024 17:10:01
hashlib.py | File | 9850 bytes | April 17 2024 17:10:02
heapq.py | File | 17997 bytes | April 17 2024 17:09:57
hmac.py | File | 5063 bytes | April 17 2024 17:09:58
imaplib.py | File | 50267 bytes | April 17 2024 17:10:01
imghdr.py | File | 3528 bytes | April 17 2024 17:10:01
imp.py | File | 9984 bytes | April 17 2024 17:09:57
inspect.py | File | 104640 bytes | April 17 2024 17:10:00
io.py | File | 3396 bytes | April 17 2024 17:09:57
ipaddress.py | File | 71598 bytes | April 17 2024 17:10:01
keyword.py | File | 2222 bytes | April 17 2024 17:10:01
linecache.py | File | 3953 bytes | April 17 2024 17:09:57
locale.py | File | 74530 bytes | April 17 2024 17:10:00
lzma.py | File | 19371 bytes | April 17 2024 17:10:02
macpath.py | File | 5619 bytes | April 17 2024 17:09:57
macurl2path.py | File | 2732 bytes | April 17 2024 17:09:57
mailbox.py | File | 78382 bytes | April 17 2024 17:10:00
mailcap.py | File | 7437 bytes | April 17 2024 17:09:57
mimetypes.py | File | 20781 bytes | April 17 2024 17:10:00
modulefinder.py | File | 23421 bytes | April 17 2024 17:09:57
netrc.py | File | 5748 bytes | April 17 2024 17:09:58
nntplib.py | File | 43082 bytes | April 17 2024 17:09:57
ntpath.py | File | 20477 bytes | April 17 2024 17:09:57
nturl2path.py | File | 2444 bytes | April 17 2024 17:10:01
numbers.py | File | 10243 bytes | April 17 2024 17:10:02
opcode.py | File | 5442 bytes | April 17 2024 17:10:02
operator.py | File | 9195 bytes | April 17 2024 17:10:00
optparse.py | File | 60346 bytes | April 17 2024 17:10:01
os.py | File | 33882 bytes | April 17 2024 17:09:57
pathlib.py | File | 42467 bytes | April 17 2024 17:10:00
pdb.py | File | 60992 bytes | April 17 2024 17:09:57
pickle.py | File | 55989 bytes | April 17 2024 17:09:58
pickletools.py | File | 91762 bytes | April 17 2024 17:09:57
pipes.py | File | 8916 bytes | April 17 2024 17:10:01
pkgutil.py | File | 21215 bytes | April 17 2024 17:09:57
platform.py | File | 46761 bytes | April 17 2024 17:09:57
plistlib.py | File | 31791 bytes | April 17 2024 17:09:57
poplib.py | File | 14319 bytes | April 17 2024 17:09:57
posixpath.py | File | 13448 bytes | April 17 2024 17:09:57
pprint.py | File | 14919 bytes | April 17 2024 17:09:57
profile.py | File | 22032 bytes | April 17 2024 17:09:57
pstats.py | File | 26316 bytes | April 17 2024 17:09:57
pty.py | File | 4763 bytes | April 17 2024 17:09:57
py_compile.py | File | 7103 bytes | April 17 2024 17:10:00
pyclbr.py | File | 13520 bytes | April 17 2024 17:09:57
pydoc.py | File | 103011 bytes | April 17 2024 17:09:57
queue.py | File | 8835 bytes | April 17 2024 17:10:01
quopri.py | File | 7265 bytes | April 17 2024 17:10:01
random.py | File | 26084 bytes | April 17 2024 17:09:57
re.py | File | 15604 bytes | April 17 2024 17:09:57
reprlib.py | File | 5110 bytes | April 17 2024 17:09:57
rlcompleter.py | File | 6069 bytes | April 17 2024 17:10:02
runpy.py | File | 10816 bytes | April 17 2024 17:09:57
sched.py | File | 6354 bytes | April 17 2024 17:10:00
selectors.py | File | 17097 bytes | April 17 2024 17:09:57
shelve.py | File | 8528 bytes | April 17 2024 17:10:01
shlex.py | File | 11548 bytes | April 17 2024 17:10:02
shutil.py | File | 39902 bytes | April 17 2024 17:10:01
site.py | File | 21553 bytes | April 17 2024 17:10:00
smtpd.py | File | 29991 bytes | April 17 2024 17:09:57
smtplib.py | File | 38971 bytes | April 17 2024 17:09:57
sndhdr.py | File | 6256 bytes | April 17 2024 17:10:01
socket.py | File | 19067 bytes | April 17 2024 17:10:02
socketserver.py | File | 24372 bytes | April 17 2024 17:10:02
sre_compile.py | File | 19903 bytes | April 17 2024 17:09:57
sre_constants.py | File | 7267 bytes | April 17 2024 17:09:57
sre_parse.py | File | 31429 bytes | April 17 2024 17:09:57
ssl.py | File | 34747 bytes | April 17 2024 17:10:00
stat.py | File | 4400 bytes | April 17 2024 17:10:00
statistics.py | File | 19556 bytes | April 17 2024 17:09:57
string.py | File | 11445 bytes | April 17 2024 17:10:01
stringprep.py | File | 12917 bytes | April 17 2024 17:09:58
struct.py | File | 257 bytes | April 17 2024 17:09:57
subprocess.py | File | 64549 bytes | April 17 2024 17:09:57
sunau.py | File | 18095 bytes | April 17 2024 17:09:57
symbol.py | File | 2053 bytes | April 17 2024 17:09:57
symtable.py | File | 7404 bytes | April 17 2024 17:10:01
sysconfig.py | File | 24632 bytes | April 17 2024 17:10:01
tabnanny.py | File | 11410 bytes | April 17 2024 17:10:01
tarfile.py | File | 91557 bytes | April 17 2024 17:09:57
telnetlib.py | File | 23074 bytes | April 17 2024 17:09:57
tempfile.py | File | 22525 bytes | April 17 2024 17:09:57
textwrap.py | File | 19282 bytes | April 17 2024 17:09:57
this.py | File | 1003 bytes | April 17 2024 17:09:58
threading.py | File | 48802 bytes | April 17 2024 17:10:00
timeit.py | File | 11972 bytes | April 17 2024 17:09:57
token.py | File | 3034 bytes | April 17 2024 17:09:57
tokenize.py | File | 25596 bytes | April 17 2024 17:10:01
trace.py | File | 31487 bytes | April 17 2024 17:09:57
traceback.py | File | 11167 bytes | April 17 2024 17:10:01
tracemalloc.py | File | 15651 bytes | April 17 2024 17:10:01
tty.py | File | 879 bytes | April 17 2024 17:09:57
types.py | File | 5411 bytes | April 17 2024 17:09:57
uu.py | File | 6766 bytes | April 17 2024 17:09:57
uuid.py | File | 23724 bytes | April 17 2024 17:09:57
warnings.py | File | 14303 bytes | April 17 2024 17:09:57
wave.py | File | 17682 bytes | April 17 2024 17:09:57
weakref.py | File | 19384 bytes | April 17 2024 17:10:00
webbrowser.py | File | 21432 bytes | April 17 2024 17:10:01
xdrlib.py | File | 5913 bytes | April 17 2024 17:10:02
zipfile.py | File | 68547 bytes | April 17 2024 17:10:02
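
Viewing file: tokenize.py (25596 bytes, from the listing above)
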
"""Tokenization help for Python programs. tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for determining source file encoding. It accepts a readline-like method which is called repeatedly to get the next line of input (or b"" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ __author__ = 'Ka-Ping Yee <ping@lfw.org>' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') from builtins import open as _builtin_open from codecs import lookup, BOM_UTF8 import collections from io import TextIOWrapper from itertools import chain import re import sys from token import * cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) import token __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", "NL", "untokenize", "ENCODING", "TokenInfo"] del token COMMENT = N_TOKENS tok_name[COMMENT] = 'COMMENT' NL = N_TOKENS + 1 tok_name[NL] = 'NL' ENCODING = N_TOKENS + 2 tok_name[ENCODING] = 'ENCODING' N_TOKENS += 3 EXACT_TOKEN_TYPES = { '(': LPAR, ')': RPAR, '[': LSQB, ']': RSQB, ':': COLON, ',': COMMA, ';': SEMI, '+': PLUS, '-': MINUS, '*': STAR, '/': SLASH, '|': VBAR, '&': AMPER, '<': LESS, '>': GREATER, '=': EQUAL, '.': DOT, '%': PERCENT, '{': LBRACE, '}': RBRACE, '==': EQEQUAL, '!=': NOTEQUAL, '<=': LESSEQUAL, '>=': GREATEREQUAL, '~': TILDE, '^': CIRCUMFLEX, '<<': LEFTSHIFT, '>>': RIGHTSHIFT, '**': DOUBLESTAR, '+=': PLUSEQUAL, '-=': MINEQUAL, '*=': STAREQUAL, '/=': SLASHEQUAL, '%=': PERCENTEQUAL, '&=': AMPEREQUAL, '|=': VBAREQUAL, '^=': CIRCUMFLEXEQUAL, '<<=': LEFTSHIFTEQUAL, '>>=': RIGHTSHIFTEQUAL, '**=': DOUBLESTAREQUAL, '//': DOUBLESLASH, '//=': DOUBLESLASHEQUAL, '@': AT } class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. 
Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX][0-9a-fA-F]+' Binnumber = r'0[bB][01]+' Octnumber = r'0[oO][0-7]+' Decnumber = r'(?:0+|[1-9][0-9]*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9]+' Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) Expfloat = r'[0-9]+' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group(StringPrefix + "'''", StringPrefix + '"""') # Single-line ' or " string. String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->", r"[+\-*/%&|^=<>]=?", r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') Funny = group(Operator, Bracket, Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) def _compile(expr): return re.compile(expr, re.UNICODE) endpats = {"'": Single, '"': Double, "'''": Single3, '"""': Double3, "r'''": Single3, 'r"""': Double3, "b'''": Single3, 'b"""': Double3, "R'''": Single3, 'R"""': Double3, "B'''": Single3, 'B"""': Double3, "br'''": Single3, 'br"""': Double3, "bR'''": Single3, 'bR"""': Double3, "Br'''": Single3, 'Br"""': Double3, "BR'''": Single3, 'BR"""': Double3, "rb'''": Single3, 'rb"""': Double3, "Rb'''": Single3, 'Rb"""': Double3, "rB'''": Single3, 'rB"""': Double3, "RB'''": Single3, 'RB"""': Double3, "u'''": Single3, 'u"""': Double3, "R'''": Single3, 'R"""': Double3, "U'''": Single3, 'U"""': Double3, 'r': None, 'R': None, 'b': None, 'B': None, 'u': None, 'U': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""', "rb'''", 'rb"""', "rB'''", 'rB"""', "Rb'''", 'Rb"""', "RB'''", 'RB"""', "u'''", 'u"""', "U'''", 'U"""', ): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "b'", 'b"', "B'", 'B"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"' , "rb'", 'rb"', "rB'", 'rB"', "Rb'", 'Rb"', "RB'", 'RB"' , "u'", 'u"', "U'", 'U"', ): single_quoted[t] = t tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 self.encoding = None def add_whitespace(self, start): row, col = start if row < self.prev_row or row == self.prev_row and col < self.prev_col: raise ValueError("start ({},{}) precedes previous end 
({},{})" .format(row, col, self.prev_row, self.prev_col)) row_offset = row - self.prev_row if row_offset: self.tokens.append("\\\n" * row_offset) self.prev_col = 0 col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): it = iter(iterable) indents = [] startline = False for t in it: if len(t) == 2: self.compat(t, it) break tok_type, token, start, end, line = t if tok_type == ENCODING: self.encoding = token continue if tok_type == ENDMARKER: break if tok_type == INDENT: indents.append(token) continue elif tok_type == DEDENT: indents.pop() self.prev_row, self.prev_col = end continue elif tok_type in (NEWLINE, NL): startline = True elif startline and indents: indent = indents[-1] if start[1] >= len(indent): self.tokens.append(indent) self.prev_col = len(indent) startline = False self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): indents = [] toks_append = self.tokens.append startline = token[0] in (NEWLINE, NL) prevstring = False for tok in chain([token], iterable): toknum, tokval = tok[:2] if toknum == ENCODING: self.encoding = tokval continue if toknum in (NAME, NUMBER): tokval += ' ' # Insert a space between two consecutive strings if toknum == STRING: if prevstring: tokval = ' ' + tokval prevstring = True else: prevstring = False if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) def untokenize(iterable): """Transform tokens back into Python source code. It returns a bytes object, encoded using the ENCODING token, which is the first token sequence output by tokenize. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output bytes will tokenize the back to the input t1 = [tok[:2] for tok in tokenize(f.readline)] newcode = untokenize(t1) readline = BytesIO(newcode).readline t2 = [tok[:2] for tok in tokenize(readline)] assert t1 == t2 """ ut = Untokenizer() out = ut.untokenize(iterable) if ut.encoding is not None: out = out.encode(ut.encoding) return out def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. 
If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if encoding != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def open(filename): """Open a file in read only mode using the encoding detected by detect_encoding(). """ buffer = _builtin_open(filename, 'rb') try: encoding, lines = detect_encoding(buffer.readline) buffer.seek(0) text = TextIOWrapper(buffer, encoding, line_buffering=True) text.mode = 'r' return text except: buffer.close() raise def tokenize(readline): """ The tokenize() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as bytes. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile, 'rb').__next__ # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. The first token sequence will always be an ENCODING token which tells you which encoding was used to decode the bytes stream. """ # This import is here to avoid problems when the itertools module is not # built yet and tokenize is imported. 
from itertools import chain, repeat encoding, consumed = detect_encoding(readline) rl_gen = iter(readline, b"") empty = repeat(b"") return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) def _tokenize(readline, encoding): lnum = parenlev = continued = 0 numchars = '0123456789' contstr, needcont = '', 0 contline = None indents = [0] if encoding is not None: if encoding == "utf-8-sig": # BOM will already have been stripped. encoding = "utf-8" yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') while True: # loop over lines in stream try: line = readline() except StopIteration: line = b'' if encoding is not None: line = line.decode(encoding) lnum += 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield TokenInfo(STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield TokenInfo(ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column += 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos += 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield TokenInfo(COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield TokenInfo(NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = _compile(PseudoToken).match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end if start == end: continue token, initial = line[start:end], line[start] if (initial in numchars or # ordinary number (initial == '.' and token != '.' 
and token != '...')): yield TokenInfo(NUMBER, token, spos, epos, line) elif initial in '\r\n': yield TokenInfo(NL if parenlev > 0 else NEWLINE, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield TokenInfo(COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = _compile(endpats[token]) endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield TokenInfo(STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = _compile(endpats[initial] or endpats[token[1]] or endpats[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield TokenInfo(STRING, token, spos, epos, line) elif initial.isidentifier(): # ordinary name yield TokenInfo(NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev += 1 elif initial in ')]}': parenlev -= 1 yield TokenInfo(OP, token, spos, epos, line) else: yield TokenInfo(ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos += 1 for indent in indents[1:]: # pop remaining indent levels yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') # An undocumented, backwards compatible, API for all the places in the standard # library that expect to be able to use tokenize with strings def generate_tokens(readline): return _tokenize(readline, None) def main(): import argparse # Helper error handling routines def perror(message): print(message, file=sys.stderr) def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with _builtin_open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "<stdin>" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise if __name__ == "__main__": main()
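
The docstrings above describe the 5-tuple token stream and the round-trip guarantee of untokenize(). A minimal usage sketch, assuming an arbitrary sample source buffer (the string "x = 1 + 2..." is invented for illustration):

    from io import BytesIO
    from tokenize import tokenize, untokenize, tok_name

    source = b"x = 1 + 2\nprint(x)\n"   # hypothetical input buffer
    tokens = list(tokenize(BytesIO(source).readline))
    for tok in tokens:
        # exact_type resolves OP tokens to EQUAL, PLUS, LPAR, ...
        print(tok_name[tok.exact_type], repr(tok.string))

    # With full 5-tuples, untokenize() reproduces the input exactly,
    # encoded using the leading ENCODING token (utf-8 here).
    assert untokenize(tokens) == source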
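
detect_encoding() can be exercised on an in-memory buffer in the same way; the coding cookie below is an invented example input:

    from io import BytesIO
    from tokenize import detect_encoding

    buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
    encoding, lines_read = detect_encoding(buf.readline)
    print(encoding)     # 'iso-8859-1' (the normalized form of 'latin-1')
    print(lines_read)   # [b'# -*- coding: latin-1 -*-\n'] -- the line already consumed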
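
main() also wires the module up as a command-line tool. Assuming a throwaway file sample.py exists, a hypothetical invocation looks like:

    $ python -m tokenize -e sample.py

Each output line gives the start-end position range, the token name, and the token string; with -e, operators are reported with their exact type names (EQUAL, PLUS, ...) via TokenInfo.exact_type rather than the generic OP.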