Current Path: /opt/alt/python-internal/lib64/python3.11
Operating System: Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software: Apache
Server IP: 162.0.232.56 | Your IP: 216.73.216.111
Domains: 1034 domain(s)
Permission: [ 0755 ]
Name | Type | Size | Last Modified | Actions |
---|---|---|---|---|
__pycache__ | Directory | - | - | |
asyncio | Directory | - | - | |
collections | Directory | - | - | |
concurrent | Directory | - | - | |
config-3.11-x86_64-linux-gnu | Directory | - | - | |
ctypes | Directory | - | - | |
curses | Directory | - | - | |
dbm | Directory | - | - | |
distutils | Directory | - | - | |
 | Directory | - | - | |
encodings | Directory | - | - | |
ensurepip | Directory | - | - | |
html | Directory | - | - | |
http | Directory | - | - | |
importlib | Directory | - | - | |
json | Directory | - | - | |
lib-dynload | Directory | - | - | |
lib2to3 | Directory | - | - | |
logging | Directory | - | - | |
multiprocessing | Directory | - | - | |
pydoc_data | Directory | - | - | |
re | Directory | - | - | |
site-packages | Directory | - | - | |
sqlite3 | Directory | - | - | |
tomllib | Directory | - | - | |
unittest | Directory | - | - | |
urllib | Directory | - | - | |
venv | Directory | - | - | |
wsgiref | Directory | - | - | |
xml | Directory | - | - | |
xmlrpc | Directory | - | - | |
zoneinfo | Directory | - | - | |
LICENSE.txt | File | 13936 bytes | June 03 2025 18:38:25. | |
__future__.py | File | 5218 bytes | June 03 2025 18:38:25. | |
__hello__.py | File | 227 bytes | June 03 2025 18:38:25. | |
_aix_support.py | File | 3389 bytes | June 03 2025 18:38:25. | |
_bootsubprocess.py | File | 2675 bytes | June 03 2025 18:38:25. | |
_collections_abc.py | File | 30193 bytes | June 03 2025 18:38:25. | |
_compat_pickle.py | File | 8761 bytes | June 03 2025 18:38:25. | |
_compression.py | File | 5681 bytes | June 03 2025 18:38:25. | |
_markupbase.py | File | 14653 bytes | June 03 2025 18:38:25. | |
_osx_support.py | File | 22023 bytes | June 03 2025 18:38:25. | |
_py_abc.py | File | 6189 bytes | June 03 2025 18:38:25. | |
_pydecimal.py | File | 229202 bytes | June 03 2025 18:38:25. | |
_pyio.py | File | 94193 bytes | June 03 2025 18:38:25. | |
_sitebuiltins.py | File | 3128 bytes | June 03 2025 18:38:25. | |
_strptime.py | File | 25175 bytes | June 03 2025 18:38:25. | |
_sysconfigdata__linux_x86_64-linux-gnu.py | File | 59345 bytes | July 23 2025 10:03:07. | |
_sysconfigdata_d_linux_x86_64-linux-gnu.py | File | 58569 bytes | July 23 2025 09:46:47. | |
_threading_local.py | File | 7220 bytes | June 03 2025 18:38:25. | |
_weakrefset.py | File | 5893 bytes | June 03 2025 18:38:25. | |
abc.py | File | 6538 bytes | June 03 2025 18:38:25. | |
aifc.py | File | 34211 bytes | June 03 2025 18:38:25. | |
antigravity.py | File | 500 bytes | June 03 2025 18:38:25. | |
argparse.py | File | 100283 bytes | June 03 2025 18:38:25. | |
ast.py | File | 61444 bytes | June 03 2025 18:38:25. | |
asynchat.py | File | 11570 bytes | June 03 2025 18:38:25. | |
asyncore.py | File | 20310 bytes | June 03 2025 18:38:25. | |
base64.py | File | 21047 bytes | June 03 2025 18:38:25. | |
bdb.py | File | 32463 bytes | June 03 2025 18:38:25. | |
bisect.py | File | 3135 bytes | June 03 2025 18:38:25. | |
bz2.py | File | 11847 bytes | June 03 2025 18:38:25. | |
cProfile.py | File | 6365 bytes | June 03 2025 18:38:25. | |
calendar.py | File | 24731 bytes | June 03 2025 18:38:25. | |
cgi.py | File | 34438 bytes | June 03 2025 18:38:25. | |
cgitb.py | File | 12421 bytes | June 03 2025 18:38:25. | |
chunk.py | File | 5500 bytes | June 03 2025 18:38:25. | |
cmd.py | File | 14873 bytes | June 03 2025 18:38:25. | |
code.py | File | 10622 bytes | June 03 2025 18:38:25. | |
codecs.py | File | 37150 bytes | June 03 2025 18:38:25. | |
codeop.py | File | 5907 bytes | June 03 2025 18:38:25. | |
colorsys.py | File | 4062 bytes | June 03 2025 18:38:25. | |
compileall.py | File | 20252 bytes | June 03 2025 18:38:25. | |
configparser.py | File | 55660 bytes | June 03 2025 18:38:25. | |
contextlib.py | File | 27414 bytes | June 03 2025 18:38:25. | |
contextvars.py | File | 129 bytes | June 03 2025 18:38:25. | |
copy.py | File | 8681 bytes | June 03 2025 18:38:25. | |
copyreg.py | File | 7677 bytes | June 03 2025 18:38:25. | |
crypt.py | File | 3913 bytes | June 03 2025 18:38:25. | |
csv.py | File | 16030 bytes | June 03 2025 18:38:25. | |
dataclasses.py | File | 58472 bytes | June 03 2025 18:38:25. | |
datetime.py | File | 91832 bytes | June 03 2025 18:38:25. | |
decimal.py | File | 320 bytes | June 03 2025 18:38:25. | |
difflib.py | File | 83308 bytes | June 03 2025 18:38:25. | |
dis.py | File | 28906 bytes | June 03 2025 18:38:25. | |
doctest.py | File | 106297 bytes | June 03 2025 18:38:25. | |
enum.py | File | 79583 bytes | June 03 2025 18:38:25. | |
filecmp.py | File | 10178 bytes | June 03 2025 18:38:25. | |
fileinput.py | File | 15714 bytes | June 03 2025 18:38:25. | |
fnmatch.py | File | 5999 bytes | June 03 2025 18:38:25. | |
fractions.py | File | 28677 bytes | June 03 2025 18:38:25. | |
ftplib.py | File | 35815 bytes | June 03 2025 18:38:25. | |
functools.py | File | 38413 bytes | June 03 2025 18:38:25. | |
genericpath.py | File | 5246 bytes | June 03 2025 18:38:25. | |
getopt.py | File | 7489 bytes | June 03 2025 18:38:25. | |
getpass.py | File | 5990 bytes | June 03 2025 18:38:25. | |
gettext.py | File | 21320 bytes | June 03 2025 18:38:25. | |
glob.py | File | 8732 bytes | June 03 2025 18:38:25. | |
graphlib.py | File | 9656 bytes | June 03 2025 18:38:25. | |
gzip.py | File | 24074 bytes | June 03 2025 18:38:25. | |
hashlib.py | File | 11765 bytes | June 03 2025 18:38:25. | |
heapq.py | File | 23024 bytes | June 03 2025 18:38:25. | |
hmac.py | File | 7716 bytes | June 03 2025 18:38:25. | |
imaplib.py | File | 55217 bytes | June 03 2025 18:38:25. | |
imghdr.py | File | 3952 bytes | June 03 2025 18:38:25. | |
imp.py | File | 10606 bytes | June 03 2025 18:38:25. | |
inspect.py | File | 123419 bytes | June 03 2025 18:38:25. | |
io.py | File | 4320 bytes | June 03 2025 18:38:25. | |
ipaddress.py | File | 81414 bytes | June 03 2025 18:38:25. | |
keyword.py | File | 1061 bytes | June 03 2025 18:38:25. | |
linecache.py | File | 5649 bytes | June 03 2025 18:38:25. | |
locale.py | File | 79095 bytes | June 03 2025 18:38:25. | |
lzma.py | File | 13277 bytes | June 03 2025 18:38:25. | |
mailbox.py | File | 78830 bytes | June 03 2025 18:38:25. | |
mailcap.py | File | 9369 bytes | June 03 2025 18:38:25. | |
mimetypes.py | File | 22962 bytes | June 03 2025 18:38:25. | |
modulefinder.py | File | 23699 bytes | June 03 2025 18:38:25. | |
netrc.py | File | 6929 bytes | June 03 2025 18:38:25. | |
nntplib.py | File | 41087 bytes | June 03 2025 18:38:25. | |
ntpath.py | File | 30686 bytes | June 03 2025 18:38:25. | |
nturl2path.py | File | 2887 bytes | June 03 2025 18:38:25. | |
numbers.py | File | 10348 bytes | June 03 2025 18:38:25. | |
opcode.py | File | 10447 bytes | June 03 2025 18:38:25. | |
operator.py | File | 10965 bytes | June 03 2025 18:38:25. | |
optparse.py | File | 60369 bytes | June 03 2025 18:38:25. | |
os.py | File | 39530 bytes | June 03 2025 18:38:25. | |
pathlib.py | File | 48566 bytes | June 03 2025 18:38:25. | |
pdb.py | File | 64192 bytes | June 03 2025 18:38:25. | |
pickle.py | File | 65132 bytes | June 03 2025 18:38:25. | |
pickletools.py | File | 93861 bytes | June 03 2025 18:38:25. | |
pipes.py | File | 8978 bytes | June 03 2025 18:38:25. | |
pkgutil.py | File | 24638 bytes | June 03 2025 18:38:25. | |
platform.py | File | 42293 bytes | June 03 2025 18:38:25. | |
plistlib.py | File | 28354 bytes | June 03 2025 18:38:25. | |
poplib.py | File | 15198 bytes | June 03 2025 18:38:25. | |
posixpath.py | File | 17199 bytes | June 03 2025 18:38:25. | |
pprint.py | File | 24583 bytes | June 03 2025 18:38:25. | |
profile.py | File | 22902 bytes | June 03 2025 18:38:25. | |
pstats.py | File | 29356 bytes | June 03 2025 18:38:25. | |
pty.py | File | 6317 bytes | June 03 2025 18:38:25. | |
py_compile.py | File | 7837 bytes | June 03 2025 18:38:25. | |
pyclbr.py | File | 11396 bytes | June 03 2025 18:38:25. | |
pydoc.py | File | 112670 bytes | June 03 2025 18:38:25. | |
queue.py | File | 11496 bytes | June 03 2025 18:38:25. | |
quopri.py | File | 7287 bytes | June 03 2025 18:38:25. | |
random.py | File | 32162 bytes | June 03 2025 18:38:25. | |
reprlib.py | File | 5437 bytes | June 03 2025 18:38:25. | |
rlcompleter.py | File | 7827 bytes | June 03 2025 18:38:25. | |
runpy.py | File | 13159 bytes | June 03 2025 18:38:25. | |
sched.py | File | 6351 bytes | June 03 2025 18:38:25. | |
secrets.py | File | 2028 bytes | June 03 2025 18:38:25. | |
selectors.py | File | 19671 bytes | June 03 2025 18:38:25. | |
shelve.py | File | 8560 bytes | June 03 2025 18:38:25. | |
shlex.py | File | 13501 bytes | June 03 2025 18:38:25. | |
shutil.py | File | 56517 bytes | June 03 2025 18:38:25. | |
signal.py | File | 2495 bytes | June 03 2025 18:38:25. | |
site.py | File | 22987 bytes | June 03 2025 18:38:25. | |
smtpd.py | File | 31181 bytes | June 03 2025 18:38:25. | |
smtplib.py | File | 45437 bytes | June 03 2025 18:38:25. | |
sndhdr.py | File | 7448 bytes | June 03 2025 18:38:25. | |
socket.py | File | 37557 bytes | June 03 2025 18:38:25. | |
socketserver.py | File | 27586 bytes | June 03 2025 18:38:25. | |
sre_compile.py | File | 231 bytes | June 03 2025 18:38:25. | |
sre_constants.py | File | 232 bytes | June 03 2025 18:38:25. | |
sre_parse.py | File | 229 bytes | June 03 2025 18:38:25. | |
ssl.py | File | 54305 bytes | June 03 2025 18:38:25. | |
stat.py | File | 5485 bytes | June 03 2025 18:38:25. | |
statistics.py | File | 47705 bytes | June 03 2025 18:38:25. | |
string.py | File | 11786 bytes | June 03 2025 18:38:25. | |
stringprep.py | File | 12917 bytes | June 03 2025 18:38:25. | |
struct.py | File | 257 bytes | June 03 2025 18:38:25. | |
subprocess.py | File | 88725 bytes | June 03 2025 18:38:25. | |
sunau.py | File | 18480 bytes | June 03 2025 18:38:25. | |
symtable.py | File | 10368 bytes | June 03 2025 18:38:25. | |
sysconfig.py | File | 30315 bytes | June 03 2025 18:38:25. | |
tabnanny.py | File | 11318 bytes | June 03 2025 18:38:25. | |
tarfile.py | File | 111838 bytes | June 03 2025 18:38:25. | |
telnetlib.py | File | 23301 bytes | June 03 2025 18:38:25. | |
tempfile.py | File | 31873 bytes | June 03 2025 18:38:25. | |
textwrap.py | File | 19718 bytes | June 03 2025 18:38:25. | |
this.py | File | 1003 bytes | June 03 2025 18:38:25. | |
threading.py | File | 58231 bytes | June 03 2025 18:38:25. | |
timeit.py | File | 13538 bytes | June 03 2025 18:38:25. | |
token.py | File | 2386 bytes | June 03 2025 18:38:25. | |
tokenize.py | File | 26336 bytes | June 03 2025 18:38:25. | |
trace.py | File | 29202 bytes | June 03 2025 18:38:25. | |
traceback.py | File | 40547 bytes | June 03 2025 18:38:25. | |
tracemalloc.py | File | 18047 bytes | June 03 2025 18:38:25. | |
tty.py | File | 879 bytes | June 03 2025 18:38:25. | |
types.py | File | 10067 bytes | June 03 2025 18:38:25. | |
typing.py | File | 120951 bytes | June 03 2025 18:38:25. | |
uu.py | File | 7341 bytes | July 23 2025 10:03:41. | |
uuid.py | File | 27597 bytes | June 03 2025 18:38:25. | |
warnings.py | File | 21110 bytes | June 03 2025 18:38:25. | |
wave.py | File | 21818 bytes | June 03 2025 18:38:25. | |
weakref.py | File | 21513 bytes | June 03 2025 18:38:25. | |
webbrowser.py | File | 25155 bytes | June 03 2025 18:38:25. | |
xdrlib.py | File | 5977 bytes | June 03 2025 18:38:25. | |
zipapp.py | File | 7535 bytes | June 03 2025 18:38:25. | |
zipfile.py | File | 93788 bytes | June 03 2025 18:38:25. | |
zipimport.py | File | 30897 bytes | June 03 2025 18:38:25. |
"""Tokenization help for Python programs. tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for determining source file encoding. It accepts a readline-like method which is called repeatedly to get the next line of input (or b"" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ __author__ = 'Ka-Ping Yee <ping@lfw.org>' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') from builtins import open as _builtin_open from codecs import lookup, BOM_UTF8 import collections import functools from io import TextIOWrapper import itertools as _itertools import re import sys from token import * from token import EXACT_TOKEN_TYPES cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) import token __all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding", "untokenize", "TokenInfo"] del token class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' Binnumber = r'0[bB](?:_?[01])+' Octnumber = r'0[oO](?:_?[0-7])+' Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) Expfloat = r'[0-9](?:_?[0-9])*' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Return the empty string, plus all of the valid string prefixes. def _all_string_prefixes(): # The valid string prefixes. Only contain the lower case versions, # and don't contain any permutations (include 'fr', but not # 'rf'). The various permutations will be generated. 
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result


@functools.lru_cache
def _compile(expr):
    return re.compile(expr, re.UNICODE)


# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
del _prefix

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
del t, u

tabsize = 8


class TokenError(Exception): pass


class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
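    # For example, 'UTF_8' normalizes to 'utf-8' and 'latin-1' to
    #  'iso-8859-1', while an unrecognized name is returned unchanged.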
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc


def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename, encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.
    Alternatively, readline can be a callable function terminating with
    StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token which tells
    you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                                (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
""" return _tokenize(readline, None) def main(): import argparse # Helper error handling routines def perror(message): sys.stderr.write(message) sys.stderr.write('\n') def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with _builtin_open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "<stdin>" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise def _generate_tokens_from_c_tokenizer(source): """Tokenize a source reading Python code as unicode strings using the internal C tokenizer""" import _tokenize as c_tokenizer for info in c_tokenizer.TokenizerIter(source): tok, type, lineno, end_lineno, col_off, end_col_off, line = info yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line) if __name__ == "__main__": main()