Current Path: /opt/alt/python310/lib64/python3.10
Operating System: Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64 | Software: Apache | Server IP: 162.0.232.56 | Your IP: 216.73.216.111 | Domains: 1034 domain(s) | Permission: [ 0755 ]
Name | Type | Size | Last Modified | Actions |
---|---|---|---|---|
__pycache__ | Directory | - | - | |
asyncio | Directory | - | - | |
collections | Directory | - | - | |
concurrent | Directory | - | - | |
config-3.10-x86_64-linux-gnu | Directory | - | - | |
ctypes | Directory | - | - | |
curses | Directory | - | - | |
dbm | Directory | - | - | |
distutils | Directory | - | - | |
email | Directory | - | - | |
encodings | Directory | - | - | |
ensurepip | Directory | - | - | |
html | Directory | - | - | |
http | Directory | - | - | |
importlib | Directory | - | - | |
json | Directory | - | - | |
lib-dynload | Directory | - | - | |
lib2to3 | Directory | - | - | |
logging | Directory | - | - | |
multiprocessing | Directory | - | - | |
pydoc_data | Directory | - | - | |
site-packages | Directory | - | - | |
sqlite3 | Directory | - | - | |
unittest | Directory | - | - | |
urllib | Directory | - | - | |
venv | Directory | - | - | |
wsgiref | Directory | - | - | |
xml | Directory | - | - | |
xmlrpc | Directory | - | - | |
zoneinfo | Directory | - | - | |
LICENSE.txt | File | 13936 bytes | June 03 2025 18:23:41. | |
__future__.py | File | 5155 bytes | June 03 2025 18:23:41. | |
__phello__.foo.py | File | 64 bytes | June 03 2025 18:23:41. | |
_aix_support.py | File | 3270 bytes | June 03 2025 18:23:41. | |
_bootsubprocess.py | File | 2675 bytes | June 03 2025 18:23:41. | |
_collections_abc.py | File | 32284 bytes | June 03 2025 18:23:41. | |
_compat_pickle.py | File | 8749 bytes | June 03 2025 18:23:41. | |
_compression.py | File | 5681 bytes | June 03 2025 18:23:41. | |
_markupbase.py | File | 14653 bytes | June 03 2025 18:23:41. | |
_osx_support.py | File | 21787 bytes | June 03 2025 18:23:41. | |
_py_abc.py | File | 6189 bytes | June 03 2025 18:23:41. | |
_pydecimal.py | File | 228676 bytes | June 03 2025 18:23:41. | |
_pyio.py | File | 94467 bytes | June 03 2025 18:23:41. | |
_sitebuiltins.py | File | 3128 bytes | June 03 2025 18:23:41. | |
_strptime.py | File | 25277 bytes | June 03 2025 18:23:41. | |
_sysconfigdata__linux_x86_64-linux-gnu.py | File | 41345 bytes | June 18 2025 15:53:46. | |
_sysconfigdata_d_linux_x86_64-linux-gnu.py | File | 40763 bytes | June 18 2025 15:42:57. | |
_threading_local.py | File | 7220 bytes | June 03 2025 18:23:41. | |
_weakrefset.py | File | 5923 bytes | June 03 2025 18:23:41. | |
abc.py | File | 6522 bytes | June 03 2025 18:23:41. | |
aifc.py | File | 32605 bytes | June 03 2025 18:23:41. | |
antigravity.py | File | 500 bytes | June 03 2025 18:23:41. | |
argparse.py | File | 98543 bytes | June 03 2025 18:23:41. | |
ast.py | File | 59900 bytes | June 03 2025 18:23:41. | |
asynchat.py | File | 11520 bytes | June 03 2025 18:23:41. | |
asyncore.py | File | 20268 bytes | June 03 2025 18:23:41. | |
base64.py | File | 20860 bytes | June 03 2025 18:23:41. | |
bdb.py | File | 32396 bytes | June 03 2025 18:23:41. | |
binhex.py | File | 14784 bytes | June 03 2025 18:23:41. | |
bisect.py | File | 3135 bytes | June 03 2025 18:23:41. | |
bz2.py | File | 11847 bytes | June 03 2025 18:23:41. | |
cProfile.py | File | 6360 bytes | June 03 2025 18:23:41. | |
calendar.py | File | 24575 bytes | June 03 2025 18:23:41. | |
cgi.py | File | 34111 bytes | June 03 2025 18:23:41. | |
cgitb.py | File | 12096 bytes | June 03 2025 18:23:41. | |
chunk.py | File | 5435 bytes | June 03 2025 18:23:41. | |
cmd.py | File | 14860 bytes | June 03 2025 18:23:41. | |
code.py | File | 10622 bytes | June 03 2025 18:23:41. | |
codecs.py | File | 36714 bytes | June 03 2025 18:23:41. | |
codeop.py | File | 5609 bytes | June 03 2025 18:23:41. | |
colorsys.py | File | 4017 bytes | June 03 2025 18:23:41. | |
compileall.py | File | 20252 bytes | June 03 2025 18:23:41. | |
configparser.py | File | 54612 bytes | June 03 2025 18:23:41. | |
contextlib.py | File | 25882 bytes | June 03 2025 18:23:41. | |
contextvars.py | File | 129 bytes | June 03 2025 18:23:41. | |
copy.py | File | 8681 bytes | June 03 2025 18:23:41. | |
copyreg.py | File | 7426 bytes | June 03 2025 18:23:41. | |
crypt.py | File | 3848 bytes | June 03 2025 18:23:41. | |
csv.py | File | 16030 bytes | June 03 2025 18:23:41. | |
dataclasses.py | File | 56390 bytes | June 03 2025 18:23:41. | |
datetime.py | File | 88086 bytes | June 03 2025 18:23:41. | |
decimal.py | File | 320 bytes | June 03 2025 18:23:41. | |
difflib.py | File | 83308 bytes | June 03 2025 18:23:41. | |
dis.py | File | 20020 bytes | June 03 2025 18:23:41. | |
doctest.py | File | 105143 bytes | June 03 2025 18:23:41. | |
enum.py | File | 39831 bytes | June 03 2025 18:23:41. | |
filecmp.py | File | 10178 bytes | June 03 2025 18:23:41. | |
fileinput.py | File | 16442 bytes | June 03 2025 18:23:41. | |
fnmatch.py | File | 6713 bytes | June 03 2025 18:23:41. | |
fractions.py | File | 28242 bytes | June 03 2025 18:23:41. | |
ftplib.py | File | 35496 bytes | June 03 2025 18:23:41. | |
functools.py | File | 38076 bytes | June 03 2025 18:23:41. | |
genericpath.py | File | 5246 bytes | June 03 2025 18:23:41. | |
getopt.py | File | 7489 bytes | June 03 2025 18:23:41. | |
getpass.py | File | 5990 bytes | June 03 2025 18:23:41. | |
gettext.py | File | 27266 bytes | June 03 2025 18:23:41. | |
glob.py | File | 7888 bytes | June 03 2025 18:23:41. | |
graphlib.py | File | 9573 bytes | June 03 2025 18:23:41. | |
gzip.py | File | 21849 bytes | June 03 2025 18:23:41. | |
hashlib.py | File | 10229 bytes | June 03 2025 18:23:41. | |
heapq.py | File | 22877 bytes | June 03 2025 18:23:41. | |
hmac.py | File | 7717 bytes | June 03 2025 18:23:41. | |
imaplib.py | File | 55218 bytes | June 03 2025 18:23:41. | |
imghdr.py | File | 3808 bytes | June 03 2025 18:23:41. | |
imp.py | File | 10591 bytes | June 03 2025 18:23:41. | |
inspect.py | File | 124378 bytes | June 03 2025 18:23:41. | |
io.py | File | 4196 bytes | June 03 2025 18:23:41. | |
ipaddress.py | File | 80837 bytes | June 03 2025 18:23:41. | |
keyword.py | File | 1061 bytes | June 03 2025 18:23:41. | |
linecache.py | File | 5690 bytes | June 03 2025 18:23:41. | |
locale.py | File | 78124 bytes | June 03 2025 18:23:41. | |
lzma.py | File | 13277 bytes | June 03 2025 18:23:41. | |
mailbox.py | File | 78794 bytes | June 03 2025 18:23:41. | |
mailcap.py | File | 9116 bytes | June 03 2025 18:23:41. | |
mimetypes.py | File | 22539 bytes | June 03 2025 18:23:41. | |
modulefinder.py | File | 24401 bytes | June 03 2025 18:23:41. | |
netrc.py | File | 5747 bytes | June 03 2025 18:23:41. | |
nntplib.py | File | 41023 bytes | June 03 2025 18:23:41. | |
ntpath.py | File | 29944 bytes | June 03 2025 18:23:41. | |
nturl2path.py | File | 2887 bytes | June 03 2025 18:23:41. | |
numbers.py | File | 10348 bytes | June 03 2025 18:23:41. | |
opcode.py | File | 5902 bytes | June 03 2025 18:23:41. | |
operator.py | File | 10751 bytes | June 03 2025 18:23:41. | |
optparse.py | File | 60369 bytes | June 03 2025 18:23:41. | |
os.py | File | 39557 bytes | June 03 2025 18:23:41. | |
pathlib.py | File | 49575 bytes | June 03 2025 18:23:41. | |
pdb.py | File | 63238 bytes | June 03 2025 18:23:41. | |
pickle.py | File | 64949 bytes | June 03 2025 18:23:41. | |
pickletools.py | File | 93486 bytes | June 03 2025 18:23:41. | |
pipes.py | File | 8914 bytes | June 03 2025 18:23:41. | |
pkgutil.py | File | 24576 bytes | June 03 2025 18:23:41. | |
platform.py | File | 42036 bytes | June 03 2025 18:23:41. | |
plistlib.py | File | 28352 bytes | June 03 2025 18:23:41. | |
poplib.py | File | 15198 bytes | June 03 2025 18:23:41. | |
posixpath.py | File | 16436 bytes | June 03 2025 18:23:41. | |
pprint.py | File | 24444 bytes | June 03 2025 18:23:41. | |
profile.py | File | 22896 bytes | June 03 2025 18:23:41. | |
pstats.py | File | 29326 bytes | June 03 2025 18:23:41. | |
pty.py | File | 5213 bytes | June 03 2025 18:23:41. | |
py_compile.py | File | 7892 bytes | June 18 2025 15:40:22. | |
pyclbr.py | File | 11396 bytes | June 03 2025 18:23:41. | |
pydoc.py | File | 109603 bytes | June 03 2025 18:23:41. | |
queue.py | File | 11496 bytes | June 03 2025 18:23:41. | |
quopri.py | File | 7281 bytes | June 03 2025 18:23:41. | |
random.py | File | 33221 bytes | June 03 2025 18:23:41. | |
re.py | File | 15860 bytes | June 03 2025 18:23:41. | |
reprlib.py | File | 5267 bytes | June 03 2025 18:23:41. | |
rlcompleter.py | File | 7817 bytes | June 03 2025 18:23:41. | |
runpy.py | File | 13111 bytes | June 03 2025 18:23:41. | |
sched.py | File | 6351 bytes | June 03 2025 18:23:41. | |
secrets.py | File | 2036 bytes | June 03 2025 18:23:41. | |
selectors.py | File | 19536 bytes | June 03 2025 18:23:41. | |
shelve.py | File | 8560 bytes | June 03 2025 18:23:41. | |
shlex.py | File | 13501 bytes | June 03 2025 18:23:41. | |
shutil.py | File | 54572 bytes | June 03 2025 18:23:41. | |
signal.py | File | 2438 bytes | June 03 2025 18:23:41. | |
site.py | File | 22926 bytes | June 03 2025 18:23:41. | |
smtpd.py | File | 35178 bytes | June 03 2025 18:23:41. | |
smtplib.py | File | 45431 bytes | June 03 2025 18:23:41. | |
sndhdr.py | File | 7099 bytes | June 03 2025 18:23:41. | |
socket.py | File | 37006 bytes | June 03 2025 18:23:41. | |
socketserver.py | File | 27296 bytes | June 03 2025 18:23:41. | |
sre_compile.py | File | 27973 bytes | June 03 2025 18:23:41. | |
sre_constants.py | File | 7177 bytes | June 03 2025 18:23:41. | |
sre_parse.py | File | 40779 bytes | June 03 2025 18:23:41. | |
ssl.py | File | 53895 bytes | June 03 2025 18:23:41. | |
stat.py | File | 5485 bytes | June 03 2025 18:23:41. | |
statistics.py | File | 43205 bytes | June 03 2025 18:23:41. | |
string.py | File | 10566 bytes | June 03 2025 18:23:41. | |
stringprep.py | File | 12917 bytes | June 03 2025 18:23:41. | |
struct.py | File | 257 bytes | June 03 2025 18:23:41. | |
subprocess.py | File | 84917 bytes | June 03 2025 18:23:41. | |
sunau.py | File | 18158 bytes | June 03 2025 18:23:41. | |
symtable.py | File | 10217 bytes | June 03 2025 18:23:41. | |
sysconfig.py | File | 27609 bytes | June 03 2025 18:23:41. | |
tabnanny.py | File | 11312 bytes | June 03 2025 18:23:41. | |
tarfile.py | File | 111609 bytes | June 03 2025 18:23:41. | |
telnetlib.py | File | 23254 bytes | June 03 2025 18:23:41. | |
tempfile.py | File | 29469 bytes | June 03 2025 18:23:41. | |
textwrap.py | File | 19772 bytes | June 03 2025 18:23:41. | |
this.py | File | 1003 bytes | June 03 2025 18:23:41. | |
threading.py | File | 56742 bytes | June 18 2025 15:40:22. | |
timeit.py | File | 13508 bytes | June 03 2025 18:23:41. | |
token.py | File | 2386 bytes | June 03 2025 18:23:41. | |
tokenize.py | File | 25921 bytes | June 03 2025 18:23:41. | |
trace.py | File | 29229 bytes | June 03 2025 18:23:41. | |
traceback.py | File | 26222 bytes | June 03 2025 18:23:41. | |
tracemalloc.py | File | 18047 bytes | June 03 2025 18:23:41. | |
tty.py | File | 879 bytes | June 03 2025 18:23:41. | |
types.py | File | 10117 bytes | June 03 2025 18:23:41. | |
typing.py | File | 92557 bytes | June 03 2025 18:23:41. | |
uu.py | File | 7277 bytes | June 18 2025 15:55:11. | |
uuid.py | File | 27500 bytes | June 03 2025 18:23:41. | |
warnings.py | File | 19688 bytes | June 03 2025 18:23:41. | |
wave.py | File | 18004 bytes | June 03 2025 18:23:41. | |
weakref.py | File | 21560 bytes | June 03 2025 18:23:41. | |
webbrowser.py | File | 24258 bytes | June 03 2025 18:23:41. | |
xdrlib.py | File | 5913 bytes | June 03 2025 18:23:41. | |
zipapp.py | File | 7535 bytes | June 03 2025 18:23:41. | |
zipfile.py | File | 90201 bytes | June 03 2025 18:23:41. | |
zipimport.py | File | 30891 bytes | June 03 2025 18:23:41. |
"""Tokenization help for Python programs. tokenize(readline) is a generator that breaks a stream of bytes into Python tokens. It decodes the bytes according to PEP-0263 for determining source file encoding. It accepts a readline-like method which is called repeatedly to get the next line of input (or b"" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators. Additionally, all token lists start with an ENCODING token which tells you which encoding was used to decode the bytes stream. """ __author__ = 'Ka-Ping Yee <ping@lfw.org>' __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' 'Michael Foord') from builtins import open as _builtin_open from codecs import lookup, BOM_UTF8 import collections import functools from io import TextIOWrapper import itertools as _itertools import re import sys from token import * from token import EXACT_TOKEN_TYPES cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII) blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII) import token __all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding", "untokenize", "TokenInfo"] del token class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): def __repr__(self): annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % self._replace(type=annotated_type)) @property def exact_type(self): if self.type == OP and self.string in EXACT_TOKEN_TYPES: return EXACT_TOKEN_TYPES[self.string] else: return self.type def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' # Note: we use unicode matching for names ("\w") but ascii matching for # number literals. Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'\w+' Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' Binnumber = r'0[bB](?:_?[01])+' Octnumber = r'0[oO](?:_?[0-7])+' Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) Expfloat = r'[0-9](?:_?[0-9])*' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Return the empty string, plus all of the valid string prefixes. def _all_string_prefixes(): # The valid string prefixes. Only contain the lower case versions, # and don't contain any permutations (include 'fr', but not # 'rf'). The various permutations will be generated. 
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

@functools.lru_cache
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.
    Alternatively, readline can be a callable function terminating with
    StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
""" return _tokenize(readline, None) def main(): import argparse # Helper error handling routines def perror(message): sys.stderr.write(message) sys.stderr.write('\n') def error(message, filename=None, location=None): if location: args = (filename,) + location + (message,) perror("%s:%d:%d: error: %s" % args) elif filename: perror("%s: error: %s" % (filename, message)) else: perror("error: %s" % message) sys.exit(1) # Parse the arguments and options parser = argparse.ArgumentParser(prog='python -m tokenize') parser.add_argument(dest='filename', nargs='?', metavar='filename.py', help='the file to tokenize; defaults to stdin') parser.add_argument('-e', '--exact', dest='exact', action='store_true', help='display token names using the exact type') args = parser.parse_args() try: # Tokenize the input if args.filename: filename = args.filename with _builtin_open(filename, 'rb') as f: tokens = list(tokenize(f.readline)) else: filename = "<stdin>" tokens = _tokenize(sys.stdin.readline, None) # Output the tokenization for token in tokens: token_type = token.type if args.exact: token_type = token.exact_type token_range = "%d,%d-%d,%d:" % (token.start + token.end) print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) except IndentationError as err: line, column = err.args[1][1:3] error(err.args[0], filename, (line, column)) except TokenError as err: line, column = err.args[1] error(err.args[0], filename, (line, column)) except SyntaxError as err: error(err, filename) except OSError as err: error(err) except KeyboardInterrupt: print("interrupted\n") except Exception as err: perror("unexpected error: %s" % err) raise if __name__ == "__main__": main()