SILENT KILLERPanel

Current Path: > > opt > alt > python33 > lib64 > python3.3


Operation   : Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software     : Apache
Server IP    : 162.0.232.56 | Your IP: 216.73.216.111
Domains      : 1034 Domain(s)
Permission   : [ 0755 ]

Files and Folders in: //opt/alt/python33/lib64/python3.3

NameTypeSizeLast ModifiedActions
__pycache__ Directory - -
collections Directory - -
concurrent Directory - -
config-3.3m Directory - -
ctypes Directory - -
curses Directory - -
dbm Directory - -
distutils Directory - -
email Directory - -
encodings Directory - -
html Directory - -
http Directory - -
idlelib Directory - -
importlib Directory - -
json Directory - -
lib-dynload Directory - -
lib2to3 Directory - -
logging Directory - -
multiprocessing Directory - -
plat-linux Directory - -
pydoc_data Directory - -
site-packages Directory - -
sqlite3 Directory - -
test Directory - -
unittest Directory - -
urllib Directory - -
venv Directory - -
wsgiref Directory - -
xml Directory - -
xmlrpc Directory - -
__future__.py File 4584 bytes April 17 2024 16:58:21.
__phello__.foo.py File 64 bytes April 17 2024 16:58:20.
_compat_pickle.py File 4338 bytes April 17 2024 16:58:19.
_dummy_thread.py File 4769 bytes April 17 2024 16:58:20.
_markupbase.py File 14598 bytes April 17 2024 16:58:15.
_osx_support.py File 18855 bytes April 17 2024 16:58:20.
_pyio.py File 72905 bytes April 17 2024 16:58:17.
_strptime.py File 21674 bytes April 17 2024 16:58:20.
_sysconfigdata.py File 22842 bytes April 17 2024 16:58:20.
_threading_local.py File 7410 bytes April 17 2024 16:58:15.
_weakrefset.py File 5705 bytes April 17 2024 16:58:14.
abc.py File 8057 bytes April 17 2024 16:58:15.
aifc.py File 31054 bytes April 17 2024 16:58:21.
antigravity.py File 475 bytes April 17 2024 16:58:16.
argparse.py File 89069 bytes April 17 2024 16:58:20.
ast.py File 12142 bytes April 17 2024 16:58:20.
asynchat.py File 11588 bytes April 17 2024 16:58:19.
asyncore.py File 20753 bytes April 17 2024 16:58:21.
base64.py File 13986 bytes April 17 2024 16:58:17.
bdb.py File 21894 bytes April 17 2024 16:58:19.
binhex.py File 13708 bytes April 17 2024 16:58:14.
bisect.py File 2595 bytes April 17 2024 16:58:13.
bz2.py File 18473 bytes April 17 2024 16:58:20.
cProfile.py File 6361 bytes April 17 2024 16:58:14.
calendar.py File 22940 bytes April 17 2024 16:58:20.
cgi.py File 35554 bytes April 17 2024 16:58:20.
cgitb.py File 12041 bytes April 17 2024 16:58:21.
chunk.py File 5377 bytes April 17 2024 16:58:17.
cmd.py File 14860 bytes April 17 2024 16:58:14.
code.py File 10030 bytes April 17 2024 16:58:16.
codecs.py File 35956 bytes April 17 2024 16:58:15.
codeop.py File 5994 bytes April 17 2024 16:58:14.
colorsys.py File 3691 bytes April 17 2024 16:58:15.
compileall.py File 9743 bytes April 17 2024 16:58:14.
configparser.py File 49437 bytes April 17 2024 16:58:15.
contextlib.py File 9125 bytes April 17 2024 16:58:14.
copy.py File 8991 bytes April 17 2024 16:58:15.
copyreg.py File 6611 bytes April 17 2024 16:58:20.
crypt.py File 1879 bytes April 17 2024 16:58:14.
csv.py File 16185 bytes April 17 2024 16:58:15.
datetime.py File 74954 bytes April 17 2024 16:58:21.
decimal.py File 228558 bytes April 17 2024 16:58:19.
difflib.py File 82519 bytes April 17 2024 16:58:17.
dis.py File 10134 bytes April 17 2024 16:58:15.
doctest.py File 102933 bytes April 17 2024 16:58:15.
dummy_threading.py File 2815 bytes April 17 2024 16:58:14.
filecmp.py File 9597 bytes April 17 2024 16:58:15.
fileinput.py File 14256 bytes April 17 2024 16:58:17.
fnmatch.py File 3163 bytes April 17 2024 16:58:15.
formatter.py File 14930 bytes April 17 2024 16:58:15.
fractions.py File 23033 bytes April 17 2024 16:58:14.
ftplib.py File 40253 bytes April 17 2024 16:58:15.
functools.py File 13596 bytes April 17 2024 16:58:21.
genericpath.py File 3093 bytes April 17 2024 16:58:21.
getopt.py File 7488 bytes April 17 2024 16:58:20.
getpass.py File 5793 bytes April 17 2024 16:58:14.
gettext.py File 20637 bytes April 17 2024 16:58:20.
glob.py File 2838 bytes April 17 2024 16:58:14.
gzip.py File 24403 bytes April 17 2024 16:58:20.
hashlib.py File 6193 bytes April 17 2024 16:58:21.
heapq.py File 17997 bytes April 17 2024 16:58:13.
hmac.py File 4440 bytes April 17 2024 16:58:17.
imaplib.py File 50111 bytes April 17 2024 16:58:20.
imghdr.py File 3528 bytes April 17 2024 16:58:20.
imp.py File 9727 bytes April 17 2024 16:58:15.
inspect.py File 78960 bytes April 17 2024 16:58:19.
io.py File 3280 bytes April 17 2024 16:58:15.
ipaddress.py File 70303 bytes April 17 2024 16:58:20.
keyword.py File 2060 bytes April 17 2024 16:58:20.
linecache.py File 3864 bytes April 17 2024 16:58:16.
locale.py File 93215 bytes April 17 2024 16:58:19.
lzma.py File 17454 bytes April 17 2024 16:58:20.
macpath.py File 5617 bytes April 17 2024 16:58:15.
macurl2path.py File 2732 bytes April 17 2024 16:58:15.
mailbox.py File 79093 bytes April 17 2024 16:58:19.
mailcap.py File 7437 bytes April 17 2024 16:58:14.
mimetypes.py File 20735 bytes April 17 2024 16:58:19.
modulefinder.py File 23198 bytes April 17 2024 16:58:14.
netrc.py File 5747 bytes April 17 2024 16:58:17.
nntplib.py File 42786 bytes April 17 2024 16:58:13.
ntpath.py File 20437 bytes April 17 2024 16:58:14.
nturl2path.py File 2396 bytes April 17 2024 16:58:20.
numbers.py File 10398 bytes April 17 2024 16:58:20.
opcode.py File 5098 bytes April 17 2024 16:58:21.
optparse.py File 60346 bytes April 17 2024 16:58:20.
os.py File 34779 bytes April 17 2024 16:58:14.
os2emxpath.py File 4659 bytes April 17 2024 16:58:15.
pdb.py File 60653 bytes April 17 2024 16:58:16.
pickle.py File 47858 bytes April 17 2024 16:58:17.
pickletools.py File 81349 bytes April 17 2024 16:58:15.
pipes.py File 8916 bytes April 17 2024 16:58:20.
pkgutil.py File 21539 bytes April 17 2024 16:58:15.
platform.py File 50742 bytes April 17 2024 16:58:15.
plistlib.py File 14777 bytes April 17 2024 16:58:13.
poplib.py File 11372 bytes April 17 2024 16:58:13.
posixpath.py File 14254 bytes April 17 2024 16:58:16.
pprint.py File 12700 bytes April 17 2024 16:58:15.
profile.py File 21448 bytes April 17 2024 16:58:17.
pstats.py File 26372 bytes April 17 2024 16:58:14.
pty.py File 5055 bytes April 17 2024 16:58:14.
py_compile.py File 6717 bytes April 17 2024 16:58:19.
pyclbr.py File 13438 bytes April 17 2024 16:58:13.
pydoc.py File 101644 bytes April 17 2024 16:58:15.
queue.py File 8835 bytes April 17 2024 16:58:20.
quopri.py File 7315 bytes April 17 2024 16:58:20.
random.py File 25660 bytes April 17 2024 16:58:14.
re.py File 14973 bytes April 17 2024 16:58:17.
reprlib.py File 5110 bytes April 17 2024 16:58:15.
rlcompleter.py File 5526 bytes April 17 2024 16:58:21.
runpy.py File 10413 bytes April 17 2024 16:58:14.
sched.py File 6399 bytes April 17 2024 16:58:19.
shelve.py File 8243 bytes April 17 2024 16:58:20.
shlex.py File 11502 bytes April 17 2024 16:58:21.
shutil.py File 39147 bytes April 17 2024 16:58:20.
site.py File 21971 bytes April 17 2024 16:58:19.
smtpd.py File 30207 bytes April 17 2024 16:58:16.
smtplib.py File 38021 bytes April 17 2024 16:58:14.
sndhdr.py File 6219 bytes April 17 2024 16:58:20.
socket.py File 14913 bytes April 17 2024 16:58:20.
socketserver.py File 24196 bytes April 17 2024 16:58:21.
sre_compile.py File 16345 bytes April 17 2024 16:58:14.
sre_constants.py File 7231 bytes April 17 2024 16:58:14.
sre_parse.py File 30212 bytes April 17 2024 16:58:16.
ssl.py File 24478 bytes April 17 2024 16:58:19.
stat.py File 4304 bytes April 17 2024 16:58:19.
string.py File 9410 bytes April 17 2024 16:58:20.
stringprep.py File 12917 bytes April 17 2024 16:58:17.
struct.py File 238 bytes April 17 2024 16:58:16.
subprocess.py File 67578 bytes April 17 2024 16:58:14.
sunau.py File 17523 bytes April 17 2024 16:58:15.
symbol.py File 2051 bytes April 17 2024 16:58:14.
symtable.py File 7383 bytes April 17 2024 16:58:20.
sysconfig.py File 25174 bytes April 17 2024 16:58:20.
tabnanny.py File 11410 bytes April 17 2024 16:58:20.
tarfile.py File 88864 bytes April 17 2024 16:58:15.
telnetlib.py File 27349 bytes April 17 2024 16:58:15.
tempfile.py File 23013 bytes April 17 2024 16:58:14.
textwrap.py File 16488 bytes April 17 2024 16:58:14.
this.py File 1003 bytes April 17 2024 16:58:17.
threading.py File 45641 bytes April 17 2024 16:58:19.
timeit.py File 12395 bytes April 17 2024 16:58:15.
token.py File 3034 bytes April 17 2024 16:58:14.
tokenize.py File 24876 bytes April 17 2024 16:58:20.
trace.py File 31487 bytes April 17 2024 16:58:13.
traceback.py File 11982 bytes April 17 2024 16:58:20.
tty.py File 879 bytes April 17 2024 16:58:13.
types.py File 3167 bytes April 17 2024 16:58:14.
uu.py File 6766 bytes April 17 2024 16:58:14.
uuid.py File 22349 bytes April 17 2024 16:58:15.
warnings.py File 13825 bytes April 17 2024 16:58:15.
wave.py File 18579 bytes April 17 2024 16:58:15.
weakref.py File 11495 bytes April 17 2024 16:58:19.
webbrowser.py File 22913 bytes April 17 2024 16:58:20.
xdrlib.py File 5381 bytes April 17 2024 16:58:21.
zipfile.py File 66424 bytes April 17 2024 16:58:21.

Reading File: //opt/alt/python33/lib64/python3.3/csv.py

"""
csv.py - read/write/investigate CSV files
"""

import re
from _csv import Error, __version__, writer, reader, register_dialect, \
                 unregister_dialect, get_dialect, list_dialects, \
                 field_size_limit, \
                 QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
                 __doc__
from _csv import Dialect as _Dialect

from io import StringIO

__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
            "Error", "Dialect", "__doc__", "excel", "excel_tab",
            "field_size_limit", "reader", "writer",
            "register_dialect", "get_dialect", "list_dialects", "Sniffer",
            "unregister_dialect", "__version__", "DictReader", "DictWriter" ]

class Dialect:
    """Base class describing a CSV dialect.

    Not usable directly: subclass it (see csv.excel) and fill in the
    attributes delimiter, quotechar, escapechar, doublequote,
    skipinitialspace, lineterminator and quoting.
    """
    _name = ""
    _valid = False
    # Placeholder attribute values; concrete subclasses override these.
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only subclasses count as valid dialects; the abstract base
        # class itself never does.
        if type(self) is not Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate attribute checking to the C implementation.
        try:
            _Dialect(self)
        except TypeError as exc:
            # Re-raise as csv.Error for compatibility with py2.3.
            raise Error(str(exc))

class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# Make this dialect available to csv.reader/csv.writer by name.
register_dialect("excel", excel)

class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
# Note the registered name uses a hyphen ("excel-tab"), not an underscore.
register_dialect("excel-tab", excel_tab)

class unix_dialect(Dialect):
    """Describe the usual properties of Unix-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\n'
    # Unlike the excel dialect, quote every field unconditionally.
    quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)


class DictReader:
    """Read CSV rows as dictionaries keyed by field names.

    If *fieldnames* is omitted, the first row read supplies the keys.
    Surplus values in an over-long row are collected in a list stored
    under *restkey*; missing values in a short row are filled in with
    *restval*.
    """

    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # cached list of dict keys
        self.restkey = restkey          # key under which extra values go
        self.restval = restval          # filler for missing trailing fields
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        # Lazily pull the header row from the underlying reader.
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            self.fieldnames  # side effect: triggers the lazy header read
        row = next(self.reader)
        self.line_num = self.reader.line_num

        # Skip blank rows: returning one would just produce a dict full
        # of None values, which is rarely what the caller wants.
        while not row:
            row = next(self.reader)

        result = dict(zip(self.fieldnames, row))
        n_fields = len(self.fieldnames)
        n_values = len(row)
        if n_values > n_fields:
            # Long row: stash the surplus values under restkey.
            result[self.restkey] = row[n_fields:]
        elif n_values < n_fields:
            # Short row: pad the missing keys with restval.
            for key in self.fieldnames[n_values:]:
                result[key] = self.restval
        return result


class DictWriter:
    """Write dictionaries out as CSV rows in *fieldnames* order.

    *restval* supplies the value for any field name missing from a row
    dict.  *extrasaction* controls what happens when a row dict contains
    keys not listed in *fieldnames*: 'raise' raises ValueError, 'ignore'
    silently drops them.
    """

    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # column order for every row
        self.restval = restval          # filler for keys missing from a row
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
                             % extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        # A header row is just every field name mapped to itself.
        self.writerow(dict(zip(self.fieldnames, self.fieldnames)))

    def _dict_to_list(self, rowdict):
        # Reject unknown keys up front when extrasaction is 'raise'.
        if self.extrasaction == "raise":
            unknown = [key for key in rowdict if key not in self.fieldnames]
            if unknown:
                raise ValueError("dict contains fields not in fieldnames: "
                                 + ", ".join([repr(x) for x in unknown]))
        return [rowdict.get(field, self.restval) for field in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        # Convert every dict first, then hand the batch to the C writer.
        return self.writer.writerows(
            [self._dict_to_list(rowdict) for rowdict in rowdicts])

# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    # No complex type in this build: degrade gracefully to float so
    # Sniffer.has_header() can still attempt its numeric conversions.
    complex = float

class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    def __init__(self):
        # Fallback priority order used when more than one delimiter
        # is plausible.
        self.preferred = [',', '\t', ';', ' ', ':']


    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample.

        *delimiters*, if given, restricts the candidate delimiter
        characters.  Raises Error when no delimiter can be determined.
        """
        # Try the quote-based heuristic first; fall back to the
        # character-frequency heuristic when the sample has no
        # quoted fields.
        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect


    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.

        Returns (quotechar, doublequote, delimiter, skipinitialspace).
        """

        matches = []
        # Patterns are tried from most to least specific; the first one
        # that matches anything wins.  Raw strings keep the regex
        # backslashes literal.
        # BUG FIX: the third pattern's delim group used to be written
        # (?P<delim>>[^\w\n"\']) -- the stray '>' made it require a
        # literal '>' character, so a quoted field at end-of-line
        # (e.g.  a,"b"<EOL>) was never matched by this heuristic.
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)
        quotes = {}
        delims = {}
        spaces = 0
        for m in matches:
            # findall() returns tuples of group values; groupindex maps
            # a group name to its 1-based position in that tuple.
            n = regexp.groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = regexp.groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                # The last pattern has no delim group (single quoted
                # column with no delimiter at all).
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = regexp.groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        # The most frequently seen quote character wins.
        quotechar = max(quotes, key=quotes.get)

        if delims:
            delim = max(delims, key=delims.get)
            # If every delimiter occurrence was followed by a space,
            # assume skipinitialspace.
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
                               r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)



        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)


    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.

        Returns (delimiter, skipinitialspace).
        """

        data = list(filter(None, data.split('\n')))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = list(charFrequency[char].items())
                # Skip characters that never appear at all.
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = max(items, key=lambda x: x[1])
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - sum(item[1] for item in items))
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            # Relax the consistency requirement a little at a time until
            # at least one candidate delimiter emerges.
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = list(delims.keys())[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)


    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in list(columnTypes.keys()):

                for thisType in [int, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
SILENT KILLER Tool