SILENT KILLER Panel

System       : Linux premium131.web-hosting.com 4.18.0-553.44.1.lve.el8.x86_64 #1 SMP Thu Mar 13 14:29:12 UTC 2025 x86_64
Software     : Apache
Server IP    : 162.0.232.56 | Your IP: 216.73.216.111
Domains      : 1034 domain(s)
Permission   : 0755

Files and Folders in: /opt/cloudlinux/venv/lib64/python3.11/site-packages/lvestats/plugins/generic

Name                       Type       Size          Last Modified
__pycache__                Directory  -             -
burster                    Directory  -             -
__init__.py                File       219 bytes     May 30 2025 10:30:46
aggregators.py             File       5576 bytes    May 30 2025 10:30:46
analyzers.py               File       9609 bytes    May 30 2025 10:30:46
cleaners.py                File       4189 bytes    May 30 2025 10:30:46
cm_collector.py            File       10833 bytes   May 30 2025 10:30:46
collectors.py              File       3388 bytes    May 30 2025 10:30:46
dbgov_saver.py             File       6898 bytes    May 30 2025 10:30:46
dbsaver.py                 File       5790 bytes    May 30 2025 10:30:46
dbsaver_x60.py             File       10267 bytes   May 30 2025 10:30:46
lvestats_memory_usage.py   File       1573 bytes    May 30 2025 10:30:46
persistors.py              File       13830 bytes   May 30 2025 10:30:46
snapshot_saver.py          File       17095 bytes   May 30 2025 10:30:46
statsnotifier.py           File       51491 bytes   May 30 2025 10:30:46

Reading File: /opt/cloudlinux/venv/lib64/python3.11/site-packages/lvestats/plugins/generic/dbsaver.py

# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT

import logging

from sqlalchemy import insert, select
from sqlalchemy.orm import sessionmaker

from lvestats.core.plugin import LveStatsPlugin
from lvestats.lib.commons.func import get_chunks, reboot_lock
from lvestats.orm import user as user_class
from lvestats.orm.history import history
from lvestats.orm.servers import servers


class DBSaver(LveStatsPlugin):
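    # Periodically persists per-LVE resource usage into the history table and
    # keeps the servers table's recorded LVE version in sync with lve_data.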
    def __init__(self):
        self.log = logging.getLogger('plugin.DBSaver')
        self.now = 0  # This changes in MainLoop
        self.config = None
        self.period = 60
        self.server_id = 'localhost'

    def set_config(self, config):
        self.period = int(config.get('db_timeout', self.period))
        self.server_id = config.get('server_id', self.server_id)

    def execute(self, lve_data):
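        # Build the statements up front: a bulk INSERT for per-LVE usage rows,
        # plus a select/insert/update trio used to upsert this server's
        # current LVE version into the servers table.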
        sql_insert_query = insert(history)

        sql_select_servers = select([servers]).where(servers.server_id == self.server_id)
        sql_insert_servers = insert(servers)
        sql_update_servers = servers.__table__.update().where(servers.server_id == self.server_id)

        with reboot_lock():
            conn = self.engine.connect()
            tx = conn.begin()
            try:
                row = conn.execute(sql_select_servers)
                if row.returns_rows:
                    res = row.fetchone()
                    if res is not None:
                        if res['lve_version'] != lve_data['LVE_VERSION']:
                            conn.execute(sql_update_servers.values({'lve_version': lve_data['LVE_VERSION']}))
                    else:
                        conn.execute(sql_insert_servers, server_id=self.server_id, lve_version=lve_data['LVE_VERSION'])
                else:
                    conn.execute(sql_insert_servers, server_id=self.server_id, lve_version=lve_data['LVE_VERSION'])

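                # Flatten each LVE's usage snapshot into a plain dict matching
                # the history table columns; float counters are rounded to ints.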
                sql_insert_list = []
                for lve_id, v in lve_data.get('lve_usage', {}).items():
                    sql_insert_list.append(
                        {
                            'id': lve_id,
                            'cpu': int(round(v.cpu_usage)),
                            'cpu_limit': v.lcpu,
                            'cpu_fault': v.cpu_fault,
                            'mep': v.mep,
                            'mep_limit': v.lep,
                            'io': int(round(v.io_usage)),
                            'io_limit': v.io,
                            'mem': int(round(v.mem_usage)),
                            'mem_limit': v.lmem,
                            'mem_fault': v.mem_fault,
                            'mep_fault': v.mep_fault,
                            'created': int(round(self.now)),
                            'server_id': self.server_id,
                            'lmemphy': v.lmemphy,
                            'memphy': int(round(v.memphy)),
                            'memphy_fault': v.memphy_fault,
                            'lnproc': v.lnproc,
                            'nproc': int(round(v.nproc)),
                            'nproc_fault': v.nproc_fault,
                            'io_fault': v.io_fault,
                            'iops_fault': v.iops_fault,
                            'liops': v.liops,
                            'iops': int(round(v.iops)),
                        }
                    )

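                # Insert in bounded chunks; on OverflowError, retry in smaller
                # chunks only to pinpoint and log the offending rows, then
                # re-raise so the surrounding transaction is rolled back.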
                for chunk in get_chunks(sql_insert_list):
                    try:
                        conn.execute(sql_insert_query, chunk)
                    except OverflowError:
                        self._try_executing_small_chunks(conn, sql_insert_query, chunk)
                        raise
                tx.commit()
            except Exception:
                tx.rollback()
                raise
            finally:
                conn.close()

    def _try_executing_small_chunks(self, conn, query, insert_params, small_chunk_size=10):
        """Try executing smaller chunks so that it will be feasible to log the one causing error.
        In case when data contains value which is bigger than column type can handle, OveflowError is raised.

        :param sqlalchemy.engine.Connection conn: db engine
        :param sqlalchemy.sql.expression.Insert query: query to execute
        :param typing.List[typing.Dict] insert_params: list of params to insert
        :param int chunk_length: length of small chunks
        """
        for small_chunk in get_chunks(insert_params, small_chunk_size):
            try:
                conn.execute(query, small_chunk)
            except OverflowError:
                self.log.error('Overflow detected in %s', small_chunk)
                break


class DbUsernamesSaver(LveStatsPlugin):
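    # Optional plugin: records uid-to-username mappings once an hour when
    # collect_usernames is enabled in the config.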
    def __init__(self):
        self.period = 60 * 60  # once an hour
        self.engine = None
        self.enabled = True

    def set_config(self, config):
        self.enabled = config.get('collect_usernames', 'false').lower() == 'true'

    def set_db_engine(self, engine):
        self.engine = engine

    def execute(self, lve_data):
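        # Each entry in lve_data['users'] is a (uid, user_name, server_id)
        # tuple; merge() acts as an upsert against the user table.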
        if self.enabled:
            session = sessionmaker(bind=self.engine)()
            for user_info in lve_data['users']:
                user = user_class()
                user.uid, user.user_name, user.server_id = user_info
                try:
                    session.merge(user)
                    session.flush()
                # workaround: sqlalchemy cannot detect duplicates on a composite key during merge
                except Exception:
                    session.rollback()

            session.commit()
            session.close()
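
Note: get_chunks is imported from lvestats.lib.commons.func but its body is
not part of this dump. As a minimal sketch, assuming it simply yields
fixed-size slices of a list (the default chunk size of 100 here is an
assumption, not the library's actual value), it would look like:

def get_chunks(items, chunk_size=100):
    # Hypothetical sketch: yield successive chunk_size-sized slices of items.
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

It is called above as get_chunks(sql_insert_list) for the default batch size,
or get_chunks(insert_params, small_chunk_size) to shrink the batches when
hunting for the row that overflows a column.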
