import logging
import psycopg2
from psycopg2.extras import Json
import subprocess
import re
import importlib

# Cached psycopg2 cursors, created lazily by get_result_cursor() and
# get_target_cursor() on first use.
result_cursor = None
target_cursor = None

# Active settings module (host/port/user/password constants), installed
# by set_conf().
S = None


def set_conf(conf):
    """Load the named module as the active settings object.

    conf: dotted module path. The imported module is stored in the
    module-level global S and also returned for convenience.
    """
    global S
    module = importlib.import_module(conf)
    S = module
    return module


def setup_logging(filename):
    """Attach a file handler and a console handler to the root logger.

    The root logger is set to INFO; every message is written both to
    `filename` and to the console using a
    'YYYY-mm-dd HH:MM:SS - message' layout.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)

    formatter = logging.Formatter(
        '%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')

    # Same formatter on both destinations: the log file and stderr/stdout.
    for handler in (logging.FileHandler(filename), logging.StreamHandler()):
        handler.setFormatter(formatter)
        root.addHandler(handler)


def log_msg(message):
    """Log `message` at INFO and flush every root-logger handler so the
    line hits disk/console immediately (useful for long benchmark runs)."""
    logging.info(message)
    root = logging.getLogger()
    for h in root.handlers:
        h.flush()


# Run an OS command via the shell, logging its output.

def _run_os_cmd(cmd, quite=False):
    """Run a shell command and log what happened.

    Returns (returncode, stdout) on success or (returncode, stderr) on
    failure. With quite=True the command line is still logged, but its
    output/error text is not.

    NOTE(review): shell=True — `cmd` must only come from trusted
    configuration, never from external input.
    """
    log_msg(cmd)
    proc = subprocess.run(cmd, capture_output=True, text=True, shell=True)
    succeeded = proc.returncode == 0
    text = proc.stdout if succeeded else proc.stderr
    if not quite:
        log_msg("Output:" if succeeded else "Error:")
        log_msg(text)
    return proc.returncode, text


def change_guc_on_target(guc):
    """Apply an ALTER SYSTEM setting on the target server and reload its
    configuration.

    guc: a "name = value" fragment, or None to do nothing.
    NOTE(review): guc is interpolated straight into SQL — pass trusted
    values only.
    """
    if guc is None:
        return
    cursor = get_target_cursor()
    sql = f'alter system set {guc};'
    log_msg(sql)
    cursor.execute(sql)
    cursor.execute('select pg_reload_conf()')


def get_database_size(dbname):
    """Return the on-disk size of `dbname` on the target server as a
    human-readable string (e.g. '12 GB').

    The name is passed as a bound parameter instead of being formatted
    into the SQL text, so quoting/injection problems cannot occur.
    """
    cursor = get_target_cursor()
    cursor.execute(
        "select pg_size_pretty(pg_database_size(%s))", (dbname,))
    return cursor.fetchone()[0]

# Parsers for common tool output (pgbench, ping).


def run_pgbench_cmd(cmd):
    """Run a pgbench command line and parse its output.

    Returns the dict from parse_pgbench_output on success; on a non-zero
    exit code, a sentinel dict with tps/latency of -1 and the error text
    under 'err'.
    """
    ret, output = _run_os_cmd(cmd)
    if ret == 0:
        return parse_pgbench_output(output)
    logging.warning(output)
    return {'tps': -1, 'err': output, 'latency': -1}


def get_select1_tps(degree):
    """Measure round-trip throughput with a trivial 'select 1' workload.

    Writes a one-statement script, runs pgbench against the target for
    30 s at `degree` client connections (same shape as sysbench), and
    returns {'degree': ..., 'tps': ..., 'latency': ...}.
    """
    with open('select1.sql', 'w') as script:
        script.write("select 1;")
    # same as sysbench
    cmd = (f'pgbench -n -f select1.sql -h {S.PGHOST} -p {S.PGPORT} '
           f'-d {S.PGDATABASE} -U{S.PGUSER} -c {degree} -T30')
    _, output = _run_os_cmd(cmd)
    return {'degree': degree, **parse_pgbench_output(output)}


def parse_pgbench_output(output):
    """Extract average latency and tps from pgbench's stdout.

    Args:
        output: the full text a pgbench run printed.

    Returns:
        {'tps': float | None, 'latency': float | None}. A value is None
        when the corresponding line is missing (e.g. pgbench aborted),
        instead of raising TypeError on float(None) as before.
    """
    latency_match = re.search(r"latency average = ([\d.]+) ms", output)
    tps_match = re.search(r"tps = ([\d.]+)", output)
    return {
        'tps': float(tps_match.group(1)) if tps_match else None,
        'latency': float(latency_match.group(1)) if latency_match else None,
    }


def get_ping_latency():
    """Ping the target host 10 times and return {'min','avg','max'} rtt
    in milliseconds, parsed from ping's summary line."""
    _, output = _run_os_cmd("ping -c 10 %s" % S.PGHOST)
    rtt_re = r"rtt min/avg/max/mdev = (\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/\d+\.\d+ ms"
    m = re.search(rtt_re, output)
    # NOTE(review): assumes the ping succeeded — m is None otherwise.
    return {key: float(m.group(i + 1))
            for i, key in enumerate(('min', 'avg', 'max'))}


def get_result_cursor():
    """Return a shared cursor on the result (metadata) database,
    opening an autocommit connection on first use and caching the
    cursor in the module-level `result_cursor`."""
    global result_cursor
    if result_cursor is not None:
        return result_cursor
    conn = psycopg2.connect(dbname=S.RESULT_PGDATABASE,
                            user=S.RESULT_PGUSER,
                            password=S.RESULT_PGPASSWORD,
                            host=S.RESULT_PGHOST,
                            port=S.RESULT_PGPORT)
    conn.autocommit = True
    result_cursor = conn.cursor()
    return result_cursor


def get_target_cursor():
    """Return a shared cursor on the target (benchmarked) database.

    Mirrors get_result_cursor(): an autocommit connection is opened once
    and the cursor cached in the module-level `target_cursor`.
    """
    global target_cursor
    # Explicit None check for consistency with get_result_cursor (the
    # truthiness test relied on the cursor object always being truthy).
    if target_cursor is None:
        conn = psycopg2.connect(dbname=S.PGDATABASE,
                                user=S.PGUSER,
                                password=S.PGPASSWORD,
                                host=S.PGHOST,
                                port=S.PGPORT)
        conn.autocommit = True
        target_cursor = conn.cursor()
    return target_cursor


def pg_save_autotest(project, start_time, end_time, data):
    """Persist one autotest run into the result database's autotest table.

    `data` is adapted to jsonb via psycopg2's Json wrapper.
    """
    c = get_result_cursor()
    # Plain string literal (was a spurious f-string with nothing to
    # interpolate): the %s placeholders are filled by psycopg2, not Python.
    c.execute("INSERT INTO autotest(project, start_time, end_time, data) values(%s, %s, %s, %s)",
              (project, start_time, end_time, Json(data), ))


def get_extra_osparam():
    """Collect OS/hardware facts from the target host via pg_read_file.

    Requires superuser on the target. Returns a dict with:
      osversion  — /proc/sys/kernel/osrelease, stripped
      dbversion  — version() output, stripped
      meminfo    — {field: integer value} from /proc/meminfo
      cpuinfo    — raw /proc/cpuinfo text plus logical CPU count
      xfs        — mount options of /dev/vdb from /etc/fstab, if listed
    """
    cursor = get_target_cursor()
    cursor.execute("select pg_read_file('/proc/sys/kernel/osrelease'), pg_read_file('/proc/meminfo'), version(), pg_read_file('/proc/cpuinfo'), pg_read_file('/etc/fstab')")
    # Renamed from `os` to avoid shadowing the stdlib module name.
    os_release, meminfo_raw, pg_version, cpuinfo_raw, fstab_raw = cursor.fetchone()

    info = {
        'osversion': os_release.strip(),
        'dbversion': pg_version.strip(),
        'meminfo': {},
        'cpuinfo': {'raw': cpuinfo_raw, 'count': 0},
    }

    # /proc/meminfo lines look like "MemTotal:  16367652 kB" — keep the number.
    for entry in meminfo_raw.strip().split('\n'):
        field, amount = entry.split(':')
        info['meminfo'][field] = int(amount.strip().split()[0])

    # One "processor\t..." line per logical CPU in /proc/cpuinfo.
    info['cpuinfo']['count'] = sum(
        1 for entry in cpuinfo_raw.split('\n')
        if entry.startswith('processor\t'))

    # Record the data volume's mount options when /dev/vdb appears in fstab.
    for entry in fstab_raw.split('\n'):
        if entry.startswith('/dev/vdb'):
            info['xfs'] = entry.split()[-3]
    return info


def get_readahead():
    """Return the target's block-device readahead (read_ahead_kb for
    /dev/vdb) as an integer, read via pg_read_file."""
    cursor = get_target_cursor()
    cursor.execute(
        "select pg_read_file('/sys/block/vdb/queue/read_ahead_kb')::int")
    (value,) = cursor.fetchone()
    return value


def get_gucs():
    """Snapshot every server GUC as {name: setting-with-unit}."""
    cursor = get_target_cursor()
    cursor.execute(
        "select name, setting || coalesce(unit,'') from pg_settings")
    # fetchall() yields (name, value) pairs, which dict() consumes directly.
    return dict(cursor.fetchall())


def get_bgworkers():
    """Count current backends on the target grouped by backend_type,
    returned as {backend_type: count}."""
    cursor = get_target_cursor()
    cursor.execute(
        "select backend_type, count(*) from pg_stat_activity group by backend_type")
    return dict(cursor.fetchall())


def get_bgworkers_task(start_time):
    """Summarize background activity on the target since `start_time`.

    Returns:
        {'checkpoint': {'lsn': ..., 'time': ...} (empty if none newer),
         'autovacuum': {'relname': [table names], 'count': n}}
    """
    res = {'checkpoint': {}, 'autovacuum': {'relname': []}}

    cursor = get_target_cursor()
    cursor.execute(
        "select checkpoint_lsn, checkpoint_time " +
        "from pg_control_checkpoint() " +
        "where checkpoint_time > %s",
        (start_time, ))
    for lsn, time in cursor.fetchall():
        res['checkpoint'] = {'lsn': lsn, 'time': str(time)}

    cursor.execute(
        "select relname from pg_stat_user_tables where last_autovacuum>%s",
        (start_time,))

    # fetchall() yields 1-tuples; unpack so the list holds relation names,
    # not tuples (previously each entry was ('tablename',)).
    for (relname,) in cursor.fetchall():
        res['autovacuum']['relname'].append(relname)
    res['autovacuum']['count'] = len(res['autovacuum']['relname'])

    return res


def print_psql_cmd(target):
    """Print a copy-pasteable psql command line.

    target=True prints credentials for the benchmarked (target) database,
    otherwise for the result (metadata) database.
    """
    if target:
        print('Target conn str: ')
        host, port, db, user, password = (
            S.PGHOST, S.PGPORT, S.PGDATABASE, S.PGUSER, S.PGPASSWORD)
    else:
        print('Result conn str: ')
        host, port, db, user, password = (
            S.RESULT_PGHOST, S.RESULT_PGPORT, S.RESULT_PGDATABASE,
            S.RESULT_PGUSER, S.RESULT_PGPASSWORD)
    print(
        f"PGPASSWORD='{password}' psql -h{host} -p{port} -U{user} {db}")
