"""
Test the sysbench workload and save the result/logs into database.
Configuration is read from sb_setting.py and can be overwrited by
command line options.
"""

import argparse
import concurrent.futures
import datetime
from psycopg2.extras import Json
import re

import logging
import os
import sys
import time

import lib

# Settings module; populated in __main__ after parsing the command line.
S = None


def init_meta(cursor):
    """Ensure the sb_result table exists on the result database.

    Idempotent: uses CREATE TABLE IF NOT EXISTS, so it is safe to call
    on every startup.
    """
    ddl = """
CREATE TABLE IF NOT EXISTS sb_result(
 vendor text,
 declaim_spec text,
 rspec text,
 instance_name text,
 table_count int,
 table_size int,
 degree  int,
 start_time  timestamp,
 end_time  timestamp,
 gucs jsonb,
 ping_ms jsonb,
 select1 jsonb,
 read_ahead int,
 tps  float8,
 qps  float8,
 full_sb_log text,
 lua text,
 id serial primary key,
 extra jsonb
)"""
    cursor.execute(ddl)


sb_basic = "%s %s --db-driver=pgsql --pgsql-db=%s --pgsql-user=%s --pgsql-password='%s' "
sb_basic += " --pgsql-port=%s  --pgsql-host=%s "
sb_basic += "--oltp-tables-count=%s --oltp-table-size=%s "


def init_sb():
    """Build the sbtest data set via `sysbench ... prepare`.

    Connection and sizing parameters come from the module-global
    settings object ``S`` (loaded in ``__main__``).
    """
    base_cmd = sb_basic % (S.SYSBENCH, S.LUA, S.PGDATABASE, S.PGUSER,
                           S.PGPASSWORD, S.PGPORT, S.PGHOST,
                           S.SB_TABLECOUNT, S.SB_TABLESIZE)
    lib._run_os_cmd('%s prepare' % base_cmd)


def _average_metric(output, metric):
    """Average every `<metric>: <number>` reading in sysbench's output.

    Sysbench prints one reading per --report-interval tick; we average
    them over the whole run.

    Raises ValueError with a clear message when the output contains no
    readings (e.g. the run failed or the output format changed) —
    previously this case crashed with an opaque ZeroDivisionError.
    """
    pattern = r'%s:\s*([\d.]+)' % re.escape(metric)
    values = [float(v) for v in re.findall(pattern, output)]
    if not values:
        raise ValueError(
            "no '%s' readings found in sysbench output" % metric)
    return sum(values) / len(values)


def run_sb(args):
    """Run the sysbench `run` phase and return (average_tps, average_qps).

    The exact command line is also written to sb_cmd_<instance>.sh so
    the invocation can be replayed by hand.
    """
    cmd = sb_basic % (S.SYSBENCH, S.LUA, S.PGDATABASE, S.PGUSER, S.PGPASSWORD,
                      S.PGPORT, S.PGHOST, S.SB_TABLECOUNT, S.SB_TABLESIZE)
    cmd = '%s --time=%s --threads=%s  --report-interval=5 run' % (
        cmd, args.sb_run_duration, S.SB_DEGREE)
    with open(f'sb_cmd_{S.INSTANCE_NAME}.sh', 'w') as w:
        w.write(cmd)
    _, output = lib._run_os_cmd(cmd, True)

    return (_average_metric(output, 'tps'), _average_metric(output, 'qps'))


def cluster_sb(vacuum):
    """Run maintenance on every sbtest table, 16 tables at a time.

    vacuum=True issues VACUUM VERBOSE; vacuum=False issues CLUSTER on
    each table's primary-key index.  Each table gets its own psql
    invocation, fanned out over a process pool.
    """
    psql = 'psql -h%s -p%s -U%s %s' % (S.PGHOST,
                                       S.PGPORT, S.PGUSER, S.PGDATABASE)
    table_ids = range(1, S.SB_TABLECOUNT + 1)
    if vacuum:
        cmds = ['%s -c "vacuum verbose sbtest%d;"' % (psql, n)
                for n in table_ids]
    else:
        cmds = ['%s -c "cluster sbtest%d using sbtest%d_pkey;"' % (psql, n, n)
                for n in table_ids]
    with concurrent.futures.ProcessPoolExecutor(max_workers=16) as pool:
        # Drain the iterator so all commands actually execute.
        list(pool.map(lib._run_os_cmd, cmds))


def run_main(args, S):
    """Execute one full benchmark pass and record it in sb_result.

    Collects OS/GUC metadata, optionally clusters or vacuums the sbtest
    tables (consuming args.cluster / args.vacuum), checkpoints the
    target instance, runs sysbench, then inserts a single result row
    through the module-global ``result_cursor`` (created in
    ``__main__`` — this function must not be called before that setup).

    NOTE(review): the ``S`` parameter shadows the module-global ``S``;
    callers pass the same settings object, so behavior is unchanged.
    """
    extra = lib.get_extra_osparam()
    gucs = lib.get_gucs()

    # read_ahead is only collected for teledb; -1 marks "not measured".
    read_ahead = -1
    if S.VENDOR == 'teledb':
        read_ahead = lib.get_readahead()

    if args.quick:
        # Quick mode skips the slow ping-latency and select-1 baselines.
        ping_ms = {}
        select1_tps = {}
    else:
        ping_ms = lib.get_ping_latency()
        select1_tps = lib.get_select1_tps(64)

    extra['bgworker'] = lib.get_bgworkers()

    # Pre-benchmark maintenance; the chosen op is recorded in `extra`.
    if args.cluster:
        cluster_sb(False)
        extra['cluster'] = 1
    elif args.vacuum:
        cluster_sb(True)
        extra['vacuum'] = 1

    # Consume the one-shot flags so repeated calls in the same process
    # do not redo the cluster/vacuum step.
    args.cluster = False
    args.vacuum = False

    # Checkpoint the target so the run starts from a clean WAL state.
    cursor = lib.get_target_cursor()
    lib.log_msg("start to run checkpoint")
    cursor.execute('checkpoint')
    lib.log_msg("checkpoint completed")

    time.sleep(2)
    sb_start_time = datetime.datetime.now()
    tps, qps = run_sb(args)
    end_time = datetime.datetime.now()

    # Flush log handlers so S.LOGFILE is complete before we read it
    # back for storage in the result row.
    for handler in logging.getLogger().handlers:
        handler.flush()

    with open(S.LOGFILE) as r:
        text = r.read()

    time.sleep(2)
    extra['bgtasks'] = lib.get_bgworkers_task(sb_start_time)
    extra['dbsize'] = lib.get_database_size(S.PGDATABASE)

    # Column order must match the sb_result DDL in init_meta(); the id
    # column is filled explicitly from its sequence.
    result_cursor.execute(
        """
        insert into sb_result values(
        %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s,
        %s, %s, %s, %s, %s,
        %s, %s, nextval('sb_result_id_seq'), %s)""",
        (S.VENDOR, S.CLAIM_SPEC, S.ACTURAL_SPEC, S.INSTANCE_NAME,
         S.SB_TABLECOUNT, S.SB_TABLESIZE, S.SB_DEGREE,
         sb_start_time, end_time,
         Json(gucs), Json(ping_ms), Json(select1_tps), read_ahead,
         tps, qps, text, S.LUA, Json(extra))
    )

    lib.log_msg(
        "********************************************************************")
    if not args.regress:
        from pprint import pprint
        pprint(extra['bgtasks'])
        print(f"#archive_command: {gucs['archive_command']}")

        print(f"#read_ahead: {read_ahead}")
        print(f"#max_wal_size: {gucs['max_wal_size']}")
    lib.log_msg(f"#tps: {int(tps)}")
    lib.log_msg(
        "********************************************************************")
    # Cool-down so back-to-back passes don't bleed into each other.
    time.sleep(60)


if __name__ == '__main__':
    # Command-line options overwrite sb_setting.py values when given.
    parser = argparse.ArgumentParser(description='sysbench.py opts.')
    parser.add_argument('--conf', type=str,
                        default="sb_setting",
                        help="configure file")
    parser.add_argument("--vendor", type=str, choices=['teledb', 'ali', 'huwei'],
                        help="instance vendor.")
    parser.add_argument("--spec", type=str, help="the claimed spec.")
    parser.add_argument("--rspec", type=str, help="the real spec.")

    parser.add_argument('--cluster', action="store_true",
                        help="cluster table before sysbench")
    parser.add_argument('--vacuum', action="store_true",
                        help="vacuum table before sysbench")
    parser.add_argument('--quick', action="store_true",
                        help="quick mode, without select1 and ping")
    parser.add_argument("--sb_init", action="store_true",
                        help="init sb test, 64 * 1000w")
    parser.add_argument("--regress", action="store_true",
                        help="run regression")
    parser.add_argument("--sb_run_degree", type=int,
                        help="run with given degree")
    parser.add_argument("--loops", type=int, default=1,
                        help="how many loops to run")
    parser.add_argument("--sb_run_duration",  type=int, default=180,
                        help="run with given duration, unit: seconds")

    args = parser.parse_args()

    # Load the default configurations, then apply overrides.
    S = lib.set_conf(args.conf)

    print(f"dbsize: {lib.get_database_size(S.PGDATABASE)}")
    lib.print_psql_cmd(True)
    lib.print_psql_cmd(False)

    if args.vendor:
        S.VENDOR = args.vendor
    if args.spec:
        S.CLAIM_SPEC = args.spec
    if args.rspec:
        S.ACTURAL_SPEC = args.rspec
    if args.sb_run_degree:
        S.SB_DEGREE = args.sb_run_degree

    if args.sb_init:
        # Build the sbtest data set when requested.
        init_sb()

    os.environ['PGPASSWORD'] = S.PGPASSWORD

    lib.setup_logging(S.LOGFILE)
    lib.log_msg("init result db")

    # Module-global cursor: run_main() inserts result rows through it.
    result_cursor = lib.get_result_cursor()
    init_meta(result_cursor)
    lib.log_msg("init result db done")

    if not args.sb_init:
        if args.cluster:
            # If the data set was built before, cluster it first, then
            # clear the flag so run_main() doesn't redo the work.
            # BUG FIX: this line was `args.cluter = False` (typo),
            # which left args.cluster set and made the sys.exit(0)
            # below fire, skipping the benchmark entirely — unlike the
            # --vacuum path, which proceeded as intended.
            cluster_sb(False)
            args.cluster = False

        if args.vacuum:
            cluster_sb(True)
            args.vacuum = False

    # Both flags were cleared above unless --sb_init skipped the
    # maintenance step; in that combination we stop after data init.
    if args.cluster or args.vacuum:
        sys.exit(0)

    if args.regress:
        # Endless regression sweep: for each concurrency level, cluster
        # the tables (run_main consumes and clears args.cluster on the
        # first pass) and run three back-to-back passes.
        while True:
            for degree in (1, 4, 8, 64, 128):
                args.cluster = True
                S.SB_DEGREE = degree
                for _ in range(3):
                    run_main(args, S)

        sys.exit(1)  # NOTE(review): unreachable — the loop never breaks.
    for i in range(1, args.loops + 1):
        for guc1 in S.GUCS['guc1']:
            lib.change_guc_on_target(guc1)
            for guc2 in S.GUCS['guc2']:
                lib.change_guc_on_target(guc2)
                run_main(args, S)
        # Schedule maintenance for the NEXT iteration: run_main picks
        # these flags up on its first call and clears them.
        if i % 10 == 0:
            args.cluster = True
        elif i % 5 == 0:
            args.vacuum = True