import copy
import logging
import subprocess
import os
import time
import argparse
import uuid
import shutil

from config import *

# Append every run to a single local log file.
logging.basicConfig(filename='run_case.log',
                    filemode='a',
                    format='%(asctime)s %(name)s - %(levelname)s - %(message)s',
                    level=logging.DEBUG
                    )
parser = argparse.ArgumentParser(description='run or generate fio testcases')
parser.add_argument('--file-only',
                    action='count',
                    default=0,
                    help='if this argument is present. only generate testcase files'
                    )
parser.add_argument('--case-dir',
                    default="",
                    help='specific testcases. e.g: --case-dir=/opt/testcases'
                    )
# NOTE(review): main() calls parser.parse_args() again and shadows this
# module-level `args`; it appears unused — confirm before removing.
args = parser.parse_args()


def print_msg():
    """Print a summary of the configured fio job parameters from config."""
    summary = f"""
Starting job.....
io types are: {IO_TYPES}
work dir are: {DIR_WORK}
output dir: {OUT_PUT_DIR}
iodepeths: {IO_DEPTHS}
block sizes: {BLOCK_SIZES}
file sizes: {FILE_SIZES}
number jobs: {NUM_JOBS}
nr files: {NR_FILES}
run time: {RUN_TIME}
    """
    print(summary)


def init_dirs():
    """
    Create the output and testcase directories if they do not exist.

    :return: None
    """
    for d in (OUT_PUT_DIR, TESTCASES_DIR):
        # exist_ok avoids the check-then-create race of the original
        # isdir() + makedirs() pair.
        os.makedirs(d, exist_ok=True)


def add_task_sections(dirs: list):
    """
    Build fio ``[task_*]`` sections for the given work directories.

    Creates up to SUB_DIR_NUM sub-directories ("sub0", "sub1", ...) under
    each entry of *dirs*, capped so the total task count stays within
    TASK_LIMIT (but always at least one per dir).

    :param dirs: list of work-directory paths, e.g. ["path1", "path2"]
    :return: newline-joined section text for the fio job file
    """
    sections = []
    sub_dir_num = SUB_DIR_NUM
    # Cap sub-dirs per work dir so len(dirs) * sub_dir_num <= TASK_LIMIT.
    if SUB_DIR_NUM > 0 and len(dirs) * SUB_DIR_NUM >= TASK_LIMIT:
        sub_dir_num = max(TASK_LIMIT // len(dirs), 1)

    sub_dirs = set()
    for d in dirs:
        for i in range(sub_dir_num):
            path = "{}/sub{}".format(d, i)
            # exist_ok avoids the original isdir() + makedirs() race.
            os.makedirs(path, exist_ok=True)
            sub_dirs.add(path)

    # Sort for deterministic section output (set iteration order varies
    # between runs, which made generated case files non-reproducible).
    ordered = sorted(sub_dirs)
    if PARALLEL_IO:
        # Single section driving all directories in parallel.
        sections.append("[task_%s]" % uuid.uuid4().hex)
        sections.append("directory={}".format(":".join(ordered)))
    else:
        # One section per directory, executed sequentially.
        for d in ordered:
            sections.append("[task_%s]" % uuid.uuid4().hex)
            sections.append("directory={}".format(d))

    return "\n".join(sections)


def gen_file_name(io_type, block_size, file_size, io_depth):
    """Build an fio case file name: iotype_blocksize_filesize_iodepth.

    Commas in *io_type* become dashes so the name stays
    underscore-delimited."""
    safe_type = io_type.replace(",", "-")
    return "_".join(str(part) for part in (safe_type, block_size, file_size, io_depth))


def run_cmd(cmd_list):
    """
    Run *cmd_list* as a subprocess, retrying up to 3 times on a non-zero
    exit status.  stdout/stderr are appended to stdout.log / stderr.log.

    :param cmd_list: argv-style command list, e.g. ["fio", "case", ...]
    :return: None
    """
    for attempt in range(3):
        # Context managers guarantee the log handles are closed even when
        # the loop exits early (the original leaked them on the success
        # path, because `break` skipped the close() calls).
        with open("stdout.log", "a+") as out, open("stderr.log", "a+") as err:
            job = subprocess.Popen(cmd_list, stderr=err, stdout=out)
            job.wait()
        if job.returncode == 0:
            break
        # logging.error, not logging.exception: we are not inside an
        # except block, so there is no traceback to attach.
        logging.error(
            "run fail. msg: {} retrying {}......".format(cmd_list, attempt + 1))


def run_fio_cases(case_file):
    """Run fio for every case file in *case_file*, writing each result to
    OUT_PUT_DIR under the case's base name."""
    for case in case_file:
        cmd = [
            "fio",
            case,
            "--output={}/{}".format(OUT_PUT_DIR, case.split("/")[-1]),
        ]
        logging.info("run: {}".format(cmd))
        run_cmd(cmd)
    logging.info("all testcases done")


def purge_char(char: str):
    """Remove whitespace characters (space, tab, CR, LF) from *char*."""
    return char.translate(str.maketrans("", "", " \t\r\n"))


def capacity2kb(size_string: str):
    """
    Convert a capacity string with a unit suffix (K/M/G/T/P, case
    insensitive, surrounding whitespace tolerated) to kilobytes.

    :param size_string: e.g. "4K", "1.5M", " 2g "
    :return: size in KB as an int
    Exits the process (status 1) on an unsupported unit suffix.
    """
    size_string = purge_char(size_string.upper())
    multipliers = {
        "K": 1,
        "M": 1024,
        "G": 1024 ** 2,
        "T": 1024 ** 3,
        "P": 1024 ** 4,
    }
    unit = size_string[-1:]
    if unit not in multipliers:
        print("not supported capacity of unit: `%s`" % unit)
        exit(1)
    # Multiply BEFORE truncating to int: the original did
    # int(float("1.5")) * 1024 == 1024; correct is 1536.
    return int(float(size_string[:-1]) * multipliers[unit])


def gen_fio_files():
    """
    Generate one fio case file per (io type, io depth, block size,
    file size) combination under TESTCASES_DIR.

    Combinations where file size < block size are skipped.

    :return: list of generated case file paths
    """
    fio_file_list = []
    # Read the template once; the original re-read it implicitly and also
    # deep-copied the template string per case — a no-op on immutable str,
    # since str.format already returns a new string.
    with open(JOB_TEMPLATE_NAME) as fio_tmp:
        source_tmp_str = fio_tmp.read()
    for iotype in IO_TYPES:
        iotype = purge_char(iotype)
        for iodep in IO_DEPTHS:
            for bs in BLOCK_SIZES:
                for fs in FILE_SIZES:
                    # file size must not be smaller than block size
                    if capacity2kb(fs) < capacity2kb(bs):
                        continue
                    file_name = gen_file_name(iotype, bs, fs, iodep)
                    values = {
                        "NRFILES": NR_FILES,
                        "NUMJOBS": NUM_JOBS,
                        "RUNTIME": RUN_TIME,
                        "FILESIZE": fs,
                        "IODEPTH": iodep,
                        "BS": bs,
                        "RW": iotype,
                        # rwmixread only applies to mixed read/write jobs
                        "RWMIXREAD": ("rwmixread={}".format(RWMIXREAD)
                                      if iotype == "rw,readwrite" else "\n")
                    }
                    case_text = "{}\n{}".format(
                        source_tmp_str.format(**values),
                        add_task_sections(DIR_WORK))
                    case_file = "%s/%s" % (TESTCASES_DIR, file_name)
                    with open(case_file, "w") as f:
                        f.write(case_text)
                    print("generate fio testcase file: %s/%s successful" %
                          (TESTCASES_DIR, file_name))
                    fio_file_list.append(case_file)

    return fio_file_list


def clean_up_env():
    """Delete the output and testcase directories, tolerating absent ones.

    :return: None
    """
    for d in (OUT_PUT_DIR, TESTCASES_DIR):
        print("deleting dir <%s>" % d)
        # ignore_errors: this runs BEFORE main() creates the dirs, so on a
        # fresh run they don't exist and a bare rmtree raised
        # FileNotFoundError, killing the script.
        shutil.rmtree(d, ignore_errors=True)


def tuning_case_jobs(files):
    """
    Reorder case files so each write case is immediately followed by the
    read cases sharing its block size and file size (writing first lays
    the data files down, so the paired reads are more efficient).

    Example: the generated order
    ['./testcases/read_1M_100M_16', './testcases/read_1M_300M_16',
     './testcases/write_1M_100M_16', './testcases/write_1M_300M_16']
    becomes
    ['./testcases/write_1M_100M_16', './testcases/read_1M_100M_16',
     './testcases/write_1M_300M_16', './testcases/read_1M_300M_16']

    :param files: iterable of case file paths (main() may pass a set)
    :return: reordered list of case file paths
    """
    # Normalize to a list first: with --case-dir, main() passes a set and
    # the original code crashed on files[i] indexing (sets don't index).
    files = list(files)
    remaining = list(files)
    ordered = []
    # io types considered "write" jobs
    write_types = {"write", "randwrite", "trimwrite"}
    # io types considered "read" jobs (file-name form: commas -> dashes)
    read_types = ["rw-readwrite", "read", "randrw", "randread"]

    for case in files:
        name = case.split("/")[-1]
        parts = name.split("_")
        if len(parts) != 4:
            # Not a generated case name (possible with --case-dir); the
            # original crashed unpacking here. Leave it in `remaining`.
            continue
        io_type, block_size, file_size = parts[0], parts[1], parts[2]
        if io_type not in write_types:
            continue
        if case not in ordered:
            ordered.append(case)
        remaining.remove(case)
        # Pull the matching read cases in right after this write case.
        for candidate in files:
            candidate_name = candidate.split("/")[-1]
            for read_type in read_types:
                keyword = "%s_%s_%s" % (read_type, block_size, file_size)
                if keyword in candidate_name:
                    if candidate not in ordered:
                        ordered.append(candidate)
                    try:
                        remaining.remove(candidate)
                    except ValueError:
                        pass  # already claimed by an earlier write case
    return ordered + remaining


def main():
    """
    Entry point: collect case files (from --case-dir or by generating
    them), reorder them write-first, then run them unless --file-only.
    """
    args = parser.parse_args()
    init_dirs()
    if args.case_dir:
        # Only the top level of --case-dir is scanned for case files.
        walk_result = list(os.walk(args.case_dir))
        # os.walk on a missing dir yields nothing; the original indexed
        # [0] first and died with IndexError before printing anything.
        # (Its `len(case_dir_root) == 0` check was dead code — an os.walk
        # tuple always has 3 elements.)
        if not walk_result or not walk_result[0][2]:
            print("no one or more testcase file in directory: '{}'".format(
                args.case_dir))
            exit(1)
        root, _, file_names = walk_result[0]
        print(file_names)
        files = {os.path.join(root, name) for name in file_names}
    else:
        files = gen_fio_files()
    # Write cases first, then their matching reads, for efficiency.
    r = tuning_case_jobs(files)

    # --file-only: stop after generating the case files.
    if not args.file_only:
        print_msg()
        time.sleep(3)
        run_fio_cases(r)


if __name__ == '__main__':
    # Wipe previous output/testcase dirs, then generate and/or run cases.
    clean_up_env()
    main()
