import os
import yaml
import subprocess
import logging
import re
import time
import sys
import subprocess
import multiprocessing
import argparse
import json
import traceback
import GPUtil
from pathlib import Path
from datetime import datetime
from collections import Counter

# Result-file prefixes for the default (nvidia) platform; main() rebinds these
# module globals to the metax variants when --platform metax is selected.
COMPILE_FILE_PRE = 'nvidia_compile_bench_result'
RUN_FILE_PRE = 'nvidia_run_bench_result'
# Directory containing this script; used to locate bench_cases.yaml and as the
# default --log-dir / --report-dir.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))


class Benchmark:
    """A single benchmark case driven by one entry of bench_cases.yaml.

    Wraps three phases — compilation (make), an optional warm-up execution,
    and the timed run — and writes a JSON result file for each phase into
    ``log_dir``.
    """

    def __init__(self, args, config):
        """Build a case from the parsed CLI namespace and one YAML case dict.

        args   -- namespace from get_args(): platform, extra_compile_flags,
                  bench_dir, log_dir, clean, verbose, timeout.
        config -- one case dict with 'name', 'build', 'execution' and
                  'expected_output' sections.
        """
        self.platform = args.platform
        # Result-file prefixes are per-platform.  NOTE: the original code
        # assigned the module-level COMPILE_FILE_PRE/RUN_FILE_PRE here as
        # plain locals, which had no effect (missing `global`); instance
        # attributes make the platform override actually take effect.
        if args.platform == "metax":
            self.compile_file_pre = 'metax_compile_bench_result'
            self.run_file_pre = 'metax_run_bench_result'
        else:
            self.compile_file_pre = COMPILE_FILE_PRE
            self.run_file_pre = RUN_FILE_PRE

        self.MAKE_ARGS = []
        if args.extra_compile_flags:
            # Commas are accepted as flag separators on the CLI; make wants spaces.
            flags = args.extra_compile_flags.replace(',', ' ')
            self.MAKE_ARGS.append('EXTRA_CFLAGS={}'.format(flags))

        self.name = config['name']
        case_dir = config['build']['directory']
        if args.bench_dir:
            self.path = os.path.realpath(
                os.path.join(args.bench_dir, case_dir))
        else:
            # Default layout: benchmark sources live one level above this script.
            self.path = os.path.join(SCRIPT_DIR, '..', case_dir)

        self.make_cmd = config['build']['command'].split(' ')
        self.log_dir = args.log_dir
        if args.platform == "metax":
            # metax toolchain compiles CUDA-style sources with cucc.
            self.make_cmd.append('CC=cucc')
        self.binary = config['execution']['target_name']
        self.metric_unit = config['expected_output']['unit']
        self.args = config['execution']['parameters']
        self.res_regex = config['expected_output']['reg_pattern']
        # TODO parse other parameters
        self.clean = args.clean
        self.verbose = args.verbose
        self.timeout = args.timeout
        self.make_cmds()

    def make_cmds(self):
        """Build self.cmd, the argv list used to execute the benchmark binary.

        'parameters' may be a space-separated string or a list of values.
        """
        self.cmd = ["./" + self.binary]
        if isinstance(self.args, str):
            if self.args:
                self.cmd += self.args.split(' ')
        elif isinstance(self.args, list):
            self.cmd += [str(x) for x in self.args]

    @staticmethod
    def get_now_str():
        """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def compile(self):
        """Compile the case with make and write a JSON compile report.

        Returns the report dict.  Raises RuntimeError on a non-zero make exit
        code (and propagates OSError if make itself cannot be started).
        """
        start_time = time.time()
        if self.clean:
            subprocess.run(["make", "clean"], cwd=self.path).check_returncode()
            # required to make sure clean is done before building, despite run
            # waiting on the invoked executable
            time.sleep(1)

        # stderr is merged into stdout so the report captures one stream.
        proc = subprocess.run(self.make_cmd + self.MAKE_ARGS, cwd=self.path,
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                              encoding="utf-8")

        if self.verbose:
            print(proc.stdout)
        if proc.returncode != 0:
            print(f'Failed: compilation in {self.name}.\n')
            if proc.stdout:
                print(proc.stdout, file=sys.stderr)
            raise RuntimeError("Compile failed.")

        duration = time.time() - start_time
        result_json = {
            'date_time': self.get_now_str(),
            'module': self.name,
            'duration': duration * 1000,  # milliseconds
            'exit_code': proc.returncode,
            'output': str(proc.stdout) + str(proc.stderr),
            'error': proc.stderr  # always None: stderr is merged into stdout
        }
        # save compiler output
        with open(f'{self.log_dir}/{self.compile_file_pre}_{self.platform}_{self.name}.json',
                  "w", encoding="utf-8") as f:
            json.dump(result_json, f, ensure_ascii=False, indent=4)
        return result_json

    def warmup(self):
        """Best-effort warm-up execution of the benchmark binary.

        NOTE: the original guarded on `if not self.warmup:`, which tested the
        bound method itself (always truthy), so the guard never fired and the
        warm-up always ran; the dead guard has been removed.
        """
        try:
            subprocess.run(
                self.cmd, cwd=self.path, stdout=subprocess.PIPE,
                encoding="utf-8", timeout=self.timeout)
        except Exception:
            # Warm-up failures are non-fatal; the timed run reports real errors.
            print("Error WarmUp running: ", self.name)
            traceback.print_exc()

    def run(self):
        """Execute the benchmark once, parse its metric, and write a JSON report.

        Returns (result_dict, run_pass) where run_pass is False when the
        output contains 'FAIL' or the run raised (e.g. timeout).
        """
        cmd_str = ' '.join(self.cmd)
        start_time = time.time()
        run_pass = True
        try:
            proc = subprocess.run(
                self.cmd, cwd=self.path, stdout=subprocess.PIPE,
                encoding="utf-8", timeout=self.timeout)
            duration = (time.time() - start_time) * 1000
            exit_code = int(proc.returncode)
            out = proc.stdout
            if self.verbose:
                print(cmd_str)
                print(out)
            if 'FAIL' in out:
                run_pass = False
            res = re.findall(self.res_regex, out)
            std_err = proc.stderr  # always None: stderr is not captured here

            if not res:
                std_err = f'{str(std_err)} {self.name} ":\nno regex match for: {self.res_regex}'
                print(std_err)
                exit_code += 1
            # in case of multiple outputs sum them
            metric = sum(float(i) for i in res)
            result = {
                'date_time': self.get_now_str(),
                'module': self.name,
                'cmd': cmd_str,
                'duration': duration,
                'exit_code': exit_code,
                'output': str(proc.stdout) + str(std_err),
                'error': std_err,
                'metric_unit': self.metric_unit,
                'metric_value': metric
            }
        except Exception:
            # BUGFIX: the original referenced `res` here, which is unbound when
            # subprocess.run itself raised (e.g. TimeoutExpired) -> NameError
            # escaping the handler.  A crashed run also did not pass.
            run_pass = False
            print("Error running: ", self.name)
            error_message = traceback.format_exc()
            result = {
                'date_time': self.get_now_str(),
                'module': self.name,
                'cmd': cmd_str,
                'duration': 0,
                'exit_code': 1,
                'output': error_message,
                'error': error_message,
                'metric_unit': self.metric_unit,
                'metric_value': 0
            }
        # save run output
        with open(f'{self.log_dir}/{self.run_file_pre}_{self.platform}_{self.name}.json',
                  "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=4)
        return result, run_pass


def compile_bot(b):
    """Compile one Benchmark; worker function for the multiprocessing pool."""
    print("compiling: {}".format(b.name))
    result = b.compile()
    return result


def gen_json_files(log_dir, prefix, result):
    """Serialize `result` to {log_dir}/{prefix}.json, replacing any old file.

    open(..., 'w') truncates, so the previous explicit os.remove() was
    redundant.  Writes UTF-8 with ensure_ascii=False, matching the other JSON
    writers in this file.
    """
    output_file = f"{log_dir}/{prefix}.json"
    with open(output_file, 'w', encoding="utf-8") as outfile:
        json.dump(result, outfile, ensure_ascii=False, indent=4)
    print(f'ok.\nplease check result file: {output_file}')


def get_config_path():
    """Return the path to bench_cases.yaml, which sits next to this script."""
    return os.path.join(SCRIPT_DIR, 'bench_cases.yaml')


def load_config():
    """Parse the YAML benchmark definition file into Python data."""
    with open(get_config_path(), 'r') as fh:
        config = yaml.safe_load(fh)
    return config


def get_workspace_path():
    """Return the workspace root: the parent of this script's directory."""
    parent = os.path.join(SCRIPT_DIR, os.pardir)
    return os.path.abspath(parent)

def has_duplicated(data):
    """Return every 'name' value that appears more than once in `data`."""
    tally = Counter(entry['name'] for entry in data)
    duplicated = []
    for name, occurrences in tally.items():
        if occurrences > 1:
            duplicated.append(name)
    return duplicated

def get_args():
    """Parse the bench-runner command line and return the argparse namespace."""
    parser = argparse.ArgumentParser(description='Bench runner')
    parser.add_argument('--platform', '-p', default='nvidia',
                        help='select the operating platform')
    parser.add_argument('--output', '-o', default='bench_result.csv',
                        help='Output file for csv results')
    parser.add_argument('--extra-compile-flags', '-e', default='',
                        help='Additional compilation flags (inserted before the predefined CFLAGS)')
    parser.add_argument('--clean', '-c', action='store_true',
                        help='Clean the builds')
    # Help text fixed: it was a copy-paste of --clean's "Clean the builds".
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Verbose output')
    parser.add_argument('--bench-dir', '-b',
                        help='Benchmark directory')
    parser.add_argument('--log-dir', '-log', default=SCRIPT_DIR,
                        help='Directory for per-case JSON log files')
    parser.add_argument('--bench-data', '-d',
                        help='Benchmark data')
    parser.add_argument('--is-test', '-t', action="store_true", default=False,
                        help='only test')
    parser.add_argument('--timeout', type=int, default=300,
                        help='Run single case with timeout (seconds)')
    parser.add_argument('--report-dir', '-rep', default=SCRIPT_DIR,
                        help='Benchmark json directory')

    return parser.parse_args()


def main():
    """Entry point: compile every case in parallel, run each once, and save
    an aggregated JSON report.  Exits non-zero if any run fails."""
    args = get_args()

    # Ensure the output directories exist.
    log_dir = args.log_dir
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    report_dir = args.report_dir
    if not os.path.exists(report_dir):
        os.mkdir(report_dir)

    # Build the benchmark list from bench_cases.yaml.
    config_data = load_config()
    benches = [Benchmark(args, bench) for bench in config_data]

    # Case names must be unique: result files are keyed by name.
    names = has_duplicated(config_data)
    if len(names) != 0:
        raise ValueError(f'duplicated name found.\n {names}\n file: bench_cases.yaml')

    # Detect the GPU name and, for metax, switch the report-file prefixes.
    gpu_name = ""
    if args.platform == "metax":
        global COMPILE_FILE_PRE
        global RUN_FILE_PRE
        COMPILE_FILE_PRE = 'metax_compile_bench_result'
        RUN_FILE_PRE = 'metax_run_bench_result'
        try:
            result = subprocess.run(['mx-smi', '--show-memory'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, text=True)
            if result.returncode != 0:
                # Typo fixed: was "Error exectuing mx-smi".
                print("Error executing mx-smi:", result.stderr)
            # Take the first GPU's name from the mx-smi listing.
            gpu_names = re.findall(r"GPU#\d+  ([A-Za-z0-9]+)", result.stdout)
            if len(gpu_names) == 0:
                print("No metax gpu.")
            else:
                gpu_name = gpu_names[0]
        except Exception as err:
            # GPU detection is best-effort; a missing tool must not stop the run.
            print(err)
    else:
        # nvidia path: query the first GPU via GPUtil.
        for gpu in GPUtil.getGPUs():
            if gpu.id == 0:
                gpu_name = gpu.name

    # Deduplicate by binary so each make target is compiled only once.
    benches = sorted(benches, key=lambda x: x.binary)
    compile_benches = []
    for bench in benches:
        if len(compile_benches) == 0 or compile_benches[-1].binary != bench.binary:
            compile_benches.append(bench)

    try:
        with multiprocessing.Pool(20) as p:
            p.map(compile_bot, compile_benches)
    except Exception as e:
        print("Compilation failed, exiting")
        traceback.print_exc()
        print(e)
        exit(-1)

    # Run every case sequentially, collecting per-case results.
    run_all_pass = True
    run_results = []
    for bench in benches:
        try:
            if args.verbose:
                print("running: {}".format(bench.name))

            bench.warmup()
            _result, run_pass = bench.run()
            if not run_pass:
                run_all_pass = False
            run_results.append(_result)

        except Exception as err:
            # A single broken case must not abort the whole suite.
            print("Error running: ", bench.name)
            traceback.print_exc()
            print(err)
    run_results = {
        'gpu_name': gpu_name,
        'result': run_results
    }
    gen_json_files(report_dir, RUN_FILE_PRE, run_results)
    if not run_all_pass:
        exit(-1)


# Run the benchmark suite only when invoked as a script, not on import.
if __name__ == "__main__":
    main()
