import math
import os
import shutil
import concurrent.futures
from utils.utils import Utils
from common.benchmark_var import MeasureUnit, ResultType, StatusCode, GlobalEnv
from common.logger import log
from collections import OrderedDict


class Instance:
    """One benchmark instance: owns a private working directory and the
    timing / score / validation results produced by a single run."""

    def __init__(self, instance_id, run_dir):
        self.id = instance_id
        self.run_dir = run_dir
        # Every instance gets its own sub-directory under the run directory.
        self.instance_dir = os.path.join(self.run_dir, "instance_{}".format(instance_id))
        # Timing and score fields, filled in after the run phase.
        for numeric_attr in ("start_time", "end_time", "elapsed_time", "raw_score"):
            setattr(self, numeric_attr, 0)
        self.sub_item = {}
        # Optimistic defaults; flipped to False when a phase fails.
        self.run_res = True
        self.validate_res = True

    def get_instance_result(self):
        """Return this instance's result as a plain dict for reporting."""
        return dict(
            id=self.id,
            start_time=self.start_time,
            end_time=self.end_time,
            score=self.raw_score,
            sub_item=self.sub_item,
            valid=self.validate_res,
        )

    def init_instance_dir(self):
        """Recreate the instance directory from scratch (drop stale state)."""
        if os.path.exists(self.instance_dir):
            shutil.rmtree(self.instance_dir)
        os.makedirs(self.instance_dir)


class BaseWorkload:
    def __init__(self, name, options, baseline):
        self.options = options
        self.name = name
        self.description = ''
        self.lang = ''
        self.debug = self.options.debug
        self.program = []
        self.iterations = self.options.iterations
        self.instances = OrderedDict()
        for round in range(self.iterations):
            self.instances["round{}".format(round + 1)] = []
        # work dir
        self.work_dir = ""
        self.src_dir = ""
        self.build_dir = ""
        self.data_dir = ""
        self.log_dir = ""
        self.run_dir = ""
        self.bin_dir = ""
        # result
        self.unit_of_measure = MeasureUnit.Default
        self.score = 0
        self.baseline = baseline
        self.index = 0  # index = score / baseline
        self.elapsed_time = 0  # the running time of workload
        self.sub_item = []
        self.sub_item_metric = {}
        self.runtime_as_score = False
        self.valid = False
        self.support_multi_instance = True
        self.support_multi_threads = False
        self.threads_per_instance = 1
        self.last_instance_thread_num = 1
        self.allow_bind_core = True
        self.instances_num = self.options.copies
        self.setup_dir()

    @property
    def support_multi_mode(self):
        if self.support_multi_instance:
            return True
        elif self.support_multi_threads and self.threads_per_instance > 1:
            return True
        else:
            return False

    @property
    def support_single_mode(self):
        if self.support_multi_threads and self.threads_per_instance > 1:
            return False
        else:
            return True

    def adjust_instance_num(self, instances_num):
        if not self.support_multi_instance:
            return 1

        if self.support_multi_threads:
            num = instances_num / self.threads_per_instance
            self.last_instance_thread_num = instances_num % self.threads_per_instance
            if self.last_instance_thread_num == 0:
                self.last_instance_thread_num = self.threads_per_instance
            return math.ceil(num)
        else:
            return instances_num

    def get_bind_cores(self, instance_id):
        bind_cores = None
        if self.options.taskset:
            if self.support_multi_threads:
                origin_instance_id = instance_id * self.threads_per_instance
                start_pos = origin_instance_id % len(self.options.taskset)
                end_pos = start_pos + self.threads_per_instance
                if end_pos > len(self.options.taskset):
                    end_pos = len(self.options.taskset)
                bind_cores = self.options.taskset[start_pos:end_pos]
            else:
                bind_cpuid = self.options.taskset[instance_id % len(self.options.taskset)]
                bind_cores = [bind_cpuid]

        origin_instance_id = instance_id
        if self.options.bind_copies and self.options.bind_cores:
            for instance_set in self.options.bind_copies:
                if self.support_multi_threads:
                    origin_instance_id = instance_id * self.threads_per_instance
                if origin_instance_id in instance_set:
                    index = self.options.bind_copies.index(instance_set)
                    bind_cores = self.options.bind_cores[index]
        return bind_cores

    def setup_dir(self):
        self.work_dir = os.path.join(GlobalEnv.GCBS_WORKLOAD_DIR, self.name.split("_")[0])
        self.src_dir = os.path.join(self.work_dir, 'src')
        self.build_dir = os.path.join(self.work_dir, 'build')
        self.data_dir = os.path.join(self.work_dir, 'data')
        self.log_dir = os.path.join(self.work_dir, 'log')
        self.bin_dir = os.path.join(self.work_dir, 'bin')
        self.run_dir = os.path.join(self.work_dir, 'run', self.name)
        dir_list = [self.work_dir, self.src_dir, self.build_dir, self.data_dir,
                    self.log_dir, self.run_dir, self.bin_dir]
        for directory in dir_list:
            if not os.path.exists(directory):
                os.makedirs(directory)

    def clean(self):
        """
        clean directories
        :return:
        """
        log.debug("clean...")
        log.debug(self.build_dir)
        log.debug(self.bin_dir)
        try:
            Utils.clean_directory(self.bin_dir)
            Utils.clean_directory(self.run_dir)
            Utils.clean_directory(self.build_dir)
        except BaseException as e:
            Utils.print_err_code(StatusCode.CLEAN_FAIL, e)

    def build(self):
        return

    def run(self):
        log.info("run workload : {}...".format(self.name))
        self.instances_num = self.adjust_instance_num(self.options.copies)
        self.init_instances()
        self.check_program()
        self.setup()
        for round in range(self.iterations):
            iter_round = "round{}".format(round + 1)
            log.info("Running {} {}".format(self.name, iter_round))
            iter_round_instances = self.instances.get(iter_round)
            if self.instances_num == 1:
                self.run_one_instance(iter_round_instances[0])
            else:
                self.run_multi_instance(self.instances_num, iter_round_instances)

            for instance in self.instances[iter_round]:
                if not instance.run_res:
                    Utils.print_err_code(StatusCode.RUN_FAIL,
                                         "run {} fail, check log in {}".format(self.name, self.run_dir))
            for instance in self.instances[iter_round]:
                if not instance.validate_res:
                    Utils.print_err_code(StatusCode.VALIDATE_FAIL,
                                         "run {} fail, check log in {}".format(self.name, self.run_dir))

        self.collect_results()
        self.tear_down()

    def init_instances(self):
        for round in range(self.iterations):
            iter_round = "round{}".format(round + 1)
            for id in range(self.instances_num):
                instance = Instance(id, self.run_dir)
                instance.init_instance_dir()
                self.instances.get(iter_round).append(instance)

    def check_program(self):
        return

    def run_multi_instance(self, instance_num, instances):
        with concurrent.futures.ProcessPoolExecutor(max_workers=instance_num) as pool:
            # setup
            setup_futures = tuple(self.run_phase(self.setup_instance_ex, pool, instances))
            for future in setup_futures:
                if not future.result():
                    Utils.print_err_code(StatusCode.SETUP_INSTANCE_FAIL,
                                         "check {}'s setup_instance".format(self.name))
                    break
            # run
            run_futures = tuple(self.run_phase(self.run_instance, pool, instances))
            for future in run_futures:
                future_res = future.result()
                # already initted
                instance = instances[run_futures.index(future)]
                instance.run_res, instance.start_time, instance.end_time, \
                    instance.elapsed_time, instance.raw_score = future_res
                if not instance.run_res:
                    Utils.print_err_code(StatusCode.RUN_FAIL,
                                         "check run log at {}".format(self.run_dir))
            # validate
            validate_futures = tuple(self.run_phase(self.validate_instance, pool, instances))
            for future in validate_futures:
                future_res = future.result()
                if not future.result():
                    Utils.print_err_code(StatusCode.VALIDATE_FAIL,
                                         "check validate log at {}".format(self.run_dir))
                    break
                instance = instances[validate_futures.index(future)]
                instance.validate_res = future_res
            # tear down
            tear_down_futures = tuple(self.run_phase(self.tear_down_instance, pool, instances))
            for future in tear_down_futures:
                if not future.result():
                    Utils.print_err_code(StatusCode.TEAR_DOWN_INSTANCE_FAIL,
                                         "check {}'s tear_down_instance".format(self.name))
                    break

    def run_one_instance(self, instance):
        with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool:
            # setup
            setup_future = self.run_phase(self.setup_instance_ex, pool, instance)
            if not setup_future.result():
                Utils.print_err_code(StatusCode.RUN_FAIL,
                                     "check run log at {}".format(self.run_dir))
            # run
            run_future = self.run_phase(self.run_instance, pool, instance)
            instance.run_res, instance.start_time, instance.end_time, \
                instance.elapsed_time, instance.raw_score = run_future.result()
            if not instance.run_res:
                Utils.print_err_code(StatusCode.VALIDATE_FAIL,
                                     "check validate log at {}".format(self.run_dir))
            # validate
            validate_future = self.run_phase(self.validate_instance, pool, instance)
            instance.validate_res = validate_future.result()
            if not instance.validate_res:
                Utils.print_err_code(StatusCode.VALIDATE_FAIL,
                                     "check validate log at {}".format(self.run_dir))
            # tear down
            tear_down_future = self.run_phase(self.tear_down_instance, pool, instance)
            if not tear_down_future.result():
                Utils.print_err_code(StatusCode.TEAR_DOWN_INSTANCE_FAIL,
                                     "check {}'s tear_down_instance".format(self.name))

    @staticmethod
    def get_sub_item_results(sub_item_results, sub_item):
        for name, score in sub_item.items():
            sub_item_results.setdefault(name, {})
            sub_item_results[name].setdefault("valid", True)
            sub_item_results[name]["score"] = sub_item_results[name].get("score", 0) + score

            if sub_item_results[name]["valid"] and score <= 0:
                sub_item_results[name]["valid"] = False
        return sub_item_results

    def calculate_score(self, valid, sum_score, baseline):
        if valid:
            avg_round_score = sum_score / len(self.instances)
            avg_instance_score = avg_round_score / self.instances_num
            score = round(avg_instance_score, 2)
            if self.runtime_as_score:
                # The less time, the better. when use runtime as score
                instances_index = baseline / score
                index = round(instances_index * self.instances_num, 2)
            else:
                instances_index = score / baseline
                index = round(instances_index * self.instances_num, 2)
        else:
            score = 0
            index = 1
        return score, index

    def integrate_sub_item_results(self, sub_item_results):
        for name, result in sub_item_results.items():
            final_result = {}
            metric = self.sub_item_metric.get(name)
            final_result["name"] = name
            final_result["desc"] = metric.get("desc", "")
            final_result["runtime_as_score"] = metric.get("runtime_as_score", False)
            self.runtime_as_score = final_result["runtime_as_score"]
            final_result["score"], final_result["index"] = \
                self.calculate_score(result["valid"], result["score"],
                                     self.baseline.get("{}.{}".format(self.name, name), 1))
            final_result["unit"] = metric.get("unit", "")
            final_result["valid"] = result["valid"]
            final_result["elapsed_time"] = self.elapsed_time
            self.sub_item.append(final_result)

    def collect_results(self):
        """
        get run results from instances
        :return:
        """
        sum_score = 0
        sub_item_results = {}
        find_failed_instance = False
        for round_id in self.instances:
            round_score = 0
            for instance in self.instances.get(round_id):
                if isinstance(instance.raw_score, dict):
                    instance.sub_item = instance.raw_score.get("sub_item")
                    sub_item_results = self.get_sub_item_results(sub_item_results, instance.sub_item)
                    instance.raw_score = instance.raw_score.get("score")

                if not instance.validate_res or instance.raw_score <= 0:
                    find_failed_instance = True

                round_score += instance.raw_score
                self.elapsed_time = max(self.elapsed_time, instance.elapsed_time)

            sum_score += round_score
        self.elapsed_time = round(self.elapsed_time, 3)

        if find_failed_instance:
            self.valid = False
        else:
            self.valid = True

        self.score, self.index = self.calculate_score(self.valid, sum_score, self.baseline[self.name])
        self.integrate_sub_item_results(sub_item_results)

        if self.valid:
            log.info("{} run finish, {} instances, avg score: {} {}, index: {}".
                     format(self.name, self.instances_num, self.score, self.unit_of_measure, self.index))
        else:
            log.error("Verification failed.")

    def run_phase(self, func, pool: concurrent.futures.ProcessPoolExecutor, instances):
        futures = []
        if isinstance(instances, Instance):
            instance_dir = instances.instance_dir
            future = pool.submit(func, instances)
            futures.append(future)
        else:
            for instance in instances:
                instance_dir = instance.instance_dir
                future = pool.submit(func, instance)
                futures.append(future)

        # all instance complete
        concurrent.futures.wait(futures, return_when=concurrent.futures.ALL_COMPLETED)
        if self.instances_num == 1:
            return futures[0]
        else:
            return futures

    def setup_instance_ex(self, instance):
        instance_id = instance.id
        if self.allow_bind_core:
            # bind cores
            bind_cores = self.get_bind_cores(instance_id)
            if bind_cores:
                log.debug("setup_instance {}, bind cores : {}".format(instance_id, bind_cores))
                os.sched_setaffinity(0, bind_cores)
        return self.setup_instance(instance)

    def run_instance(self, instance):
        """
        For each instance, you can customize the execution logic,
        construct the Exector list for the commands to run,
        and the framework will automatically run the provided executors.
        subclasses can override this method
        """
        instance_id = instance.id
        if self.allow_bind_core:
            # bind cores
            bind_cores = self.get_bind_cores(instance_id)
            if bind_cores:
                log.debug("run_instance {}, bind cores : {}".format(instance_id, bind_cores))
                os.sched_setaffinity(0, bind_cores)

        executor_start_time = []
        executor_end_time = []
        elapsed_time = 0
        run_res = True
        executors = self.get_instance_cmd(instance)
        for executor in executors:
            return_code = executor.exec()
            # calc instance’s elapsed_time
            elapsed_time += (executor.end_time - executor.start_time)
            # calc instance’s start time and end time
            executor_start_time.append(executor.start_time)
            executor_end_time.append(executor.end_time)
            if return_code != 0:
                run_res = False
                break

        start_time = min(executor_start_time)
        end_time = max(executor_end_time)
        raw_score = self.get_raw_score(instance)
        if self.runtime_as_score:
            raw_score = elapsed_time
        return [run_res, start_time, end_time, elapsed_time, raw_score]

    def get_instance_cmd(self, instance: Instance):
        """
        return the actual run benchmark command list
        """
        return []

    def validate_instance(self, instance: Instance):
        """
        For each instance, add a validate phase after running,
        and subclasses can override this method
        """
        return True

    def get_raw_score(self, instance: Instance):
        """
        For each instance,get raw score from sub workload class, and
        subClass can override this method
        """
        return 0

    def setup_instance(self, instance: Instance):
        """
        For each instance, add a pre-run phase,
        and subclasses can override this method
        """
        return True

    def tear_down_instance(self, instance: Instance):
        """
        For each instance, add a post-run processing phase, and
        subclasses can override this method
        """
        return True

    def build_extend_tool(self):
        """
        build additional programs required by subclasses, and
        subclasses can override this method
        """
        return True

    def setup(self):
        """
        For each workload, add a pre-run processing phase, and
        subclasses can override this method
        """
        return True

    def tear_down(self):
        """
        For each workload, add a post-run processing phase, and
        subclasses can override this method
        """
        return True

    def get_result(self):
        instances_dict = {}
        for round in range(self.iterations):
            instances_dict["round{}".format(round+1)] = \
                [instance.get_instance_result() for instance in self.instances.get("round{}".format(round+1))]

        return {
            "name": self.name,
            "type": ResultType.Workload,
            "desc": self.description,
            "lang": self.lang,
            "score": self.score,
            "baseline": self.baseline,
            "index": self.index,
            "unit": self.unit_of_measure,
            "valid": self.valid,
            "instances": instances_dict,
            "elapsed_time": self.elapsed_time,
            "sub_item": self.sub_item
        }
