import os
import pickle
from concurrent.futures import as_completed

import torch
from loguru import logger

from utils.file import get_root_path
from websocket.msghandler import MsgContext, MessageType


class TaskHandler:
    """Run a batch of (problem x algorithm x run) experiments.

    Each run is either restored from a pickled cache file under
    ``temp_data_path`` or submitted to ``MsgContext.executor``; once all
    futures complete, performance indicators and statistical results are
    computed and pushed back to the client through ``MsgContext``.
    """

    # Cache directory for intermediate results: <project root>/temp_data.
    # (The original used get_root_path() + r'\temp_data', which hard-codes a
    # Windows separator and produces broken paths on POSIX; os.path.join
    # yields the same path on Windows and is portable.)
    temp_data_path = os.path.join(get_root_path(), 'temp_data')

    @classmethod
    def handle(cls, message):
        """
        Create problem and algorithm instances in a loop and submit them to
        the executor pool, reusing results already cached on disk.

        :param message: raw task message received over the websocket
        :return: None -- all results are returned via MsgContext callbacks
        """
        # Unpack everything the task needs from the message.
        algorithms_datas, futures, indicator_data, max_fun_eval, obj_dim, pop_size, problem_datas, run_size, \
            save_size, var_dim, statistical_type = cls.init(message)
        # ----------------------------------------------------------------
        for problem_index, problem_data in enumerate(problem_datas):
            problem_class = MsgContext.get_cls_by_str(problem_data['cls'])
            MsgContext.multiple_problem_objs[problem_index] = problem_class(var_dim, obj_dim, max_fun_eval,
                                                                            problem_data['params'])
            problem_name = problem_class.__name__
            for algorithm_index, algorithms_data in enumerate(algorithms_datas):
                algorithms_class = MsgContext.get_cls_by_str(algorithms_data['cls'])
                algorithm_name = algorithms_class.__name__
                # Cache layout: <algo_params_sizes>/<problem_params_dims>/<nth_run>.pkl
                parent_dir = '_'.join([algorithm_name, cls.dict2str(algorithms_data['params']),
                                       str(pop_size), str(max_fun_eval), str(save_size)])
                child_dir = '_'.join([problem_name, cls.dict2str(problem_data['params']), str(var_dim), str(obj_dim)])
                folder_path = os.path.join(cls.temp_data_path, parent_dir, child_dir)
                for nth_run in range(run_size):
                    file_path = os.path.join(folder_path, f"{nth_run}.pkl")
                    MsgContext.file_path_list[problem_index][algorithm_index][nth_run] = file_path
                    if os.path.exists(file_path):
                        logger.warning(f"file | {file_path} | already exist")
                        # Load the cached run. NOTE(review): pickle is unsafe on
                        # untrusted input; these are locally produced cache files.
                        with open(file_path, 'rb') as f:
                            load_data = pickle.load(f)
                        # Restore the saved populations and their FE counters.
                        MsgContext.multiple_populations[problem_index][algorithm_index][nth_run] = load_data[0]
                        MsgContext.multiple_FE[problem_index][algorithm_index][nth_run] = [item['FE'] for item in
                                                                                           load_data[0]]
                        # Restore every indicator already present in the cache
                        # (tensors default-filled with -1 for missing entries).
                        for indicator_name, indicator_value in load_data[1].items():
                            temp_indicator = MsgContext.multiple_indicators.get(indicator_name,
                                                                                torch.ones(MsgContext.tensor_size,
                                                                                           dtype=torch.double) * -1)
                            temp_indicator[problem_index][algorithm_index][nth_run] = indicator_value
                            MsgContext.multiple_indicators[indicator_name] = temp_indicator

                        if indicator_data['name'] == MsgContext.num_of_run:
                            MsgContext.num_of_run_return(algorithm_index, problem_index)
                            continue

                        indicator = load_data[1].get(indicator_data['name'], None)
                        # Requested indicator not cached yet -> compute and persist it.
                        if indicator is None:
                            indicator_value = MsgContext.calc_indicator(indicator_data, load_data[0], problem_index,
                                                                        algorithm_index, nth_run)
                            load_data[1][indicator_data['name']] = indicator_value
                            with open(file_path, 'wb') as my_file:
                                # Save the intermediate populations together with the
                                # performance indicators in one binary file.
                                pickle.dump([MsgContext.multiple_populations[problem_index][algorithm_index][nth_run],
                                             load_data[1]], my_file)
                        MsgContext.return_res(problem_index, algorithm_index, indicator_data)
                    else:
                        algorithm_obj = algorithms_class(MsgContext.multiple_problem_objs[problem_index], pop_size,
                                                         algorithms_data['params'])
                        # NOTE(review): file_path is passed twice here -- verify
                        # against run()'s signature; one of the two looks
                        # redundant. Kept as-is to preserve behavior.
                        future = MsgContext.executor.submit(algorithm_obj.run, nth_run, problem_index, algorithm_index,
                                                            save_size, folder_path, file_path, file_path)
                        futures.append(future)

        for f in as_completed(futures):
            algorithm_index, intermediate_pops, nth_run, problem_index = cls.get_result_data(f)
            if indicator_data['name'] == MsgContext.num_of_run:
                MsgContext.num_of_run_return(algorithm_index, problem_index)
            else:
                MsgContext.calc_indicator(indicator_data, intermediate_pops, problem_index,
                                          algorithm_index, nth_run)
                MsgContext.save_indicator(algorithm_index, intermediate_pops,
                                          nth_run, problem_index)
                MsgContext.return_res(problem_index, algorithm_index, indicator_data)
        # Compute statistical test results (skipped for the run-count pseudo-indicator).
        if indicator_data['name'] != MsgContext.num_of_run:
            MsgContext.statistical_result(len(algorithms_datas), len(problem_datas), indicator_data, statistical_type)
        # Average the FE counters over all runs of each (problem, algorithm) pair.
        for problem_index in range(len(problem_datas)):
            for algorithm_index in range(len(algorithms_datas)):
                MsgContext.multiple_FE[problem_index][algorithm_index] = torch.mean(torch.tensor(
                    MsgContext.multiple_FE[problem_index][algorithm_index], dtype=torch.double), dim=0).to(torch.long)
        MsgContext.task_complete_return(MessageType.MULTI_TASK_COMPLETED)

    @staticmethod
    def get_result_data(f):
        """Unpack a finished future and record its populations/FE in MsgContext.

        :param f: a completed Future whose result is a dict with keys
                  'problem_index', 'algorithm_index', 'nth_run',
                  'intermediate_pops' and 'FE'
        :return: (algorithm_index, intermediate_pops, nth_run, problem_index)
        """
        result = f.result()
        problem_index = result['problem_index']
        algorithm_index = result['algorithm_index']
        nth_run = result['nth_run']
        intermediate_pops = result['intermediate_pops']
        MsgContext.multiple_populations[problem_index][algorithm_index][nth_run] = intermediate_pops
        MsgContext.multiple_FE[problem_index][algorithm_index][nth_run] = result['FE']
        return algorithm_index, intermediate_pops, nth_run, problem_index

    @classmethod
    def init(cls, message):
        """Parse the incoming message and (re)initialize MsgContext's shared
        per-task containers.

        :param message: raw task message received over the websocket
        :return: the unpacked task parameters plus an empty futures list
        """
        algorithms_datas, problem_datas, form_data, indicator_data, statistical_type = MsgContext.get_data(message)
        futures = []
        MsgContext.multiple_indicators = {}
        MsgContext.multiple_populations = []
        max_fun_eval, obj_dim, pop_size, run_size, save_size, var_dim = MsgContext.get_form_data(form_data)
        # Pre-size the 3D result containers, indexed [problem][algorithm][run].
        MsgContext.multiple_populations = MsgContext.create_3D_list(len(problem_datas), len(algorithms_datas), run_size)
        MsgContext.file_path_list = MsgContext.create_3D_list(len(problem_datas), len(algorithms_datas), run_size)
        MsgContext.multiple_FE = MsgContext.create_3D_list(len(problem_datas), len(algorithms_datas), run_size)
        MsgContext.indicator_value_list = {}
        MsgContext.multiple_problem_objs = [None] * len(problem_datas)
        MsgContext.tensor_size = (len(problem_datas), len(algorithms_datas), run_size)
        return algorithms_datas, futures, indicator_data, max_fun_eval, obj_dim, pop_size, problem_datas, run_size, \
            save_size, var_dim, statistical_type

    @staticmethod
    def dict2str(param_dict, separator='_'):
        """Flatten {k: v, ...} into 'k<sep>v<sep>k<sep>v' for cache-directory names.

        :param param_dict: parameter dict to flatten
        :param separator: string placed between every key and value
        :return: the joined string ('' for an empty dict)
        """
        return separator.join(f"{key}{separator}{value}" for key, value in param_dict.items())
