import os

import scipy.io as scipy
import torch

from problems.problem import Problem


def init_data(data) -> list:
    """Convert each array-like entry of ``data`` into a ``torch.Tensor``.

    Parameters
    ----------
    data : sequence of array-likes
        Typically the rows of a dataset loaded from a .mat file.

    Returns
    -------
    list of ``torch.Tensor``, one per entry, preserving order.
    """
    return [torch.tensor(entry) for entry in data]


class Tree(Problem):
    """Data-driven tree-modelling optimization problem.

    Decision variables encode per-time-step offsets around the mean of the
    measured series in ``Dataset_TREE.mat``. Objective 0 is the total relative
    fit error; objective 1 measures how unevenly that error evolves over time
    (summed standard deviation of its first-order differences).
    """

    # Front-end parameter descriptors (consumed by the UI layer).
    parameters = [
        {'label': 'T', 'name': 'T', 'type': 'number',
         'description': 'Length of data (related to the number of variables)', 'step': 1, 'defaultValue': 400},
    ]
    # Loaded once at class-definition time and shared by every instance.
    data_file_path = os.path.join(os.getcwd(), "problems", "TREE", "Dataset_TREE.mat")
    total_data = scipy.loadmat(data_file_path)["Dataset"][0][0]
    total_data = init_data(total_data)

    def init_dec_boundary0(self, low_limit, high_limit):
        """Fill bounds/mean treating the whole series as one segment (K == 3).

        Columns of ``self.data`` are interleaved in groups of 3 channels;
        bounds are the per-row min/max widened by ~1 %.
        """
        for i in range(3):
            low_limit[:, i] = torch.min(self.data[:, i::3], dim=1)[0] / 1.01
            high_limit[:, i] = torch.max(self.data[:, i::3], dim=1)[0] / 0.99
            self.mean[:, i] = torch.mean(self.data[:, i::3], dim=1)
        return low_limit, high_limit

    def init_dec_boundary1(self, low_limit, high_limit):
        """Fill bounds/mean with the series split into two halves (K == 6)."""
        length = int(self.data.size(1) * 0.5)
        for i in range(3):
            low_limit[:, i] = torch.min(self.data[:, i:length:3], dim=1)[0] / 1.01
            high_limit[:, i] = torch.max(self.data[:, i:length:3], dim=1)[0] / 0.99
            low_limit[:, i + 3] = torch.min(self.data[:, length + i::3], dim=1)[0] / 1.01
            high_limit[:, i + 3] = torch.max(self.data[:, length + i::3], dim=1)[0] / 0.99
            self.mean[:, i] = torch.mean(self.data[:, i:length:3], dim=1)
            self.mean[:, i + 3] = torch.mean(self.data[:, length + i::3], dim=1)
        return low_limit, high_limit

    def init_dec_boundary2(self, low_limit, high_limit):
        """Fill bounds/mean with the series split into four quarters (K == 12).

        NOTE(review): the last two quarters widen bounds by an additive
        ``pi/180`` instead of the multiplicative 1 % used elsewhere —
        presumably those channels are angles in radians; confirm with the
        dataset's documentation.
        """
        length0 = int(self.data.size(1) * 0.25)
        length1 = int(self.data.size(1) * 0.5)
        length2 = int(self.data.size(1) * 0.75)
        for i in range(3):
            low_limit[:, i] = torch.min(self.data[:, i:length0:3], dim=1)[0] / 1.01
            high_limit[:, i] = torch.max(self.data[:, i:length0:3], dim=1)[0] / 0.99
            low_limit[:, i + 3] = torch.min(self.data[:, length0 + i:length1:3], dim=1)[0] / 1.01
            high_limit[:, i + 3] = torch.max(self.data[:, length0 + i:length1:3], dim=1)[0] / 0.99
            low_limit[:, i + 6] = torch.min(self.data[:, length1 + i:length2:3], dim=1)[0] - torch.pi / 180
            high_limit[:, i + 6] = torch.max(self.data[:, length1 + i:length2:3], dim=1)[0] + torch.pi / 180
            low_limit[:, i + 9] = torch.min(self.data[:, length2 + i::3], dim=1)[0] - torch.pi / 180
            high_limit[:, i + 9] = torch.max(self.data[:, length2 + i::3], dim=1)[0] + torch.pi / 180
            self.mean[:, i] = torch.mean(self.data[:, i:length0:3], dim=1)
            self.mean[:, i + 3] = torch.mean(self.data[:, length0 + i:length1:3], dim=1)
            self.mean[:, i + 6] = torch.mean(self.data[:, length1 + i:length2:3], dim=1)
            self.mean[:, i + 9] = torch.mean(self.data[:, length2 + i::3], dim=1)
        return low_limit, high_limit

    def __init__(self, _: int, _1: int, max_fun_eval, data_index, K, init_dec_boundary, obj_dim, kwargs: dict):
        """Select the dataset slice, build decision bounds, and init the base.

        Parameters
        ----------
        _, _1 : unused positional slots kept for signature compatibility.
        max_fun_eval : evaluation budget forwarded to ``Problem``.
        data_index : which sub-dataset of ``total_data`` to use.
        K : number of channel groups (3, 6 or 12, matching the chosen
            ``init_dec_boundary`` strategy).
        init_dec_boundary : one of the ``init_dec_boundary*`` bound methods.
        obj_dim : number of objectives.
        kwargs : must contain 'T', the series length (clamped to [10, len-1]).
        """
        T = kwargs.get('T')
        self.data = self.total_data[data_index][:min(max(T, 10), self.total_data[0].shape[0] - 1)]
        var_dim = T * K
        low_limit = torch.zeros((T, K), dtype=torch.double)
        high_limit = torch.zeros((T, K), dtype=torch.double)
        self.mean = torch.zeros((T, K), dtype=torch.double)
        low_limit, high_limit = init_dec_boundary(low_limit, high_limit)
        # Bounds are stored as offsets from the mean, flattened to (1, var_dim).
        low_limit = (low_limit - self.mean).T.reshape(1, -1)
        high_limit = (high_limit - self.mean).T.reshape(1, -1)
        super().__init__(var_dim, obj_dim, low_limit, high_limit, max_fun_eval, kwargs)

    def init_population(self, pop_size, **kwargs):
        """Draw an initial population of small mean-scaled random offsets."""
        # Uniform noise in [-0.004, 0.004) scaled by the per-variable mean.
        PopDec = self.mean.T.reshape(1, -1) * (torch.rand(pop_size, self.var_dim, dtype=torch.double) * 0.008 - 0.004)
        # Evaluate the decision vectors into a Population object.
        Population = self.estimate_population(PopDec, **kwargs)
        return Population

    def eval_value_mix(self, pop_dec, calc_ea):
        """Compute both objectives for a batch of decision vectors.

        Parameters
        ----------
        pop_dec : (N, var_dim) tensor of offsets around ``self.mean``.
        calc_ea : one of ``calc_ea0/1/2``; must return the relative-error
            tensor of shape (N, T, KP).

        Returns
        -------
        (N, obj_dim) tensor: column 0 is the summed relative error, column 1
        the summed temporal standard deviation of its differences.
        """
        # Input shapes
        N = pop_dec.size(0)  # pop_dec is (N, ...)
        KP = self.data.size(1)  # self.data is (T, KP)
        T, K = self.mean.size()  # self.mean is (T, K)

        # Reshape pop_dec to (N, T, K) and add the broadcast mean.
        pop_dec = pop_dec.T.reshape(K, T, N).permute(2, 1, 0) + self.mean.T.reshape(K, T, 1).permute(2, 1, 0)

        # Objective container
        pop_obj = torch.zeros(N, self.obj_dim, dtype=torch.double)

        # First objective: total relative fit error.
        eA = calc_ea(pop_dec, KP, T)
        pop_obj[:, 0] = eA.sum(dim=(1, 2))  # sum over time and channel dims

        # Second objective: Delta = std(eA[:, 1:] - eA[:, :-1]) along time.
        eA_diff = eA[:, 1:] - eA[:, :-1]  # first-order difference over time
        Delta = torch.std(eA_diff, dim=1, keepdim=True)  # std over the time dim

        # Flatten Delta and sum per individual.
        pop_obj[:, 1] = Delta.permute(2, 1, 0).reshape(-1, N).T.sum(dim=1)

        return pop_obj

    def calc_ea0(self, pop_dec, KP, T):
        """Relative error with the whole series as one segment (K == 3)."""
        obj_data = self.data.T.reshape(KP, T, 1).permute(2, 1, 0)  # data as (1, T, KP)
        PopDec_slice = pop_dec[:, :, :3]  # first 3 channels of pop_dec
        return torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 3) - 1)

    def calc_ea1(self, pop_dec, KP, T):
        """Relative error with the series split into two halves (K == 6)."""
        length = int(self.data.size(1) * 0.5)
        obj_data = self.data[:, :length].T.reshape(KP // 2, T, 1).permute(2, 1, 0)  # first half as (1, T, KP//2)
        PopDec_slice = pop_dec[:, :, :3]  # channels 0-2 model the first half
        ea1 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 6) - 1)
        obj_data = self.data[:, length:].T.reshape(KP // 2, T, 1).permute(2, 1, 0)
        PopDec_slice = pop_dec[:, :, 3:6]  # channels 3-5 model the second half
        ea2 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 6) - 1)
        return torch.cat((ea1, ea2), dim=2)

    def calc_ea2(self, pop_dec, KP, T):
        """Relative error with the series split into four quarters (K == 12)."""
        length0 = int(self.data.size(1) * 0.25)
        length1 = int(self.data.size(1) * 0.5)
        length2 = int(self.data.size(1) * 0.75)
        obj_data = self.data[:, :length0].T.reshape(KP // 4, T, 1).permute(2, 1, 0)  # quarter 1 as (1, T, KP//4)
        PopDec_slice = pop_dec[:, :, :3]  # channels 0-2 model quarter 1
        ea1 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 12) - 1)
        obj_data = self.data[:, length0:length1].T.reshape(KP // 4, T, 1).permute(2, 1, 0)  # quarter 2
        PopDec_slice = pop_dec[:, :, 3:6]  # channels 3-5 model quarter 2
        ea2 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 12) - 1)
        obj_data = self.data[:, length1:length2].T.reshape(KP // 4, T, 1).permute(2, 1, 0)  # quarter 3
        PopDec_slice = pop_dec[:, :, 6:9]  # channels 6-8 model quarter 3
        ea3 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 12) - 1)
        obj_data = self.data[:, length2:].T.reshape(KP // 4, T, 1).permute(2, 1, 0)  # quarter 4
        PopDec_slice = pop_dec[:, :, 9:12]  # channels 9-11 model quarter 4
        ea4 = torch.abs(obj_data / PopDec_slice.repeat(1, 1, KP // 12) - 1)
        # BUG FIX: this method previously returned a 2-tuple
        # (cat(ea1, ea2), cat(ea3, ea4)), which breaks eval_value_mix —
        # it immediately calls eA.sum(dim=(1, 2)) and slices eA[:, 1:].
        # Return a single (N, T, KP) tensor, consistent with calc_ea0/calc_ea1.
        return torch.cat((ea1, ea2, ea3, ea4), dim=2)

    def get_optimal_solutions(self, _):
        """Evaluate a reference solution built from the decision bounds."""
        # All-zero tensor of shape (1, var_dim)
        X = torch.zeros(1, self.var_dim, dtype=torch.double)
        # Even-indexed variables take the lower bound...
        X[:, 0::2] = self.low_limit[:, 0::2]
        # ...and odd-indexed variables take the upper bound.
        X[:, 1::2] = self.high_limit[:, 1::2]
        # Evaluate the constructed point.
        return self.eval_value(X)
