import os.path
from random import random

import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader

from algorithms.algorithm import Algorithm
from problems.problem import Problem
from utils.file import get_root_path


class GNNMOEA(Algorithm):
    """
    Multi-objective EA driven by a neural inverse model.

    A small MLP is trained to map objective vectors back to decision vectors
    (objectives -> decisions).  Each iteration the model proposes decision
    values, which are written into the population one variable group at a
    time; improved individuals replace their parents.
    """

    def __init__(self, problem: Problem,
                 pop_size: int,
                 kwargs: dict,
                 ):
        super().__init__(pop_size, problem, kwargs)
        # Inverse model: obj_dim inputs -> var_dim outputs.
        self.model = SimpleNN(problem.obj_dim, problem.var_dim)
        self.population = self.problem.init_population(pop_size)
        # BUG FIX: diversity_index was previously assigned to a throwaway
        # local; keep both masks on the instance.
        self.convergence_index, self.diversity_index = self.variable_analysis(problem)
        self.sub_group_list = self.group_dec(problem, self.convergence_index)
        self.criterion = nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-1)
        # Large sample used both for (re-)training and for the smoke eval.
        population = self.problem.init_population(50000)
        path = os.path.join(get_root_path(), "test", 'nn_model.pth')
        if os.path.exists(path):
            # Reuse a previously trained model when available.
            self.model.load_state_dict(torch.load(path))
        else:
            self.train_model(population, path)
        self.a(population)

    def each_iteration(self):
        """Run one generation: propose decisions via the inverse model,
        apply them group by group, and keep element-wise improvements."""
        obj = self.population.obj
        dec = self.population.dec
        # The model input (0.1 * obj snapshot) is invariant across groups,
        # so predict once outside the loop.
        offspring_dec = self.get_input_by_model(0.1 * obj)
        for sub_group in self.sub_group_list:
            # Replace only this sub-group's variables with the prediction.
            dec_clone = dec.clone()
            dec_clone[:, sub_group] = offspring_dec[:, sub_group]
            # BUG FIX: dec_clone was previously built but never used --
            # estimate_population was called on the raw model output, which
            # made the per-group replacement a no-op.
            offspring = self.problem.estimate_population(dec_clone)
            # Greedy replacement by summed objectives (minimization).
            better_index = offspring.obj.sum(dim=1) < self.population.obj.sum(dim=1)
            self.population[better_index] = offspring[better_index]

    @staticmethod
    def variable_analysis(problem):
        """
        Classify decision variables via gradient signs.

        A variable whose partial derivatives keep a single sign (and are not
        all zero) for every sampled solution is treated as a convergence
        variable; all others are diversity variables.

        :param problem: test-problem instance
        :return: (convergence mask, diversity mask), boolean tensors
        """
        dec = problem.init_population(50).dec
        grads = problem.calc_grad(dec)
        res = torch.zeros(grads.size(0), grads.size(1), dtype=torch.bool)
        for idx in range(grads.size(0)):
            all_non_negative = torch.all(grads[idx] >= 0, dim=1)
            all_non_positive = torch.all(grads[idx] <= 0, dim=1)
            has_nonzero = torch.sum(grads[idx] != 0, dim=1) >= 1
            # Sign-consistent and not identically zero for this sample.
            res[idx] = (all_non_negative | all_non_positive) & has_nonzero
        # Convergence variable: sign-consistent for all samples, none against.
        convergence_index = (torch.sum(res, dim=0) > 0) & (torch.sum(~res, dim=0) == 0)
        return convergence_index, ~convergence_index

    @staticmethod
    def group_dec(problem: Problem, convergence_index):
        """
        Group convergence variables by gradient interaction.

        A random reference solution is perturbed one variable at a time; the
        variables whose gradient sums react together form one group.

        :param problem: test-problem instance
        :param convergence_index: boolean mask of convergence variables
        :return: list of index tensors, one per variable group
        """
        convergence_size = convergence_index.sum()
        # Group id ("front number") per convergence variable; 0 == ungrouped.
        group_front_no = torch.zeros(convergence_size, dtype=torch.int)
        # Random reference point inside the feasible box.
        dec = random() * problem.interval_length + problem.low_limit
        initial_grad = problem.calc_grad(dec)[0]
        convergence_index = torch.where(convergence_index)[0]
        initial_grad = initial_grad[convergence_index]
        front_no = 1
        for index, var in enumerate(convergence_index):
            if group_front_no[index] != 0:
                continue
            temp_dec = dec.clone()
            # Re-sample only this variable and observe which gradients react.
            temp_dec[:, var] = random() * problem.interval_length[:, var] + problem.low_limit[:, var]
            new_grad = problem.calc_grad(temp_dec)[0][convergence_index]
            sub_group = (torch.sum(initial_grad, dim=1) != torch.sum(new_grad, dim=1))
            # Some problems (e.g. DTLZ7) have constant derivatives, so nothing
            # reacts; fall back to a singleton group for this variable.
            if sub_group.sum() == 0:
                sub_group[index] = True
            sub_group_index = torch.where(sub_group)[0]
            # NOTE(review): this merge loop assigns the whole sub-group to an
            # already-existing front, but the unconditional assignment below
            # immediately overwrites it with a fresh front number -- the merge
            # appears to be dead code; confirm the intended merge rule.
            for idx, existing_front in enumerate(group_front_no[sub_group_index]):
                if existing_front != 0:
                    group_front_no[sub_group_index] = existing_front
                    continue
            group_front_no[sub_group_index] = front_no
            front_no += 1
        # Collect the distinct group ids actually assigned.
        unique_values = torch.unique(group_front_no)

        # Build one index tensor (in original variable coordinates) per group.
        groups = []
        for value in unique_values:
            mask = group_front_no == value
            indices = torch.nonzero(mask, as_tuple=True)[0]
            groups.append(convergence_index[indices])

        return groups

    def train_model(self, pop, path):
        """
        Train the inverse model on (objective, decision) pairs and save it.

        :param pop: population supplying training data (obj as input,
                    dec as regression target)
        :param path: destination file for the trained state_dict
        """
        dec = pop.dec
        obj = pop.obj
        # Objectives are the inputs, decisions the targets.
        train_dataset = TensorDataset(obj, dec)
        train_loader = DataLoader(train_dataset, batch_size=500, shuffle=True)
        num_epochs = 100
        self.model.train()  # switch to training mode
        for epoch in range(num_epochs):
            for inputs, targets in train_loader:
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)
                # Backward pass and parameter update.
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            # Report the scalar loss instead of the raw tensor repr.
            print("iter -- >", epoch, ", loss -- > ", loss.item())
        torch.save(self.model.state_dict(), path)

    def model_eval(self, inputs):
        """Switch the model to eval mode and return its raw prediction.
        Gradient tracking is left to the caller (see `a`)."""
        self.model.eval()
        return self.model(inputs)

    def a(self, pop):
        """Print the model's reconstruction loss (obj -> dec) on `pop` as a
        quick sanity check of training quality."""
        with torch.no_grad():
            outputs = self.model_eval(pop.obj)
            loss = self.criterion(outputs, pop.dec)
            print(f'Evaluation Loss: {loss.item():.4f}')

    def get_input_by_model(self, input_dec):
        """
        Predict decision variables from objective vectors.

        NOTE(review): despite the parameter name, callers pass (scaled)
        objective values -- the model maps objectives to decisions; confirm
        and consider renaming the parameter.

        :param input_dec: tensor fed to the inverse model
        :return: predicted decision variables
        """
        return self.model_eval(input_dec)

class SimpleNN(nn.Module):
    """A minimal one-hidden-layer MLP (input -> 512 -> output) with ReLU,
    built in double precision to match the problem tensors."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, 512, dtype=torch.double)
        self.fc2 = nn.Linear(512, output_dim, dtype=torch.double)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)
