import math
from itertools import combinations
from random import random

import torch

from algorithms.algorithm import Algorithm
from individual.population import Population
from problems.problem import Problem
from utils.vectors import get_ref_vectors, normalize_vector


class GRADDVA(Algorithm):
    """Gradient-based decision-variable-analysis evolutionary algorithm.

    Decision variables are split into convergence / diversity variables via
    gradient-sign analysis; convergence variables are further grouped by
    gradient interaction, and each group drives directed sampling towards an
    external archive, followed by reference-vector environmental selection.
    """

    def __init__(self, problem: Problem,
                 pop_size: int,
                 kwargs: dict,
                 ):
        """
        :param problem: test problem instance (supplies obj_dim, bounds, evaluation)
        :param pop_size: requested population size; the actual size is the
                         number of generated reference vectors (may differ)
        :param kwargs: extra configuration forwarded to the Algorithm base
        """
        # Reference vectors determine the real population size, so they must
        # be built BEFORE super().__init__ receives pop_size.
        self.ref_vectors = get_ref_vectors(pop_size, problem.obj_dim)
        pop_size = self.ref_vectors.shape[0]
        super().__init__(pop_size, problem, kwargs)
        # Gradient-based split: convergence vs. diversity variables.
        self.convergence_index, diversity_index = self.variable_analysis(problem)
        self.population = self.init_population(self.pop_size, self.problem, self.convergence_index, diversity_index)
        # Groups of interacting convergence variables, sampled together.
        self.sub_group_list = self.group_dec(problem, self.convergence_index)
        # self.neighbours = self.calc_neighbours(diversity_index)
        # External archive seeded from the initial population (self.op comes
        # from the Algorithm base -- presumably the operator toolbox; verify).
        self.arc = self.op.update_archive(self.population)

    def each_iteration(self):
        """One generation: directed sampling for every variable group, archive
        maintenance after each group, then reference-vector based
        environmental selection over offspring plus parents."""
        offspring = Population()
        for sub_group in self.sub_group_list:
            sampled = self.directed_sampling(self.population.dec, sub_group)
            offspring += sampled
            # Keep the archive current with every freshly sampled batch.
            self.arc = self.op.update_archive(self.arc + sampled)
        merged = offspring + self.population
        survivors = self.op.environmental_selection(merged.obj, self.ref_vectors)
        self.population = merged[survivors]

    def final_output(self):
        return self.arc

    def directed_sampling(self, dec, sub_group):
        """
        Directed sampling: generate candidates along the directions that point
        from the box bounds towards archive members, then overwrite the
        ``sub_group`` variables of each candidate with parent ``i``'s values.

        :param dec: parent decision matrix (one sampling round per row)
        :param sub_group: indices of the variables copied from the parent
        :return: evaluated population of 2 * len(dec) * len(archive) candidates
        """
        arc_dec = self.arc.dec
        # Directions from the lower / upper bound towards each archive member.
        towards_low = arc_dec - self.problem.low_limit
        towards_high = arc_dec - self.problem.high_limit
        direction = normalize_vector(torch.cat((towards_low, towards_high), dim=0), dim=1)
        # Diagonal length of the decision-space box bounds the step size.
        interval = self.problem.interval_length.norm(p=2)

        n_parents = dec.size(0)
        n_arc = len(direction) // 2
        # One random step length per (parent, direction) pair, drawn up front.
        steps = torch.rand(n_parents, 2 * n_arc) * interval
        new_decs = []
        for i in range(n_parents):
            # Low-bound batch first, then high-bound batch (order preserved).
            for base, step, dirs in (
                    (self.problem.low_limit, steps[i, :n_arc], direction[:n_arc]),
                    (self.problem.high_limit, steps[i, n_arc:], direction[n_arc:]),
            ):
                candidate = base + step.unsqueeze(dim=1) * dirs
                candidate[:, sub_group] = dec[i][sub_group]
                new_decs.append(candidate)
        return self.problem.estimate_population(torch.cat(new_decs, dim=0))

    def loss_function(self, dec):
        """Differentiable scalar loss (sum of PBI values) for refining ``dec``.

        NOTE: turns on autograd for ``dec`` IN PLACE; this must happen before
        the objective estimate so gradients can flow back to ``dec``.
        Returns (loss, obj) where obj are the estimated objective values.
        """
        dec.requires_grad_()
        obj = self.problem.estimate_population(dec).obj
        return self.pbi(obj).sum(), obj

    def pbi(self, obj, theta=5):
        """
        计算 MOEA/D 中的 PBI 目标分解方法。

        参数：
        obj: 目标函数值向量 (tensor, shape: [M])
        ref_vectors: 权向量 (tensor, shape: [M])
        theta: 惩罚参数 (float)
        返回：
        PBI 距离 (tensor, shape: [])
        """
        # 计算 d_1
        # obj_min = obj.min(dim=0, keepdim=True)[0]
        obj_min = torch.zeros((1, self.problem.obj_dim), dtype=torch.double)
        d1 = ((obj_min - obj) * self.ref_vectors[:obj.size(0)]).norm(p=2, dim=1) / self.ref_vectors[:obj.size(0)].norm(
            p=2, dim=1)

        # 计算 d_2
        d2 = (obj - (obj_min - d1.unsqueeze(dim=1) * self.ref_vectors[:obj.size(0)] / self.ref_vectors.norm())).norm(
            p=2, dim=1)

        # 计算 PBI
        pbi_value = d1 + theta * d2

        return pbi_value

    @staticmethod
    def generate_random_rows(n, num_rows, num_elems):
        """
        在0-n之间取num_elems元素为一行，循环num_rows次
        :param n:
        :param num_rows:
        :param num_elems:
        :return:
        """
        result = torch.zeros((num_rows, num_elems), dtype=torch.int)
        for i in range(num_rows):
            result[i] = torch.randperm(n)[:num_elems]
        return result

    def calc_neighbours(self, diversity_index):
        """
        Calculate the neighbours of each individual
        :param diversity_index:
        :return:
        """
        dec = self.population.dec
        dis = torch.cdist(dec[:, diversity_index], dec[:, diversity_index])
        dis[(torch.eye(len(dis), dtype=torch.bool))] = torch.inf
        _, neighbour = dis.sort(dim=1)
        return neighbour[:, : math.ceil(self.pop_size / 10)]

    @staticmethod
    def variable_analysis(problem):
        """
        决策变量分析,通过梯度值进行分析
        :param problem:测试问题实例
        :return: 收敛变量索引, 分布变量索引
        """
        dec = problem.init_population(50).dec
        grads = problem.calc_grad(dec)
        res = torch.zeros(grads.size(0), grads.size(1), dtype=torch.bool)
        for idx in range(grads.size(0)):
            res0 = torch.all(grads[idx] >= 0, dim=1)
            res1 = torch.all(grads[idx] <= 0, dim=1)
            res2 = torch.sum(grads[idx] != 0, dim=1) >= 1
            res[idx] = (res0 | res1) & res2
        convergence_index = torch.sum(res, dim=0) == len(dec)
        return convergence_index, ~convergence_index

    @staticmethod
    def group_dec(problem: Problem, convergence_index):
        """
        Group the convergence decision variables by gradient interaction:
        perturb one variable, and every variable whose gradient sum changes
        is placed in the same group.

        :param problem: test problem instance
        :param convergence_index: boolean mask of convergence variables
        :return: list of index tensors, one tensor of variable indices per group
        """
        convergence_size = convergence_index.sum()
        # Group label per convergence variable; 0 means "not yet grouped".
        group_front_no = torch.zeros(convergence_size, dtype=torch.int)
        # Baseline decision vector: ONE shared random scalar scaled onto every
        # dimension's range.
        dec = random() * problem.interval_length + problem.low_limit
        initial_grad = problem.calc_grad(dec)[0]
        convergence_index = torch.where(convergence_index)[0]
        initial_grad = initial_grad[convergence_index]
        front_no = 1
        for index, front in enumerate(convergence_index):
            if group_front_no[index] != 0:
                continue
            temp_dec = dec.clone()
            # Re-randomise only this variable and see which gradients move.
            temp_dec[:, front] = random() * problem.interval_length[:, front] + problem.low_limit[:, front]
            new_grad = problem.calc_grad(temp_dec)[0][convergence_index]
            sub_group = (torch.sum(initial_grad, dim=1) != torch.sum(new_grad, dim=1))
            # Special case (e.g. DTLZ7 has a constant derivative): no gradient
            # changed, so fall back to a singleton group for this variable.
            if sub_group.sum() == 0:
                sub_group[index] = True
            sub_group_index = torch.where(sub_group)[0]
            # NOTE(review): this inner loop shadows the outer 'front', writes
            # 'front' to the WHOLE sub_group_index slice on the first nonzero
            # entry, and its writes are then overwritten by the unconditional
            # assignment below -- it appears to have no lasting effect.
            # Verify the intended group-merge behaviour.
            for idx, front in enumerate(group_front_no[sub_group_index]):
                if front != 0:
                    group_front_no[sub_group_index] = front
                    continue
            # sub_group_index = torch.tensor(
            #     [sub_group_index[idx].item() for idx, is_append in enumerate(has_group[sub_group_index]) if
            #      not is_append], dtype=torch.int)
            group_front_no[sub_group_index] = front_no
            front_no += 1
        # Collect variable indices per unique group label.
        unique_values = torch.unique(group_front_no)

        groups = []
        for value in unique_values:
            # Positions (within the convergence subset) carrying this label...
            mask = group_front_no == value
            indices = torch.nonzero(mask, as_tuple=True)[0]
            # ...mapped back to original decision-variable indices.
            groups.append(convergence_index[indices])

        return groups

    @classmethod
    def init_population(cls, pop_size, problem, convergence_index, diversity_index):
        """
        Build the initial population in the unit hypercube, then scale it onto
        the decision-space box. Diversity variables get a space-filling design
        (grid / uniform design / random depending on their count); convergence
        variables are drawn uniformly at random.
        """
        dec = torch.zeros(pop_size, problem.var_dim, dtype=torch.double)
        n_div = torch.sum(diversity_index)
        n_conv = torch.sum(convergence_index)

        if n_div == 1:
            # Single diversity variable: evenly spaced grid on [0, 1].
            grid = torch.arange(0, pop_size, dtype=torch.double) / (pop_size - 1)
            dec[:, diversity_index] = grid.unsqueeze(1)
        elif n_div > 4:
            # Too many dimensions for the lattice search: plain uniform random.
            dec[:, diversity_index] = torch.rand(pop_size, n_div, dtype=torch.double)
        else:
            # 2-4 diversity variables: good-lattice uniform design.
            dec[:, diversity_index] = cls.u_dall(pop_size, n_div)

        dec[:, convergence_index] = torch.rand(pop_size, n_conv, dtype=torch.double)
        # Map the unit hypercube onto [low_limit, high_limit].
        dec = dec * problem.interval_length + problem.low_limit
        return Population(dec, problem.eval_value(dec))

    @staticmethod
    def calCD2(UT):
        """
        Calculate the CD2 (centered L2-discrepancy) value of the point set, to
        measure the uniformity of the points.
        """
        N, S = UT.size()
        X = (2 * UT - 1) / (2 * N)

        CS1 = torch.sum(torch.prod(2 + torch.abs(X - 1 / 2) - (X - 1 / 2) ** 2, dim=1))
        CS2 = torch.zeros(N)

        for i in range(N):
            diff_abs = torch.abs(X[i, :].repeat(N, 1) - 1 / 2) + torch.abs(X - 1 / 2) - torch.abs(
                X[i, :].repeat(N, 1) - X)
            CS2[i] = torch.sum(torch.prod((1 + 1 / 2 * diff_abs), dim=1))

        CS2 = torch.sum(CS2)
        CD2 = (13 / 12) ** S - (2 ** (1 - S) / N) * CS1 + 1 / (N ** 2) * CS2

        return CD2

    @staticmethod
    def gcd(a, b):
        """
        求解最大公约数
        :param a:
        :param b:
        :return:
        """
        while b != 0:
            a, b = b, a % b
        return a

    @classmethod
    def u_dall(cls, pop_size, diversity_size):
        # Find all numbers less than N that are coprime to N
        hm = [i for i in range(1, pop_size + 1) if cls.gcd(torch.tensor(i), torch.tensor(pop_size)) == 1]
        hm = torch.tensor(hm, dtype=torch.long)

        N_range = torch.arange(1, pop_size + 1, dtype=torch.double).view(pop_size, 1)
        udt = torch.remainder(N_range * hm, pop_size)
        udt[udt == 0] = pop_size

        # Choose M columns among hm as the output, which have the minimum CD2 value
        nCombination = len(list(combinations(hm.tolist(), diversity_size)))
        data = None

        if nCombination < 1e4:
            Combination = list(combinations(range(len(hm)), diversity_size))
            CD2 = torch.zeros(len(Combination))

            for i, indices in enumerate(Combination):
                UT = udt[:, indices]
                CD2[i] = cls.calCD2(UT)

            minIndex = torch.argmin(CD2).item()
            data = udt[:, Combination[minIndex]]
        else:
            CD2 = torch.zeros(pop_size)

            for i in range(1, pop_size + 1):
                UT = torch.remainder(N_range * torch.tensor([i ** k for k in range(diversity_size)]), pop_size)
                CD2[i - 1] = cls.calCD2(UT)

            minIndex = torch.argmin(CD2).item()
            data = torch.remainder(N_range * torch.pow(minIndex, torch.arange(diversity_size)), pop_size)
            data[data == 0] = pop_size

        data = (data - 1) / (pop_size - 1)

        return data[data[:, 0].sort()[1]]
