import copy
from datetime import datetime

import numpy as np
import torch

from algorithms.algorithm import Algorithm
from algorithms.base.nsga2 import NSGA2
from algorithms.large.lmoeads import LMOEADS
from individual.population import Population
from operators.crossover.sbx import SBX
from operators.mutations.polynomial_mutation import PolynomialMutation
from problems.problem import Problem
from utils.vectors import get_ref_vectors, normalize_vector, unique, vector_radian


# torch.autograd.set_detect_anomaly(True)

class LSGNA(Algorithm):

    def __init__(self, problem: Problem,
                 pop_size: int,
                 kwargs: dict,
                 ):
        """
        Initialize the algorithm.
        :param problem: the optimization problem to solve
        :param pop_size: requested population size; the actual size is the
            number of generated reference (weight) vectors, which may differ
        :param kwargs: extra parameters forwarded to the base Algorithm
        """
        # The reference-vector count determines the real population size.
        self.ref_vectors = get_ref_vectors(pop_size, problem.obj_dim)
        pop_size = self.ref_vectors.shape[0]
        super().__init__(pop_size, problem, kwargs)
        self.population = self.problem.init_population(pop_size)
        # Nw / Ns: number of sampling directions / samples per direction,
        # shared with the LMOEADS optimizer and directed_sampling below.
        self.Nw = 5
        self.Ns = 5
        self.optimizer = LMOEADS(problem, pop_size, {'Nw': self.Nw, 'Ns': self.Ns})
        # Non-dominated archive, seeded from the initial population.
        self.arc = self.update_archive(copy.deepcopy(self.population))
        self.iter = 0
        self.start_time = datetime.now()

    # def is_terminated(self):
    #     igd = IGD.value(self.arc.obj, self.problem.optimal_solutions)
    #     # print(IGD.value(igd, self.problem.optimal_solutions))
    #     if igd < 4e-1:
    #         print(self.__class__.__name__, self.problem.__class__.__name__,
    #               (datetime.now() - self.start_time).total_seconds() / 60)
    #         return True
    #     else:
    #         print(self.__class__.__name__, self.problem.__class__.__name__, "IGD=", igd)
    #         return False

    def each_iteration(self):
        """Run one generation: GA variation, periodic gradient refinement,
        directed sampling, then decomposition-based environmental selection."""
        # Crossover/mutation on the non-dominated individuals of the population
        temp_pop = self.GA_operator()
        # Update the archive with the variated offspring
        self.arc = self.update_archive(self.arc + copy.deepcopy(temp_pop))
        dec = temp_pop.dec
        # ******************************** crossover/mutation end ********************************
        # ******************************** gradient refinement start *****************************
        pop_list = Population()  # gradient-refined solutions (filled every 3rd iteration)
        self.iter += 1
        if self.iter % 3 == 0:
            # Approximate derivatives via pytorch autograd
            # dec = excel2tensor("2024-07-13_17-59-14.xlsx")
            pop_list = self.grad_optimize_with_pytorch(dec)
            # Alternative: finite-difference derivatives
            # pop_list = self.grad_optimize_with_math(dec)
        # ************************************ sampling start ************************************
        # Randomly pick 30 archive members as sampling anchors
        offspring = self.directed_sampling(self.arc[torch.randperm(len(self.arc))[:30]].dec)
        # offspring = self.gaussian_data(self.arc.dec,self.problem)
        # offspring = self.sample_within_hypersphere(self.arc[torch.randperm(len(self.arc))[:20]].dec,radius=10000,num_samples=10)
        # Update the archive with the sampled solutions
        self.arc = self.update_archive(self.arc + copy.deepcopy(offspring))
        # ************************************* sampling end *************************************
        # ******************************* environmental selection start **************************
        # Merge (parents + GA offspring) temp_pop, the gradient-refined pop_list
        # and the sampled offspring into one candidate pool
        temp_pop = temp_pop + pop_list + offspring
        # Drop duplicate objective vectors
        index = unique(temp_pop.obj)
        temp_pop = temp_pop[index]

        temp_obj = temp_pop.obj
        # Decomposition-based selection: bind solutions to reference vectors; a
        # vector with several bound solutions keeps the best one, and a vector
        # with none takes its nearest not-yet-selected solution.
        next_index = self.environmental_selection(temp_obj, self.ref_vectors)
        # ******************************** environmental selection end ***************************
        self.population = temp_pop[next_index]

    @staticmethod
    def environmental_selection(obj, ref_vectors):
        if len(ref_vectors) > len(obj):
            return torch.ones(len(obj), dtype=torch.int)
        # 计算temp_obj与ref_vectors的cos数值,对排序，得到每个temp_obj与最近参考向量的cos值和参考向量索引
        obj_vector_radian = vector_radian(obj, ref_vectors, return_radian=False)
        obj = (obj - obj.min(0)[0]) / (obj.max(0)[0] - obj.min(0)[0])
        # obj_ref_max_value, obj_ref_max_index = obj_vector_radian.max(1)
        population_used = torch.zeros(len(obj), dtype=torch.bool)
        ref_vectors_used = torch.zeros(len(ref_vectors), dtype=torch.bool)
        while torch.sum(population_used) < len(ref_vectors):
            obj_vector_radian_temp = obj_vector_radian[~population_used]
            obj_vector_radian_temp[:, ref_vectors_used] = -10
            obj_ref_max_value, obj_ref_max_index = obj_vector_radian_temp.max(1)
            unique_index = torch.unique(obj_ref_max_index)
            ref_vectors_used[unique_index] = True
            not_used_solution_index = torch.where(~population_used)[0]
            temp_obj = obj[not_used_solution_index]
            for index in unique_index:
                temp_index = torch.nonzero(index == obj_ref_max_index, as_tuple=True)[0]
                dist = torch.cdist(temp_obj[temp_index], torch.zeros(1, temp_obj.size(1), dtype=torch.double)).squeeze(
                    dim=1)
                fan = obj_ref_max_value[temp_index] / dist
                best = torch.argmax(fan)
                population_used[not_used_solution_index[temp_index[best]]] = True
        return population_used

    def GA_operator(self):
        """Apply SBX crossover and polynomial mutation to the non-dominated
        front of the current population; return parents + evaluated offspring."""
        parent_dec = self.population.dec
        parent_obj = self.population.obj
        # Restrict variation to the first (non-dominated) front
        _, front_no = NSGA2.get_best(parent_obj, return_front=True)
        front_dec = parent_dec[front_no == 1]
        shuffle_a = torch.randperm(front_dec.size(0))
        shuffle_b = torch.randperm(front_dec.size(0))
        child_dec = SBX.do(front_dec[shuffle_a], front_dec[shuffle_b], self.problem)
        child_dec = PolynomialMutation.do(child_dec, self.problem)
        # Offspring are appended to, not replacing, the current population
        return self.population + self.problem.estimate_population(child_dec)

    def cal_grad1(self, decs, target):
        """
        Approximate gradients for a whole population by forward differences,
        perturbing one decision variable at a time.
        :param decs: decision matrix (population x var_dim).
            NOTE(review): rows are modified in place below (zeros replaced by
            1e-12), so the caller's tensor is mutated — confirm this is intended.
        :param target: target objective set used by the loss function
        :return: (obj_grads, IGD_grads) — per-variable gradients of each
            objective and of the scalar loss, respectively
        """
        infinitesimal = 1e-6
        IGD_grads = torch.zeros_like(decs)
        obj_grads = torch.zeros((*decs.shape, self.problem.obj_dim))
        for i in range(len(decs)):
            dec = decs[i]
            # Avoid a zero perturbation step (and division by zero below)
            dec[dec == 0] = 1e-12
            # Row j of x perturbs only variable j: x_j = dec * (1 + eps * e_j)
            x = dec * (1 + torch.eye(len(dec)) * infinitesimal)
            raw_loss_value, raw_obj_value = self.loss_function(dec, target)
            for j in range(len(x)):
                igd_value, obj_value = self.loss_function(x[j], target)
                # Forward difference (f(x+h) - f(x)) / h with h = dec[j] * infinitesimal
                IGD_grads[i][j] = (igd_value - raw_loss_value) / dec[j] / infinitesimal
                obj_grads[i][j] = (obj_value - raw_obj_value) / dec[j] / infinitesimal
        # Refund the evaluation budget spent on the perturbed evaluations
        self.problem.fun_eval -= (len(decs) * self.problem.var_dim)
        return obj_grads, IGD_grads

    def cal_grad(self, decs, target, res):
        """
        Approximate gradients by forward differences, but only for the
        decision variables flagged in ``res``.
        :param res: boolean matrix (population x var_dim); gradients are
            computed only where True, other entries stay zero
        :param decs: decision matrix (population x var_dim)
        :param target: target objective set used by the loss function
        :return: (obj_grads, GD_grads) — per-variable gradients of each
            objective and of the scalar loss, respectively
        """
        infinitesimal = 1e-3
        GD_grads = torch.zeros_like(decs)
        obj_grads = torch.zeros((*decs.shape, self.problem.obj_dim))
        # Relative perturbation step: h = dec * infinitesimal
        perturbation_value = decs * infinitesimal
        perturbation_decs = decs + perturbation_value
        raw_loss_value, raw_obj_value = self.loss_function(decs, target)
        for i in range(len(decs)):
            for j in range(self.problem.var_dim):
                if not res[i][j]:
                    continue
                temp_dec = decs.clone()
                temp_dec[i][j] = perturbation_decs[i][j]
                # loss_function1 re-evaluates only row i to save evaluations
                gd_value, obj_value = self.loss_function1(temp_dec, i, raw_obj_value, target)
                GD_grads[i][j] = (gd_value - raw_loss_value) / perturbation_value[i][j]
                obj_grads[i][j] = (obj_value - raw_obj_value[i]) / perturbation_value[i][j]
        # self.problem.fun_eval -= (len(decs) * self.problem.var_dim)
        return obj_grads, GD_grads

    def grad_optimize_with_pytorch(self, dec):
        """
        Refine decision vectors for five Adam steps using autograd gradients
        of the loss (distance to a shrunken archive front plus a diversity
        penalty). Only variables flagged by
        analyze_decision_variables_with_pytorch are updated.
        :param dec: decision matrix to refine (cloned; the input is untouched)
        :return: Population accumulating the populations produced at each step
        """
        grad_dec = dec.clone().requires_grad_()
        # lr alternative: a linear schedule decaying from 0.1 to ~0.0001
        # optimizer = torch.optim.Adam([grad_dec],
        #                              lr=-0.099999 * min(self.problem.fun_eval / self.problem.max_fun_eval, 1) + 0.1)
        optimizer = torch.optim.Adam([grad_dec],
                                     lr=0.1)
        # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
        # grad_obj = obj
        pop_list = Population()
        # Boolean mask of decision variables worth updating (computed once)
        res = self.analyze_decision_variables_with_pytorch(grad_dec.clone(), self.problem)
        # if not res.any():
        #     return
        for _ in range(5):
            optimizer.zero_grad()
            # Target = archive objectives scaled by 0.9 (slightly beyond front)
            loss, _ = self.loss_function(grad_dec, self.arc.obj * 0.9)
            loss.backward()
            # tensor2excel(grad_dec.grad)
            # Mask the gradient: only the selected variables get updated
            # res = self.analyze_decision_variables_with_pytorch(grad_dec.clone(), self.problem)
            with torch.no_grad():
                grad_dec.grad *= res
            # Apply the masked gradient
            optimizer.step()
            # scheduler.step()
            # Repair out-of-bounds values
            grad_dec = self.clip_to_bounds(grad_dec, self.problem.low_limit, self.problem.high_limit)
            # Replace NaNs with random in-bounds values
            grad_dec = self.replace_nan_with_random(grad_dec, self.problem)
            grad_pop = self.problem.estimate_population(grad_dec.detach().clone())
            pop_list += grad_pop
            self.arc = self.update_archive(self.arc + grad_pop)
            # print(IGD.value(grad_pop.obj, self.problem.optimal_solutions))
        return pop_list

    def grad_optimize_with_math(self, dec):
        """
        Same refinement loop as grad_optimize_with_pytorch, but the gradients
        come from finite differences (cal_grad) and are written into
        ``grad_dec.grad`` manually inside grad_update.
        :param dec: decision matrix to refine (cloned; the input is untouched)
        :return: Population accumulating the populations produced at each step
        """
        grad_dec = dec.clone()
        optimizer = torch.optim.Adam([grad_dec],
                                     lr=0.01)
        # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
        # First pass differentiates every variable so we can decide which
        # ones are worth updating
        obj_grads, GD_grads = self.cal_grad(grad_dec, self.arc.obj * 0.9, res=torch.ones_like(grad_dec))
        res = self.analyze_decision_variables_with_math(obj_grads)
        pop_list = Population()
        # if not res.any():
        #     return pop_list
        self.grad_update(GD_grads * res, grad_dec, optimizer, pop_list)
        # grad_obj = obj
        for _ in range(5):
            # Subsequent passes only differentiate the selected variables
            obj_grads, GD_grads = self.cal_grad(grad_dec, self.arc.obj * 0.9, res)
            # tensor2excel(GD_grads)
            # res = self.analyze_decision_variables_with_math(obj_grads)
            # self.problem.fun_eval -= (~res).sum().item()
            self.grad_update(GD_grads * res, grad_dec, optimizer, pop_list)

        return pop_list

    @staticmethod
    def gaussian_kernel(s1, s2, sigma=1.0):
        """
        计算高斯核函数
        :param s1: 个体1的状态或策略参数，形状为 (d,)
        :param s2: 个体2的状态或策略参数，形状为 (d,)
        :param sigma: 高斯核的带宽参数
        :return: 高斯核值
        """
        distance = torch.norm(s1 - s2, p=2)  # 计算欧氏距离
        return torch.exp(-distance ** 2 / (2 * sigma ** 2))

    @classmethod
    def diversity_loss(cls, objs, sigma=1.0):
        """
        计算基于高斯核的多样化损失
        :param objs:
        :param sigma: 高斯核的带宽参数
        :return: 多样化损失值
        """
        N = objs.shape[0]  # 种群大小
        loss = 0.0
        for i in range(N):
            for j in range(i + 1, N):
                loss += cls.gaussian_kernel(objs[i], objs[j], sigma)
        return loss/N

    def grad_update(self, GD_grads, grad_dec, optimizer, pop_list):
        """
        Apply one optimizer step using externally computed gradients, repair
        the result, and record the new population in pop_list and the archive.
        :param GD_grads: gradient matrix written directly into grad_dec.grad
        :param grad_dec: decision matrix being optimized (updated in place)
        :param optimizer: optimizer that owns grad_dec
        :param pop_list: Population accumulator extended with the new solutions
        """
        # Inject the finite-difference gradient; autograd is not involved here
        grad_dec.grad = GD_grads
        # Let the optimizer apply the gradient
        optimizer.step()
        # scheduler.step()
        # Repair out-of-bounds values
        grad_dec = self.clip_to_bounds(grad_dec, self.problem.low_limit, self.problem.high_limit)
        # Replace NaNs with random in-bounds values
        grad_dec = self.replace_nan_with_random(grad_dec, self.problem)
        grad_pop = self.problem.estimate_population(grad_dec)
        pop_list += grad_pop
        self.arc = self.update_archive(self.arc + grad_pop)
        # print(IGD.value(grad_pop.obj, self.problem.optimal_solutions))

    def analyze_decision_variables_with_math(self, grads):
        """Select decision variables from finite-difference gradients; thin
        wrapper around analyze_decision_variables."""
        return self.analyze_decision_variables(grads)

    @classmethod
    def analyze_decision_variables(cls, grads):
        res = torch.zeros(grads.size(0), grads.size(1), dtype=torch.bool)
        distance = torch.zeros(grads.size(0), grads.size(1), dtype=torch.double)
        # obj = self.arc.obj
        for idx in range(grads.size(0)):
            grad = grads[idx]
            res0 = torch.all(grad >= 0, dim=1)
            res1 = torch.all(grad <= 0, dim=1)
            res2 = torch.sum(grad != 0, dim=1) >= 1
            # temp = torch.abs(grad)
            temp_max = grad.max(1)[0]
            temp_min = grad.min(1)[0]
            distance[idx] = temp_max - temp_min
            res[idx] = (res0 | res1) & res2
        return cls.res_update(grads, res)

    @staticmethod
    def res_update(grads, res):
        """
        Give rejected variables a second chance: among entries where res is
        False, re-enable the quarter with the largest absolute "gradient
        disagreement" (first objective's gradient minus the sum of the rest).
        :param grads: gradient tensor of shape (pop, var_dim, obj_dim)
        :param res: boolean mask of shape (pop, var_dim), modified in place
        :return: the updated res mask (same object)
        """
        # Positions currently rejected
        false_mask = ~res  # 获取 res 为 False 的位置
        # false_grads = grads[false_mask]  # 提取 grads 中对应的值
        # Disagreement score: grads[..., 0] - grads[..., 1] - ... - grads[..., m-1]
        # (with a single objective this is just grads[..., 0])
        temp = grads[:, :, 0]
        for idx in range(1, grads.shape[2]):
            temp = temp - grads[:, :, idx]
        false_grads = temp[false_mask].abs()
        # Sort the rejected entries' scores in descending order
        sorted_grads, sorted_indices = torch.sort(false_grads, descending=True)

        # Size of the top quarter (integer division — may be 0 for few entries)
        num_elements = sorted_grads.numel()
        top_quarter_count = num_elements // 4

        # Map top-quarter positions back to (row, col) indices of res
        top_quarter_indices = sorted_indices[:top_quarter_count]
        top_quarter_global_indices = torch.nonzero(false_mask, as_tuple=False)[top_quarter_indices]

        # Re-enable those variables
        res[top_quarter_global_indices[:, 0], top_quarter_global_indices[:, 1]] = True

        return res

    def update_archive(self, population: Population, get_ref_vectors_size: int = 200):
        """
        Rebuild the non-dominated archive from a candidate population:
        deduplicate objective vectors, keep the first front, and if it exceeds
        get_ref_vectors_size, truncate it with decomposition-based selection.
        :param population: candidate solutions for the archive
        :param get_ref_vectors_size: maximum archive size
        :return: a new Population holding the archive
        """
        keep = unique(population.obj)
        arc_dec = population.dec[keep]
        arc_obj = population.obj[keep]
        front_no, _ = NSGA2.ND_sort(arc_obj, 1)
        nd_mask = front_no == 1
        arc_obj = arc_obj[nd_mask]
        arc_dec = arc_dec[nd_mask]
        if len(arc_dec) > get_ref_vectors_size:
            refs = get_ref_vectors(get_ref_vectors_size, self.problem.obj_dim)
            selected = self.environmental_selection(arc_obj, refs)
            arc_obj = arc_obj[selected]
            arc_dec = arc_dec[selected]
        return Population(arc_dec, arc_obj)

    @staticmethod
    def environmental_selection1(obj, ref_vectors):
        """
        One-pass variant of the decomposition-based selection: each reference
        vector keeps its best associated solution; vectors left without any
        solution then grab their nearest not-yet-selected one.
        :param obj: objective matrix, one row per solution
        :param ref_vectors: reference (weight) vector matrix
        :return: int tensor of selected row indices into ``obj``
        """
        # Cosine similarity between solutions and vectors; for each solution
        # keep its best vector (value and index)
        obj_ref_max_value, obj_ref_max_index = vector_radian(obj, ref_vectors, return_radian=False).max(1)
        population_used = torch.zeros(len(obj), dtype=torch.bool)
        obj = (obj - obj.min(0)[0]) / (obj.max(0)[0] - obj.min(0)[0])
        unique_index = torch.unique(obj_ref_max_index)
        # next_index = torch.zeros(len(unique_index), dtype=torch.int)
        next_index = []
        for index in unique_index:
            temp_index = torch.nonzero(index == obj_ref_max_index, as_tuple=True)[0]
            dist = torch.cdist(obj[temp_index], torch.zeros(1, obj.size(1), dtype=torch.double)).squeeze(dim=1)
            # Larger cosine and smaller origin distance are both better
            fan = obj_ref_max_value[temp_index] / dist
            best = torch.argmax(fan)
            population_used[temp_index[best]] = True
            next_index.append(temp_index[best].item())
        # Reference vectors that received no solution
        no_individual_refs = np.setdiff1d(torch.arange(len(ref_vectors)).numpy(), unique_index.numpy())
        if len(no_individual_refs) > 0:
            ref_obj_index = vector_radian(ref_vectors[no_individual_refs], obj).sort(dim=1)[1]
            for i in range(len(no_individual_refs)):
                # NOTE(review): this masks the sorted index permutation by
                # *position*, not by the solution ids it contains — it likely
                # should be ref_obj_index[i][~population_used[ref_obj_index[i]]];
                # confirm the intended semantics before relying on this variant
                no_alloc_individual_index = ref_obj_index[i][population_used == False][0]
                next_index.append(no_alloc_individual_index.item())
                population_used[no_alloc_individual_index] = True
        return torch.tensor(next_index, dtype=torch.int)

    # 越界处理函数
    @staticmethod
    def clip_to_bounds(tensor, lower, upper):
        tensor.data = torch.clamp(tensor.data, min=lower, max=upper)
        return tensor

    @staticmethod
    def replace_nan_with_random(p_dec, problem: Problem):
        if torch.isnan(p_dec).any():
            # 获取 NaN 元素的索引
            nan_indices = torch.isnan(p_dec)
            # 替换 NaN 元素为随机数
            p_dec.data[nan_indices] = (torch.rand(p_dec.shape, dtype=torch.double) *
                                       problem.interval_length + problem.low_limit)[nan_indices]
            # nan_indices = torch.any(nan_indices, dim=1)
            # obj = problem.estimate_population(p_dec[nan_indices]).obj
            # p_obj[nan_indices] = obj
        return p_dec

    def final_output(self):
        """Return the archive as the algorithm's final solution set."""
        return self.arc

    def loss_function(self, dec, target):
        """
        Loss for gradient refinement: mean distance from each solution's
        objectives to its nearest target point, plus a small diversity penalty.
        :param dec: decision matrix to evaluate
        :param target: target objective set
        :return: (scalar loss, detached copy of the objective matrix)
        """
        pop_obj = self.problem.estimate_population(dec).obj
        proximity = torch.cdist(pop_obj, target).min(1)[0].mean()
        loss = proximity + self.diversity_loss(pop_obj, 1e-3)
        return loss, pop_obj.detach().clone()

    def loss_function1(self, decs, idx, objs, target):
        """
        Variant of loss_function for finite differences: only row ``idx`` is
        re-evaluated; the other rows reuse the cached objectives in ``objs``.
        :param decs: full decision matrix (only row idx is evaluated)
        :param idx: index of the perturbed individual
        :param objs: cached objective matrix of the unperturbed population
        :param target: target objective set
        :return: (scalar loss, the re-evaluated objectives of row idx)
        """
        patched_objs = objs.clone()
        row_obj = self.problem.estimate_population(decs[idx]).obj
        patched_objs[idx] = row_obj
        proximity = torch.cdist(patched_objs, target).min(1)[0].mean()
        return proximity + self.diversity_loss(patched_objs, 1e-3), row_obj

    def analyze_decision_variables_with_pytorch(self, nd_dec, problem: Problem):
        """Select decision variables using autograd gradients of the problem's
        objectives; see analyze_decision_variables for the criterion."""
        autograd_grads = self.get_grads(nd_dec, problem)
        return self.analyze_decision_variables(autograd_grads)

    @staticmethod
    def get_grads(nd_dec, problem: Problem):
        """
        Compute, via autograd, the gradient of each objective with respect to
        every decision variable.
        :param nd_dec: decision matrix; requires_grad is enabled in place
        :param problem: supplies eval_value and obj_dim
        :return: gradient tensor of shape (pop, var_dim, obj_dim)
        """
        grad_list = []
        nd_dec.requires_grad_()
        nd_obj = problem.eval_value(nd_dec)
        # All-ones cotangent, same shape as the objective matrix
        grad_outputs = torch.ones_like(nd_obj)
        # One backward pass per objective dimension. NOTE(review): this assumes
        # each row of nd_obj depends only on the matching row of nd_dec, so the
        # summed gradient equals the per-row gradient — confirm for the problem.
        for dim in range(problem.obj_dim):
            grad = torch.autograd.grad(outputs=nd_obj[:, dim], inputs=nd_dec,
                                       grad_outputs=grad_outputs[:, dim], retain_graph=True)[0]
            grad_list.append(grad)
        grads = torch.stack(grad_list, dim=2)
        return grads

    @staticmethod
    def find_significant_indices(vector, threshold=-1):
        """
        对vector正态化,选择正态化后vector值大于threshold的决策变量
        需要考虑threshold怎么设置
        :param vector:
        :param threshold:
        :return:
        """
        # 计算均值和标准差
        mean = torch.mean(vector)
        std = torch.std(vector)

        # 计算Z评分
        z_scores = (vector - mean) / std

        return z_scores > threshold

    @staticmethod
    def gaussian_data(dec, problem: Problem, return_population=True):
        """
        Sample 200 new solutions from per-variable normal distributions
        centred on the column means of ``dec``.
        :param dec: decision matrix the column means are taken from
        :param problem: supplies var_dim, bounds and evaluation
        :param return_population: if True return an evaluated Population,
            otherwise just the repaired decision matrix
        :return: Population or decision tensor, per return_population
        """
        sample_size = 200
        # Column means of the source decisions
        means = dec.mean(dim=0)
        # stds = dec.std(dim=0)

        # Buffer for the generated samples.
        # NOTE(review): the std used below is sqrt(interval_length), not the
        # column std of dec (commented out above) — confirm this spread is intended.
        new_dec = torch.empty((sample_size, problem.var_dim), dtype=torch.double)  # buffer sized (sample_size, var_dim)

        # Sample each column independently from its own normal distribution
        for i in range(dec.size(1)):  # iterate over columns
            new_dec[:, i] = torch.normal(mean=means[i],
                                         std=problem.interval_length[0][i].sqrt(),
                                         size=(sample_size,))
        return problem.estimate_population(new_dec) if return_population else problem.repair_decision(new_dec)

    def sample_within_hypersphere(self, dec, radius=5000, num_samples=20):
        """
        Draw ``num_samples`` uniform samples inside a hypersphere around each
        row of ``dec``.
        :param dec: centre points, shape (pop, var_dim)
        :param radius: NOTE(review): this parameter is dead — it is
            immediately overwritten with interval_length / 2 below; confirm
            whether the caller's radius should be honoured instead
        :param num_samples: samples generated per centre
        :return: evaluated Population of pop * num_samples samples
        """
        pop_size, var_dim = dec.shape
        radius = self.problem.interval_length[0] / 2
        # Sample buffer (default float32 — dec elsewhere is double; TODO confirm)
        samples = torch.empty((pop_size * num_samples, var_dim))

        for i, center in enumerate(dec):
            for j in range(num_samples):
                # Step 1: random direction on the unit sphere
                random_direction = torch.randn(var_dim)
                random_direction = random_direction / torch.norm(random_direction, p=2)

                # Step 2: radius distributed so points are uniform in the ball
                random_radius = torch.rand(1).pow(1 / var_dim) * radius

                # Step 3: offset the centre to get the sample
                sampled_point = center + random_direction * random_radius
                samples[i * num_samples + j] = sampled_point

        return self.problem.estimate_population(samples)

    @staticmethod
    def hypervolume(P, r, N=1000):
        """
        Hypervolume estimator using Monte Carlo simulation.

        :param P: Tensor representing approximated Pareto front, size n x d
        :param r: Tensor representing the reference point, size 1 x d
        :param N: Number of random points to use (integer) or actual set of points, size N x d
        :return: Estimation of the hypervolume
        """
        # Normalize P by the reference point r
        P = P / r

        n, d = P.size()
        if not torch.is_tensor(N):
            # Generate N random points uniformly within the hyper-cuboid bounded by r
            C = torch.rand(N, d, device=P.device)
        else:
            # Use provided points
            C = N
            N = C.size(0)

        f_dominated = torch.zeros(N, dtype=torch.bool, device=P.device)
        lB = torch.min(P, dim=0)[0]
        f_check = torch.all(C > lB, dim=1)

        for k in range(n):
            if torch.any(f_check):
                f = torch.all(C[f_check] > P[k], dim=1)
                f_dominated[f_check] = f
                f_check[f_check == True] = ~f

        v = torch.sum(f_dominated, dtype=torch.float) / N
        return v.item()

    def directed_sampling(self, dec):
        """
        Directed sampling: generate candidates along the lines connecting the
        decision-space bounds to each anchor solution, keep the non-dominated.
        :param dec: anchor decision matrix (one direction pair per row)
        :return: non-dominated Population among the 2 * Ns * Nw samples
        """
        # Directions from the lower bound toward each anchor, and from the
        # upper bound (dec - high_limit points back into the interior)
        dec_low = dec - self.problem.low_limit
        dec_high = dec - self.problem.high_limit
        direction = torch.cat((dec_low, dec_high), dim=0)
        # Normalize the directions to unit length
        direction = normalize_vector(direction, dim=1)
        # Diagonal length of the decision box (L2 norm of the interval lengths)
        interval = self.problem.interval_length.norm(p=2)
        # Directed sampling

        self.Nw = dec_low.size(0)
        # Random step sizes in [0, interval)
        rand_sample = torch.rand(self.Ns, 2 * self.Nw) * interval
        offspring_dec = []
        # Produce 2 * Ns * Nw new solutions
        for i in range(self.Ns):
            offspring_dec.append(
                self.problem.low_limit + rand_sample[i, :self.Nw].unsqueeze(dim=1) * direction[:self.Nw])
            offspring_dec.append(
                self.problem.high_limit + rand_sample[i, self.Nw:].unsqueeze(dim=1) * direction[self.Nw:])
        # Stack the list into one tensor
        offspring_dec = torch.cat(offspring_dec, dim=0)
        offspring_dec = self.problem.repair_decision(offspring_dec)
        offspring = self.problem.estimate_population(offspring_dec)
        offspring_obj = offspring.obj
        # Keep only the non-dominated samples
        nd_solution_index = NSGA2.ND_sort(offspring_obj, 1)[0] == 1
        return offspring[nd_solution_index]

    def directed_sampling1(self, dec):
        """
        Directed-sampling variant: directions come from each anchor's offset
        to the population's per-column min/max, and the step sizes are drawn
        from normal(0, 30) around the anchors themselves.
        :param dec: anchor decision matrix
        :return: non-dominated Population among the generated samples
        """
        dec_low = dec - dec.min(0)[0]
        dec_high = dec.max(0)[0] - dec
        direction = torch.cat((dec_low, dec_high), dim=0)
        # Normalize the directions to unit length
        direction = normalize_vector(direction, dim=1)
        # Diagonal length of the decision box (unused in this variant)
        interval = self.problem.interval_length.norm(p=2)
        # Directed sampling

        self.Nw = dec_low.size(0)
        # rand_sample = torch.rand(self.Ns, 2 * self.Nw) * interval
        rand_sample = torch.normal(0, std=30, size=(self.Ns, 2 * self.Nw))
        offspring_dec = []
        # Produce 2 * Ns * Nw new solutions
        for i in range(self.Ns):
            offspring_dec.append(dec + rand_sample[i, :self.Nw].unsqueeze(dim=1) * direction[:self.Nw])
            offspring_dec.append(dec + rand_sample[i, self.Nw:].unsqueeze(dim=1) * direction[self.Nw:])
        # Stack the list into one tensor
        offspring_dec = torch.cat(offspring_dec, dim=0)
        offspring_dec = self.problem.repair_decision(offspring_dec)
        offspring = self.problem.estimate_population(offspring_dec)
        offspring_obj = offspring.obj
        # Keep only the non-dominated samples
        nd_solution_index = NSGA2.ND_sort(offspring_obj, 1)[0] == 1
        return offspring[nd_solution_index]

    def directed_sampling2(self, dec):
        """
        Directed-sampling variant: bound-based directions (as in
        directed_sampling) combined with normal(0, 30) steps around the
        anchors (as in directed_sampling1).
        :param dec: anchor decision matrix
        :return: non-dominated Population among the generated samples
        """
        # dec_low = dec - dec.min(0)[0]
        # dec_high = dec.max(0)[0] - dec
        dec_low = dec - self.problem.low_limit
        dec_high = dec - self.problem.high_limit
        direction = torch.cat((dec_low, dec_high), dim=0)
        # Normalize the directions to unit length
        direction = normalize_vector(direction, dim=1)
        # Diagonal length of the decision box (unused in this variant)
        interval = self.problem.interval_length.norm(p=2)
        # Directed sampling

        self.Nw = dec_low.size(0)
        # rand_sample = torch.rand(self.Ns, 2 * self.Nw) * interval
        rand_sample = torch.normal(0, std=30, size=(self.Ns, 2 * self.Nw))
        offspring_dec = []
        # Produce 2 * Ns * Nw new solutions
        for i in range(self.Ns):
            offspring_dec.append(dec + rand_sample[i, :self.Nw].unsqueeze(dim=1) * direction[:self.Nw])
            offspring_dec.append(dec + rand_sample[i, self.Nw:].unsqueeze(dim=1) * direction[self.Nw:])
        # Stack the list into one tensor
        offspring_dec = torch.cat(offspring_dec, dim=0)
        offspring_dec = self.problem.repair_decision(offspring_dec)
        offspring = self.problem.estimate_population(offspring_dec)
        offspring_obj = offspring.obj
        # Keep only the non-dominated samples
        nd_solution_index = NSGA2.ND_sort(offspring_obj, 1)[0] == 1
        return offspring[nd_solution_index]

    def directed_sampling3(self, dec):
        """
        Directed-sampling variant sampling from the bounds along
        population-derived directions.
        :param dec: anchor decision matrix
        :return: non-dominated Population among the generated samples
        """
        # NOTE(review): dec - dec - dec.min(0)[0] reduces to -dec.min(0)[0],
        # i.e. every row gets the same direction — possibly a typo for
        # dec - dec.min(0)[0]; confirm before relying on this variant.
        dec_low = dec - dec - dec.min(0)[0]
        dec_high = dec - dec.max(0)[0]
        direction = torch.cat((dec_low, dec_high), dim=0)
        # Normalize the directions to unit length
        direction = normalize_vector(direction, dim=1)
        # Diagonal length of the decision box (L2 norm of the interval lengths)
        interval = self.problem.interval_length.norm(p=2)
        # Directed sampling

        self.Nw = dec_low.size(0)
        rand_sample = torch.rand(self.Ns, 2 * self.Nw) * interval
        offspring_dec = []
        # Produce 2 * Ns * Nw new solutions
        for i in range(self.Ns):
            offspring_dec.append(
                self.problem.low_limit + rand_sample[i, :self.Nw].unsqueeze(dim=1) * direction[:self.Nw])
            offspring_dec.append(
                self.problem.high_limit + rand_sample[i, self.Nw:].unsqueeze(dim=1) * direction[self.Nw:])
        # Stack the list into one tensor
        offspring_dec = torch.cat(offspring_dec, dim=0)
        offspring_dec = self.problem.repair_decision(offspring_dec)
        offspring = self.problem.estimate_population(offspring_dec)
        offspring_obj = offspring.obj
        # Keep only the non-dominated samples
        nd_solution_index = NSGA2.ND_sort(offspring_obj, 1)[0] == 1
        return offspring[nd_solution_index]

    def directed_sampling4(self, dec):
        """
        Directed-sampling variant: population-derived directions (offsets to
        the per-column min/max) with uniform step sizes in [0, interval)
        applied around the anchors.
        :param dec: anchor decision matrix
        :return: non-dominated Population among the generated samples
        """
        dec_low = dec - dec.min(0)[0]
        dec_high = dec.max(0)[0] - dec
        direction = torch.cat((dec_low, dec_high), dim=0)
        # Normalize the directions to unit length
        direction = normalize_vector(direction, dim=1)
        # Diagonal length of the decision box (L2 norm of the interval lengths)
        interval = self.problem.interval_length.norm(p=2)
        # Directed sampling

        self.Nw = dec_low.size(0)
        rand_sample = torch.rand(self.Ns, 2 * self.Nw) * interval
        # rand_sample = torch.normal(0, std=30, size=(self.Ns, 2 * self.Nw))
        offspring_dec = []
        # Produce 2 * Ns * Nw new solutions
        for i in range(self.Ns):
            offspring_dec.append(dec + rand_sample[i, :self.Nw].unsqueeze(dim=1) * direction[:self.Nw])
            offspring_dec.append(dec + rand_sample[i, self.Nw:].unsqueeze(dim=1) * direction[self.Nw:])
        # Stack the list into one tensor
        offspring_dec = torch.cat(offspring_dec, dim=0)
        offspring_dec = self.problem.repair_decision(offspring_dec)
        offspring = self.problem.estimate_population(offspring_dec)
        offspring_obj = offspring.obj
        # Keep only the non-dominated samples
        nd_solution_index = NSGA2.ND_sort(offspring_obj, 1)[0] == 1
        return offspring[nd_solution_index]
