import copy

import torch

from algorithms.algorithm import Algorithm
from algorithms.base.nsga2 import NSGA2
from algorithms.large.mocgde import MOCGDE
from individual.population import Population
from problems.problem import Problem
from utils.vectors import get_ref_vectors


class GPSOM(Algorithm):
    """Gradient-assisted Particle Swarm Optimization for Multi-objective problems.

    Decomposes the problem with a set of reference vectors (one PSO sub-swarm
    per vector). Each generation every sub-swarm performs a standard PSO move,
    updates per-particle personal bests by Pareto comparison, and refines its
    global best with a finite-difference gradient local search along its
    reference vector (see :meth:`local_search`).
    """

    # UI/config schema consumed by the framework: the single tunable here is
    # 'popsize', the size of each individual sub-swarm.
    parameters = [
        {'label': 'popsize', 'name': 'popsize', 'type': 'number', 'description': 'Population size of single run',
         'step': 1, 'defaultValue': 20}
    ]

    def __init__(self, problem: Problem,
                 pop_size: int,
                 kwargs,
                 ):
        """Build the reference-vector set, then allocate one sub-swarm per vector.

        Note that ``pop_size`` is overwritten by the actual number of generated
        reference vectors, which may differ from the requested value.
        """
        self.ref_vectors = get_ref_vectors(pop_size, problem.obj_dim)
        pop_size = self.ref_vectors.size(0)
        super().__init__(pop_size, problem, kwargs)
        self.sub_pops = [None] * pop_size  # one PSO sub-population per reference vector
        self.p_best = [None] * pop_size    # personal bests, parallel to sub_pops
        self.g_best = Population([None] * pop_size)  # one global best per sub-swarm
        self.init()

    def init(self):
        """Initialize every sub-swarm with zero velocities and seed p_best / g_best."""
        # NOTE(review): self.popsize (the per-swarm size) is presumably set by the
        # Algorithm base class from the 'parameters'/kwargs machinery — confirm.
        for i in range(self.pop_size):
            self.sub_pops[i] = self.problem.init_population(self.popsize,
                                                            vel=torch.zeros(self.popsize, self.problem.var_dim,
                                                                            dtype=torch.double))
            # Personal bests start as deep copies so later in-place particle
            # updates cannot alias them.
            self.p_best[i] = copy.deepcopy(self.sub_pops[i])
            self.update_g_best(i)

    def update_g_best(self, i):
        """Refresh g_best[i]: pick a non-dominated member of sub-swarm i and
        polish it with gradient local search along ref_vectors[i]."""
        front_no, _ = NSGA2.ND_sort(self.sub_pops[i].obj, 1)
        index = front_no == 1  # boolean mask of the first (non-dominated) front
        front_no_pops = self.sub_pops[i][index]
        # Only the first non-dominated solution is taken and refined.
        self.g_best[i] = self.local_search(self.problem, front_no_pops[0], self.ref_vectors[i])

    def final_output(self):
        """Return the per-reference-vector global bests as the final solution set."""
        return self.g_best

    def each_iteration(self):
        """One generation: PSO-move every particle of every sub-swarm, then
        refresh personal bests and each sub-swarm's global best."""
        for i in range(self.pop_size):
            for j in range(self.popsize):
                # Move particle j of sub-swarm i; it is wrapped in a 1-element
                # Population because operator_pso works on decision matrices.
                self.sub_pops[i][j] = \
                    self.operator_pso(self.problem, Population([self.sub_pops[i][j]]), self.p_best[i][j],
                                      self.g_best[i])[0]
                self.p_best[i][j] = self.update_p_best(self.p_best[i][j], self.sub_pops[i][j])
            self.update_g_best(i)

    @staticmethod
    def local_search(problem, solution, ref_vector):
        """Gradient-descent refinement of one solution along a reference vector.

        Uses finite-difference gradients (via MOCGDE.finite_difference, which
        presumably scalarizes the objectives with ref_vector — confirm) and a
        Barzilai-Borwein-style adaptive step length. Stops when the movement in
        decision space drops below Tol, or after the iteration cap.
        """
        MaxIter = 5  # k starts at 1 and the test is k < MaxIter, so at most 4 steps run
        Tol = 1e-3   # convergence threshold on decision-space movement
        step = 1.0   # initial step size for the first descent move
        k = 1
        error = 10.0  # sentinel > Tol so the loop body executes at least once
        grad = torch.zeros(2, problem.var_dim, dtype=torch.double)  # row 0: current grad, row 1: grad at trial point
        while error > Tol and k < MaxIter:
            grad[0] = MOCGDE.finite_difference(solution, problem, ref_vector)[0].T
            offspring_dec = solution.dec - step * grad[0]
            offspring_dec = problem.repair_decision(offspring_dec)  # repair/clip back into the feasible region
            offspring = problem.estimate_population(offspring_dec)
            grad[1] = MOCGDE.finite_difference(offspring[0], problem, ref_vector)[0].T
            # Barzilai-Borwein step: |s·y| / ||y||^2 with s = x_new - x_old, y = g_new - g_old.
            # NOTE(review): divides by ||grad[1] - grad[0]||^2 — yields NaN/inf if the
            # two gradients coincide; no guard is present.
            step = torch.abs(torch.mv(offspring.dec - solution.dec, grad[1] - grad[0])) / torch.norm(
                grad[1] - grad[0]) ** 2
            error = torch.norm(offspring.dec - solution.dec)
            solution = offspring[0]
            k += 1
        return solution

    @staticmethod
    def update_p_best(p_best, population):
        """Pareto-compare the moved particle against its personal best.

        Replaces p_best when the new particle dominates it; breaks
        non-dominated ties uniformly at random (probability 0.5).
        """
        temp = p_best.obj - population.obj
        # dominate: +1 if p_best dominates (assuming minimization), -1 if it is
        # dominated, 0 if the two are mutually non-dominated or equal.
        # NOTE(review): builtin any() on a tensor — this assumes obj is 1-D
        # (element-wise truthiness); a 2-D obj would misbehave. Confirm shape.
        dominate = any(temp < 0) - any(temp > 0)
        if dominate == -1:
            p_best = population
        temp = torch.rand(1)
        if dominate == 0 and temp < 0.5:
            p_best = population
        return p_best

    @staticmethod
    def operator_pso(problem, particle, p_best, g_best, w=0.4):
        """Standard PSO velocity/position update for one (wrapped) particle.

        v' = w*v + r1*(p_best - x) + r2*(g_best - x);  x' = x + v'.
        r1 and r2 are uniform per-dimension random factors; there are no
        separate c1/c2 acceleration coefficients — they are folded into r1/r2.
        """
        particle_dec = particle.dec
        particle_vel = particle.get_attr('vel')
        p_best_dec = p_best.dec
        g_best_dec = g_best.dec

        # Dimensions of the decision matrix (callers pass a 1-element Population,
        # so pop_size is expected to be 1 here).
        pop_size, var_dim = particle_dec.shape

        # Particle swarm velocity/position update.
        r1 = torch.rand(pop_size, var_dim, dtype=torch.double)
        r2 = torch.rand(pop_size, var_dim, dtype=torch.double)
        off_vel = w * particle_vel + r1 * (p_best_dec - particle_dec) + r2 * (g_best_dec - particle_dec)
        off_dec = particle_dec + off_vel

        # Offspring carries its velocity forward as a population attribute.
        offspring = problem.estimate_population(off_dec, vel=off_vel)

        return offspring
