import torch

from config import device
from problems.problem import Problem
from utils.vectors import get_ref_vectors, get_ref_vectors_grad


class LSMOP(Problem):
    """Base class for the LSMOP large-scale multi-/many-objective benchmark suite.

    Provides the shared building blocks: variable-linkage functions (L1/L2),
    the six basic single-objective landscapes (eta1-eta6) and the
    Pareto-front shape functions (H1-H3), combined through ``eval_value``.
    NOTE(review): concrete LSMOP1-9 variants are presumably defined in
    subclasses — confirm against the rest of the package.
    """

    # Front-end parameter descriptors; 'description' is a user-facing string
    # ("分组大小" = group size) and is deliberately left untranslated.
    parameters = [
        {'label': 'Nk', 'name': 'Nk', 'type': 'number', 'description': '分组大小', 'step': 1, 'defaultValue': 5},
    ]

    def __init__(self, var_dim: int, obj_dim: int, max_fun_eval, kwargs: dict):
        """Build the box constraints and delegate to the ``Problem`` base.

        The first ``obj_dim - 1`` (position) variables live in [0, 1]; the
        remaining (distance) variables live in [0, 10].
        """
        lower = torch.zeros((1, var_dim), device=device, dtype=torch.double)
        position_part = torch.ones(obj_dim - 1, device=device, dtype=torch.double)
        distance_part = torch.ones(var_dim - (obj_dim - 1), device=device, dtype=torch.double) * 10
        upper = torch.cat([position_part, distance_part], dim=0).unsqueeze(dim=0)
        super().__init__(var_dim, obj_dim, lower, upper, max_fun_eval, kwargs)

    def get_optimal_solutions0(self, size):
        """Sample ``size`` reference points on the (linear) optimal front."""
        reference = get_ref_vectors(size, self.obj_dim)
        return reference

    def get_optimal_solutions1(self, size):
        """Sample ``size`` points on the unit hypersphere (normalized reference vectors)."""
        reference = get_ref_vectors(size, self.obj_dim)
        row_norms = reference.pow(2).sum(dim=1, keepdim=True).sqrt()
        return reference / row_norms

    @staticmethod
    def get_optimal_solutions2(obj_dim, size):
        """Sample points on a disconnected optimal front.

        Uniform samples are piecewise-linearly remapped into the two feasible
        intervals, then the last objective is derived from the first
        ``obj_dim - 1`` coordinates.
        """
        interval = [0, 0.251412, 0.631627, 0.859401]
        median = (interval[1] - interval[0]) / (interval[3] - interval[2] + interval[1] - interval[0])
        X = get_ref_vectors_grad(size, obj_dim - 1)
        # Compute the split once on the raw samples, then remap both halves.
        lower_half = X <= median
        X[lower_half] = X[lower_half] * (interval[1] - interval[0]) / median + interval[0]
        X[~lower_half] = (X[~lower_half] - median) * (interval[3] - interval[2]) / (1 - median) + interval[2]
        last_obj = (2 * (obj_dim - torch.sum(X / 2 * (1 + torch.sin(3 * torch.pi * X)), dim=1))).unsqueeze(dim=1)
        return torch.cat((X, last_obj), dim=1)

    @staticmethod
    def eta1(x):
        return torch.sum(x, dim=1)

    @staticmethod
    def eta2(x):
        return torch.abs(x).max(dim=1)[0]

    @staticmethod
    def eta3(x):
        return torch.sum(100 * (x[:, :x.size(1) - 1] ** 2 - x[:, 1:x.size(1)]) ** 2 + (x[:, :x.size(1) - 1] - 1) ** 2,
                         dim=1)

    @staticmethod
    def eta4(x):
        return torch.sum(x ** 2 - 10 * torch.cos(2 * torch.pi * x) + 10, dim=1)

    @staticmethod
    def eta5(x):
        return torch.sum(x ** 2, dim=1) / 4000 - torch.prod(
            torch.cos(x / (torch.arange(1, x.size(1) + 1, device=device).sqrt()))) + 1

    @staticmethod
    def eta6(x):
        return -20 * torch.exp(-0.2 * torch.sqrt(torch.sum(x ** 2, dim=1) / x.size(1))) - \
            torch.exp((torch.sum(torch.cos(2 * torch.pi * x), dim=1)) / x.size(1)) + 20 + torch.e

    def eval_value(self, x, L_func, g1, g2, H_func):
        """
        函数评估
        :param x:
        :param L_func:线性或者非线性函数
        :param g1: DEFINITIONS OF THE SIX BASIC SINGLE-OBJECTIVE FUNCTIONS
        :param g2: DEFINITIONS OF THE SIX BASIC SINGLE-OBJECTIVE FUNCTIONS
        :param H_func:
        :return:
        """
        [pop_size, _] = x.shape
        Ns = self.var_dim - (self.obj_dim - 1)
        L = L_func(x)
        G = torch.zeros(pop_size, self.obj_dim, device=device)
        alpha = 3.8
        c = torch.zeros(self.obj_dim, device=device)
        c[0] = alpha * 0.1 * (1 - 0.1)
        for i in range(1, self.obj_dim):
            c[i] = alpha * c[i - 1] * (1 - c[i - 1])
        c_total = torch.sum(c)
        r = c / c_total
        subcomponents_size = torch.floor(r * Ns / self.Nk).type(torch.int)
        cumulative_size = torch.cat(
            (torch.tensor([0], device=device), torch.cumsum(subcomponents_size * self.Nk, dim=0))).type(
            torch.int)
        for i in range(0, self.obj_dim, 2):
            for j in range(cumulative_size[i], cumulative_size[i + 1], subcomponents_size[i]):
                G[:, i] = G[:, i] + g1(L[:, j:j + subcomponents_size[i]] ** 2)
        for i in range(1, self.obj_dim, 2):
            for j in range(cumulative_size[i], cumulative_size[i + 1], subcomponents_size[i]):
                G[:, i] = G[:, i] + g2(L[:, j:j + subcomponents_size[i]] ** 2)
        return H_func(x, G, subcomponents_size)

    def H1(self, x, G, subcomponents_size):
        [pop_size, _] = x.shape
        G = G / torch.floor(subcomponents_size) / self.Nk
        return (1 + G) * torch.fliplr(
            torch.cumprod(torch.cat((torch.ones(pop_size, 1, device=device), x[:, :self.obj_dim - 1]), dim=1),
                          dim=1)) * torch.cat(
            (torch.ones(pop_size, 1, device=device), 1 - torch.fliplr(x[:, :self.obj_dim - 1])), dim=1)

    def H2(self, x, G, subcomponents_size):
        [pop_size, _] = x.shape
        G = G / torch.floor(subcomponents_size) / self.Nk
        return (1 + G + torch.cat((G[:, 1:], torch.zeros((pop_size, 1), device=device, dtype=torch.double)), dim=1)) * \
            torch.fliplr(torch.cumprod(torch.cat((torch.ones(pop_size, 1, device=device),
                                                  torch.cos(x[:, :self.obj_dim - 1] * torch.pi / 2)), dim=1), dim=1)) * \
            torch.cat((torch.ones(pop_size, 1, device=device), torch.sin(torch.fliplr(x[:, :self.obj_dim - 1]) * \
                                                                         torch.pi / 2)), dim=1)

    def H3(self, x, G, subcomponents_size):
        G = 1 + torch.sum(G / subcomponents_size / self.Nk, dim=1)
        obj = torch.zeros((x.size(0), self.obj_dim), device=device, dtype=torch.double)
        obj[:, :self.obj_dim - 1] = x[:, :self.obj_dim - 1]
        obj[:, self.obj_dim - 1] = (1 + G) * (self.obj_dim - torch.sum(
            x[:, :self.obj_dim - 1] / (1 + G).unsqueeze(dim=1) * (
                    1 + torch.sin(3 * torch.pi * x[:, :self.obj_dim - 1])), dim=1))
        return obj

    def L1(self, x):
        return (1 + torch.arange(self.obj_dim, self.var_dim + 1, device=device, dtype=torch.double) / self.var_dim) * \
            x[:, self.obj_dim - 1:self.var_dim] - (x[:, 0] * 10).unsqueeze(dim=1)

    def L2(self, x):
        return (1 + torch.cos(
            0.5 * torch.pi * torch.arange(self.obj_dim, self.var_dim + 1, device=device,
                                          dtype=torch.double) / self.var_dim)) * \
            x[:, self.obj_dim - 1:self.var_dim] - (x[:, 0] * 10).unsqueeze(dim=1)
