from math import cos

import numpy as np
import torch

from problems.problem import Problem
from utils.vectors import get_ref_vectors


class LMF(Problem):
    """Scalable LMF multi-objective benchmark problem.

    The first ``K`` decision variables are position-related (they drive the
    shape function ``H`` via the aggregated values ``yp``) and the remaining
    ``L = var_dim - K`` are distance-related (they drive the distance
    functions ``G``).  Four options select the concrete formulation:

    * ``type_fm`` -- formulation model: addition / multiplication / mixed
    * ``type_lk`` -- variable linkage: linear / nonlinear / mixed
    * ``type_dg`` -- deep grouping of distance variables: even / nonuniform
    * ``type_cv`` -- contribution of variables: balanced / unbalanced

    NOTE(review): the options are read as ``self.type_fm`` etc. and the
    variable bounds as ``self.low_limit`` / ``self.high_limit`` (2-D,
    indexed ``[:, i]``), but none of them is assigned in this class --
    presumably the ``Problem`` base class sets them from ``kwargs``;
    confirm against ``problems.problem.Problem``.
    """

    # Schema of the user-selectable options (consumed by a configuration/UI
    # layer, not read directly by this class).
    parameters = [
        {
            'label': 'type_fm', 'name': 'type_fm', 'type': 'select', 'description': 'type for formulation model',
            'defaultValue': 2,
            "options": [
                {'label': "addition", "value": 0},
                {'label': "multiplication", "value": 1},
                {'label': "mixed", "value": 2}
            ]
        },
        {
            'label': 'type_lk', 'name': 'type_lk', 'type': 'select', 'description': 'type for variable linkage',
            'defaultValue': 2,
            "options": [
                {'label': "linear linkage", "value": 0},
                {'label': "nonlinear linkage", "value": 1},
                {'label': "mixed linkage", "value": 2}
            ]
        },
        {
            'label': 'type_dg', 'name': 'type_dg', 'type': 'select',
            'description': 'type for deep grouping distance-related variables',
            'defaultValue': 1,
            "options": [
                {'label': "even grouping", "value": 0},
                {'label': "nonuniform grouping", "value": 1}
            ]
        },
        {
            'label': 'type_cv', 'name': 'type_cv', 'type': 'select',
            'description': 'type for contribution of variables',
            'defaultValue': 1,
            "options": [
                {'label': "balanced contribution", "value": 0},
                {'label': "unbalanced contribution", "value": 1},
            ]
        },
    ]

    def __init__(self, var_dim: int, obj_dim: int, max_fun_eval, kwargs: dict):
        """Partition the decision variables into position/distance groups.

        Args:
            var_dim: total number of decision variables.
            obj_dim: number of objectives.
            max_fun_eval: evaluation budget, forwarded to ``Problem``.
            kwargs: extra options forwarded to ``Problem``.
        """
        # Per-group counts for the position-related variables:
        #   nip -- variables owned by each of the (obj_dim - 1) groups,
        #   nop -- variables overlapping with the neighbouring group
        #          (no overlap for the bi-objective case),
        #   nsp -- variables shared by all groups.
        if obj_dim == 2:
            self.nop = 0 * torch.ones(obj_dim - 1, dtype=torch.int)
        else:
            self.nop = 1 * torch.ones(obj_dim - 1, dtype=torch.int)
        self.nip = 3 * torch.ones(obj_dim - 1)
        self.nsp = 1
        # First K variables are position-related; the remaining L are
        # distance-related.
        self.K = torch.sum(self.nip).to(torch.int).item() + self.nsp
        self.L = var_dim - self.K
        # One logistic-map step (r = 3.8) applied to x0 = 0.1; seeds the
        # chaotic sequence used to size the distance groups unevenly.
        c1 = 3.8 * 0.1 * (1 - 0.1);
        c = self.logistic_map(c1, 3.8, obj_dim)
        nsd_least = 2  # lower bound reserved for shared distance variables
        proportion = 0.2  # overlap size as a fraction of the previous group
        # nid/nod/nsd mirror nip/nop/nsp for the distance-related variables;
        # nod[i] is taken from the preceding group (wrapping at index 0).
        self.nid = torch.floor(c * (self.L - nsd_least))
        self.nod = torch.cat(
            (torch.floor(self.nid[-1] * proportion).unsqueeze(0), torch.floor(self.nid[:-1] * proportion)), dim=0)
        self.nsd = self.L - sum(self.nid).to(torch.int).item()
        super().__init__(var_dim, obj_dim, None, None, max_fun_eval, kwargs)

    def eval_obj(self, pop_dec, H_func, func_type, *args):
        """Evaluate the objective values of a population.

        Args:
            pop_dec: (N x D) decision matrix, one row per individual.
            H_func: shape-function callback (one of ``self.get_H*``).
            func_type: distance-evaluation callback (one of
                ``self.func_type*``).
            *args: basic test functions forwarded unchanged to ``func_type``.

        Returns:
            (N x obj_dim) tensor of objective values.
        """
        N = pop_dec.shape[0]
        D = pop_dec.shape[1]
        obj_dim = self.obj_dim

        # Nonuniform grouping of the variables: gp groups the K
        # position-related variables (1-based indices starting at 1), gd the
        # distance-related ones (starting at K + 1).
        gp = self.grouping(self.nip, self.nop, self.nsp, obj_dim - 1, 1)
        gd = self.grouping(self.nid, self.nod, self.nsd, obj_dim, self.K + 1)

        # Calculate the shape function H from the per-group aggregated
        # position values yp.
        yp = torch.zeros((N, obj_dim - 1))
        for i in range(obj_dim - 1):
            yp[:, i] += self.rt_sum(pop_dec[:, :self.K], gp[i])

        H = H_func(N, yp)

        # Work on a copy so the caller's decision matrix is untouched.
        pop_dec_new = pop_dec.clone()
        # Variable linkages between distance-related variables: each distance
        # variable is rescaled depending on its index and shifted by yp[:, 0],
        # coupling it to the position variables.
        if self.type_lk == 0:  # Linear linkages
            for i in range(self.K, D):
                pop_dec_new[:, i] = (1 + (i + 1) / self.L) * (pop_dec[:, i] - self.low_limit[:, i]) - yp[:, 0] * (
                        self.high_limit[:, i] - self.low_limit[:, i])
        elif self.type_lk == 1:  # Nonlinear linkages
            for i in range(self.K, D):
                pop_dec_new[:, i] = (1 + cos((i + 1) / self.L * torch.pi / 2)) * (
                        pop_dec[:, i] - self.low_limit[:, i]) - yp[:, 0] * (
                                            self.high_limit[:, i] - self.low_limit[:, i])
        else:  # Mixed linkages: linear on even offsets, nonlinear on odd ones
            for i in range(self.K, D, 2):
                pop_dec_new[:, i] = (1 + (i + 1) / self.L) * (pop_dec[:, i] - self.low_limit[:, i]) - yp[:, 0] * (
                        self.high_limit[:, i] - self.low_limit[:, i])
            for i in range(self.K + 1, D, 2):
                pop_dec_new[:, i] = (1 + cos((i + 1) / self.L * torch.pi / 2)) * (
                        pop_dec[:, i] - self.low_limit[:, i]) - yp[:, 0] * (
                                            self.high_limit[:, i] - self.low_limit[:, i])

        x = pop_dec_new
        # yd[i] holds one aggregated value per deep group of objective i.
        yd = func_type(N, obj_dim, x, gd, args)

        # Calculate the distance function G; even- and odd-indexed objectives
        # use logistic-map weight sequences with different r parameters.
        G = torch.zeros((N, obj_dim), dtype=torch.double)
        for i in range(0, obj_dim, 2):
            len1 = yd[i].shape[1]
            w1 = self.logistic_map(0.23, 3.7, len1)
            if self.type_cv == 1:  # Imbalanced contribution
                G[:, i] = self.weighted_sum(yd[i], w1)
            else:  # Balanced contribution
                G[:, i] = self.even_sum(yd[i])

        for i in range(1, obj_dim, 2):
            len2 = yd[i].shape[1]
            w2 = self.logistic_map(0.23, 3.75, len2)
            if self.type_cv == 1:  # Imbalanced contribution
                G[:, i] = self.weighted_sum(yd[i], w2)
            else:  # Balanced contribution
                G[:, i] = self.even_sum(yd[i])

        # Evaluate the objective values by combining H and G.
        if self.type_fm == 0:  # Addition model
            pop_obj = G + H
        elif self.type_fm == 1:  # Multiplication model
            pop_obj = (1 + G) * H
        else:  # Mixed model: multiplication on even, addition on odd indices
            pop_obj = torch.zeros((N, obj_dim), dtype=torch.double)
            for i in range(0, obj_dim, 2):
                pop_obj[:, i] = H[:, i] * (1 + G[:, i])
            for i in range(1, obj_dim, 2):
                pop_obj[:, i] = H[:, i] + G[:, i]

        return pop_obj

    def func_type0(self, N, obj_dim, x, gd, args):
        """Distance values using four basic functions, two per objective parity.

        Even-indexed objectives alternate ``f0``/``f1`` across their deep
        groups; odd-indexed objectives alternate ``f2``/``f3``.

        Returns:
            list of length ``obj_dim``; entry i is an (N x n_groups) tensor.
        """
        f0, f1, f2, f3 = args
        # Deep Grouping of the distance-related variables in each group
        yd = [None] * obj_dim
        a1 = 5  # Define the value of the first entry in the sequence
        d1 = 1  # Define the value of the variance in the sequence
        nk1 = 5  # Number of groups for uniformly deep grouping
        for i in range(0, obj_dim, 2):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a1, d1)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk1)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 2):
                yd[i][:, j] = f0(x, dg[j])
            for j in range(1, len(dg), 2):
                yd[i][:, j] = f1(x, dg[j])
        a2 = 5  # Define the value for deep grouping
        d2 = 2  # Variance of the sequence
        nk2 = 5  # Number of groups for deep grouping
        for i in range(1, obj_dim, 2):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a2, d2)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk2)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 2):
                yd[i][:, j] = f2(x, dg[j])
            for j in range(1, len(dg), 2):
                yd[i][:, j] = f3(x, dg[j])
        return yd

    def func_type1(self, N, obj_dim, x, gd, args):
        """Distance values using six basic functions, three per objective parity.

        Even-indexed objectives cycle ``f0``/``f1``/``f2`` across their deep
        groups; odd-indexed objectives cycle ``f3``/``f4``/``f5``.

        Returns:
            list of length ``obj_dim``; entry i is an (N x n_groups) tensor.
        """
        f0, f1, f2, f3, f4, f5 = args
        # Deep Grouping of the distance-related variables in each group
        yd = [None] * obj_dim
        a1 = 5  # Define the value of the first entry in the sequence
        d1 = 1  # Define the value of the variance in the sequence
        nk1 = 5  # Number of groups for uniformly deep grouping
        for i in range(0, obj_dim, 2):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a1, d1)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk1)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 3):
                yd[i][:, j] = f0(x, dg[j])
            for j in range(1, len(dg), 3):
                yd[i][:, j] = f1(x, dg[j])
            for j in range(2, len(dg), 3):
                yd[i][:, j] = f2(x, dg[j])
        a2 = 5  # Define the value for deep grouping
        d2 = 2  # Variance of the sequence
        nk2 = 5  # Number of groups for deep grouping
        for i in range(1, obj_dim, 2):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a2, d2)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk2)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 3):
                yd[i][:, j] = f3(x, dg[j])
            for j in range(1, len(dg), 3):
                yd[i][:, j] = f4(x, dg[j])
            for j in range(2, len(dg), 3):
                yd[i][:, j] = f5(x, dg[j])
        return yd

    def func_type2(self, N, obj_dim, x, gd, args):
        """Distance values using six basic functions over three objective strata.

        Objectives are split by index modulo 3: stratum 0 alternates
        ``f0``/``f1``, stratum 1 alternates ``f2``/``f3``, stratum 2
        alternates ``f4``/``f5`` across their deep groups.

        Returns:
            list of length ``obj_dim``; entry i is an (N x n_groups) tensor.
        """
        f0, f1, f2, f3, f4, f5 = args
        # Deep Grouping of the distance-related variables in each group
        yd = [None] * obj_dim
        a1 = 5  # Define the value of the first entry in the sequence
        d1 = 1  # Define the value of the variance in the sequence
        nk1 = 5  # Number of groups for uniformly deep grouping
        for i in range(0, obj_dim, 3):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a1, d1)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk1)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 2):
                yd[i][:, j] = f0(x, dg[j])
            for j in range(1, len(dg), 2):
                yd[i][:, j] = f1(x, dg[j])
        a2 = 5  # Define the value for deep grouping
        d2 = 2  # Variance of the sequence
        nk2 = 5  # Number of groups for deep grouping
        for i in range(1, obj_dim, 3):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a2, d2)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk2)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 2):
                yd[i][:, j] = f2(x, dg[j])
            for j in range(1, len(dg), 2):
                yd[i][:, j] = f3(x, dg[j])
        a3 = 5  # Define the value for deep grouping
        d3 = 1  # Variance of the sequence
        nk3 = 5  # Number of groups for deep grouping
        for i in range(2, obj_dim, 3):
            if self.type_dg == 1:  # Nonuniformly deep grouping
                dg = self.deep_grouping(gd[i], a3, d3)
            else:  # Evenly deep grouping
                dg = self.uniform_grouping(gd[i], nk3)
            yd[i] = torch.zeros((N, len(dg)))
            for j in range(0, len(dg), 2):
                yd[i][:, j] = f4(x, dg[j])
            for j in range(1, len(dg), 2):
                yd[i][:, j] = f5(x, dg[j])
        return yd

    def get_H0(self, N, yp):
        """Shape function: products of cos(pi/2 * yp) with one sin factor.

        H[:, i] = prod_{j < M-i-1} cos(pi/2 * yp[:, j]) and, for i > 0, a
        further factor sin(pi/2 * yp[:, M-i-1]) (M = obj_dim) -- the
        DTLZ2-style spherical front parameterisation.
        """
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= torch.cos(0.5 * torch.pi * yp[:, j])
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= torch.sin(0.5 * torch.pi * yp[:, aux])
        return H

    def get_H1(self, N, yp):
        """Shape function: ``get_H0`` values raised to a power.

        Same cos/sin products as ``get_H0``, then each column is raised to
        the 4th power except the last objective, which is squared.
        """
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= torch.cos(0.5 * torch.pi * yp[:, j])
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= torch.sin(0.5 * torch.pi * yp[:, aux])

            if i != obj_dim - 1:
                H[:, i] = H[:, i].clone() ** 4
            else:
                H[:, i] = H[:, i].clone() ** 2
        return H

    def get_H2(self, N, yp):
        """Shape function: linear products of yp.

        H[:, i] = prod_{j < M-i-1} yp[:, j] and, for i > 0, a further factor
        (1 - yp[:, M-i-1]) -- a linear (simplex-style) front.
        """
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= yp[:, j]
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= (1 - yp[:, aux])
        return H

    def get_H3(self, N, yp):
        """Shape function: inverted form of ``get_H0`` (H -> 1 - H)."""
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= torch.cos(0.5 * torch.pi * yp[:, j])
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= torch.sin(0.5 * torch.pi * yp[:, aux])
            H[:, i] = 1.0 - H[:, i].clone()
        return H

    def get_H4(self, N, yp):
        """Shape function: inverted form of ``get_H2`` (H -> 1 - H)."""
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= yp[:, j]
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= (1 - yp[:, aux])
            H[:, i] = 1.0 - H[:, i].clone()
        return H

    def get_H5(self, N, yp):
        """Shape function: identical to ``get_H3``.

        NOTE(review): byte-for-byte the same computation as ``get_H3`` --
        presumably kept as a separate entry so shape functions can be
        selected by index; confirm before consolidating.
        """
        obj_dim = self.obj_dim
        H = torch.ones((N, obj_dim), dtype=torch.double)
        for i in range(obj_dim):
            for j in range(obj_dim - i - 1):
                H[:, i] *= torch.cos(0.5 * torch.pi * yp[:, j])
            if i != 0:
                aux = obj_dim - i - 1
                H[:, i] *= torch.sin(0.5 * torch.pi * yp[:, aux])
            H[:, i] = 1.0 - H[:, i].clone()
        return H

    def get_optimal_solutions0(self, size):
        """Reference vectors normalised onto the unit hypersphere."""
        P = get_ref_vectors(size, self.obj_dim)
        P = P / torch.sqrt(torch.sum(P ** 2, dim=1, keepdim=True))
        return P

    def get_optimal_solutions1(self, size):
        """Squared reference vectors, renormalised column-wise.

        All but the last column are divided by temp**2 and the last by temp,
        where temp = sum(sqrt(P[:, :-1])) + P[:, -1].
        """
        P = get_ref_vectors(size, self.obj_dim) ** 2
        temp = torch.sum(torch.sqrt(P[:, :-1]), dim=1) + P[:, -1]
        P = P / torch.cat((temp.view(-1, 1).repeat(1, P.size(1) - 1) ** 2, temp.view(-1, 1)), dim=1)
        return P

    def get_optimal_solutions2(self, size):
        """Plain (simplex) reference vectors."""
        return get_ref_vectors(size, self.obj_dim)

    def get_optimal_solutions3(self, size):
        """Inverted unit-hypersphere reference vectors (1 - normalised P)."""
        P = get_ref_vectors(size, self.obj_dim)
        P = P / torch.sqrt(torch.sum(P ** 2, dim=1, keepdim=True))
        return 1 - P

    @staticmethod
    def logistic_map(c1, r, num):
        """Generate ``num`` logistic-map values starting at ``c1``, summing to 1.

        Args:
            c1: initial value f[0].
            r: logistic-map growth parameter.
            num: number of values to generate.

        Returns:
            1-D tensor of length ``num``, normalised to sum to 1.
        """
        # Initialise the result tensor.
        f = torch.zeros(num)
        f[0] = c1

        # Iterate the logistic map: f[i] = r * f[i-1] * (1 - f[i-1]).
        for i in range(1, num):
            f[i] = r * f[i - 1] * (1 - f[i - 1])

        # Normalise so the sequence sums to 1.
        sum_c = torch.sum(f)
        f = f / sum_c

        return f

    @staticmethod
    def rt_sum(x, g):
        """Absolute mean of the columns of ``x`` selected by ``g``.

        ``g`` holds 1-based column indices; returns |sum of columns| / len(g)
        per row.
        """
        l = len(g)
        s1 = x.size(0)
        f = torch.zeros(s1)
        for i in range(l):
            f += x[:, g[i] - 1].float()
        f = torch.abs(f) / l
        return f

    @staticmethod
    def weighted_sum(x, w):
        """Row-wise weighted sum of the columns of ``x`` with weights ``w``."""
        s1, s2 = x.size()
        f = torch.zeros(s1)
        for i in range(s2):
            f += x[:, i] * w[i]
        return f

    @staticmethod
    def even_sum(x):
        """Row-wise mean of the columns of ``x`` (equal weights 1/s2)."""
        s1, s2 = x.size()
        f = torch.zeros(s1)
        for i in range(s2):
            f += x[:, i] * (1.0 / s2)
        return f

    @staticmethod
    def rosenbrock(x, g):
        """Rosenbrock value over the 1-based columns in ``g``, averaged by len(g)."""
        l = len(g)
        s1 = x.size(0)
        gx = torch.zeros(s1, dtype=x.dtype)

        for i in range(l - 1):
            gx += 100 * ((x[:, g[i] - 1] ** 2 - x[:, g[i + 1] - 1]) ** 2) + (x[:, g[i] - 1] - 1) ** 2

        gx = gx / l
        return gx

    @staticmethod
    def sphere(x, g):
        """Sphere value (mean of squares) over the 1-based columns in ``g``."""
        l = len(g)
        s1 = x.size(0)
        gx = torch.zeros(s1)
        for i in range(l):
            gx += x[:, g[i] - 1].float() ** 2
        gx = gx / l
        return gx

    @staticmethod
    def ackley(x, g):
        """Ackley value over the 1-based columns in ``g``, divided by len(g)."""
        l = len(g)  # g is a list/tensor of 1-based column indices
        s1, _ = x.shape  # number of rows (individuals)
        gx = torch.zeros(s1)  # result accumulator
        sum1 = torch.zeros(s1)  # mean of squares term
        sum2 = torch.zeros(s1)  # mean of cosines term

        for i in range(l):
            sum1 += x[:, g[i] - 1] ** 2 / l
            sum2 += torch.cos(2 * torch.pi * x[:, g[i] - 1]) / l
        gx += 20 - 20 * torch.exp(-0.2 * torch.sqrt(sum1)) + np.exp(1) - torch.exp(sum2)
        gx /= l

        return gx

    @staticmethod
    def rastrigin(x, g):
        """Rastrigin value over the 1-based columns in ``g``, averaged by len(g)."""
        l = len(g)  # g is a list/tensor of 1-based column indices
        s1, _ = x.shape  # number of rows (individuals)
        gx = torch.zeros(s1)  # result accumulator

        for i in range(l):
            gx += x[:, g[i] - 1] ** 2 + 10 - 10 * torch.cos(2 * torch.pi * x[:, g[i] - 1])

        # Average over the group size.
        gx /= l

        return gx

    @staticmethod
    def schwefel221(x, g):
        """Schwefel 2.21 value: max |x| over the 1-based columns in ``g``."""
        l = len(g)
        gx = torch.abs(x[:, g[0] - 1].float())
        for i in range(1, l):
            gx = torch.max(gx, torch.abs(x[:, g[i] - 1].float()))
        return gx

    @staticmethod
    def schwefel2(x, g):
        """Schwefel 1.2-style value: sum of squared prefix sums over ``g``.

        NOTE(review): unlike the other basic functions here, this one is not
        divided by the group size -- presumably intentional; confirm against
        the benchmark definition.
        """
        l = len(g)
        s1 = x.size(0)
        gx = torch.zeros(s1)
        for i in range(l):
            midx = torch.zeros(s1)
            for j in range(i + 1):
                midx += x[:, g[j] - 1]
            gx += midx ** 2
        return gx

    @staticmethod
    def grouping(ni, no, ns, ng, point):
        """Build ``ng`` overlapping groups of 1-based variable indices.

        Group i is laid out as:
          [no[i] indices copied from the tail of the previous group
           (group ng-1 for i == 0)] +
          [ns indices shared by all groups] +
          [ni[i] indices owned exclusively by group i].
        Fresh indices are assigned sequentially starting at ``point``; the
        shared indices are assigned last, after all owned ones.

        Args:
            ni: per-group counts of owned indices.
            no: per-group counts of overlapping indices.
            ns: number of indices shared by every group.
            ng: number of groups.
            point: first 1-based index to assign.

        Returns:
            list of ``ng`` lists of integer indices.
        """
        pointer = point
        length = torch.zeros(ng, dtype=torch.int)
        length = length + ni + no + ns
        length = length.to(torch.int)
        no = no.to(torch.int)
        g = [[] for _ in range(ng)]

        # First pass: assign each group its own indices, leaving placeholder
        # zeros at the front for the overlap and shared slots.
        for i in range(ng):
            for j in range(no[i] + ns, length[i]):
                g[i].append(pointer)
                pointer += 1
            g[i][0:0] = [0] * (no[i] + ns)
        # Second pass: fill the overlap slots from the previous group's tail
        # (wrapping to the last group for i == 0).
        for i in range(ng):
            for j in range(no[i]):
                if i == 0:
                    g[i][j] = g[ng - 1][length[ng - 1] - j - 1]
                else:
                    g[i][j] = g[i - 1][length[i - 1] - j - 1]

        # Third pass: assign the shared indices, identical in every group.
        for j in range(ns):
            for i in range(ng):
                g[i][j + no[i]] = pointer
            pointer += 1

        return g

    @staticmethod
    def deep_grouping(g, a, d):
        """Split ``g`` into progressively larger chunks.

        The first chunk has ``a`` elements and each subsequent chunk grows by
        an increment that itself grows by ``d`` (sizes a, a+d, a+3d, ...).
        The loop stops early enough that all leftovers form one final chunk.

        Returns:
            list of lists partitioning ``g`` in order.
        """
        span = a
        remain = len(g)
        deta = 0
        t = 0
        ng = 1
        dg = []

        while remain > span + a:
            group = []
            for i in range(span):
                group.append(g[t])
                t += 1
            dg.append(group)
            remain -= span
            deta += d
            span += deta
            ng += 1

        # Put whatever is left into one final chunk.
        if remain > 0:
            group = []
            for i in range(remain):
                group.append(g[t])
                t += 1
            dg.append(group)

        return dg

    @staticmethod
    def uniform_grouping(g, k):
        """Split ``g`` into ``k`` near-equal chunks.

        Each chunk gets len(g) // k elements in order; the len(g) % k
        leftover elements (from the tail of ``g``) are appended one each to
        the first chunks.

        Returns:
            list of ``k`` lists partitioning ``g``.
        """
        remain = len(g) % k
        divisor = len(g) // k
        t = 0
        dg = [[] for _ in range(k)]

        for i in range(k):
            for j in range(divisor):
                dg[i].append(g[t])
                t += 1

        # Distribute the remainder: one extra element to each leading chunk.
        for i in range(remain):
            dg[i].append(g[t])
            t += 1

        return dg
