# _*_ coding: utf-8 _*_

import numpy as np
from random import sample
from random import uniform
from math import exp
import matplotlib.pyplot as plt

from scripts.utils.operators import Lifecycle
from scripts.utils.operators import lhs_uniform

np.set_printoptions(linewidth=999, threshold=round(1e9))


class SDLSO:
    """
    Stochastic dominant learning swarm optimizer (SDLSO).

    DOI: 10.1109/TCYB.2020.3034427
    https://ieeexplore.ieee.org/document/9288916
    """

    def __init__(self, obj_func, dimension: int, bounds: tuple, NP: int):
        """
        :param obj_func: objective function mapping a position vector to a scalar fitness (minimized)
        :param dimension: problem dimensionality
        :param bounds: (lower, upper) bound shared by every dimension
        :param NP: swarm (population) size
        """
        self.obj_func = obj_func
        self.__d, self.__bounds, self.__NP = dimension, bounds, NP

        # Latin-hypercube sample scaled into the search box.
        lhs = lhs_uniform(n=self.__NP, p=self.__d)
        positions = self.__bounds[0] + lhs * (self.__bounds[1] - self.__bounds[0])
        velocities = np.zeros(shape=(self.__NP, self.__d))
        # population[i] structure: [position, velocity, fitness]
        self.__population = [
            [positions[i], velocities[i], self.obj_func(positions[i])]
            for i in range(self.__NP)
        ]
        self.gBest = min(self.__population, key=lambda p: p[2])[2]  # Only record the fitness.

        self.life = Lifecycle()
        self.life.update(eva=self.__NP)  # the NP initial fitness evaluations above
        self.updated = []  # per-iteration count of particles that actually moved

    def optimize(self, iterations=None, evaluations=None, deadline=None):
        """
        Run the optimizer until a stopping condition is met.

        :param iterations: maximum number of iterations (None = no limit)
        :param evaluations: maximum number of fitness evaluations (None = no limit)
        :param deadline: wall-clock deadline (None = no limit)
        :return: the Lifecycle object recording the run
        """
        self.life.set_stopping_condition(iterations=iterations, evaluations=evaluations, deadline=deadline)

        while self.life.check_stopping_condition() is False:
            self.__learning()

            # Only the particles updated this iteration were re-evaluated, so
            # count self.updated[-1] evaluations (not NP, which over-counts).
            self.life.update(it=1, eva=self.updated[-1], gBest=self.gBest)

            print(self.life.it, self.updated[-1])
        return self.life

    def __learning(self):
        """
        One iteration of stochastic dominant learning.

        Each particle draws two distinct random exemplars; it learns from them
        (and is re-evaluated) only when BOTH exemplars dominate it, i.e. both
        have fitness no worse than its own.
        """
        updated_count = 0
        indices = list(range(self.__NP))
        for i in range(self.__NP):
            # Select two distinct random exemplars, excluding the particle itself.
            xb1, xb2 = sample(indices[:i] + indices[i + 1:], k=2)
            fi = self.__population[i][2]
            if self.__population[xb1][2] > fi or self.__population[xb2][2] > fi:
                continue  # not dominated by both exemplars: particle stays put

            # Make xb1 the better (smaller-fitness) exemplar of the two.
            if self.__population[xb1][2] > self.__population[xb2][2]:
                xb1, xb2 = xb2, xb1
            x1, x2 = self.__population[xb1][0], self.__population[xb2][0]
            f1, f2 = self.__population[xb1][2], self.__population[xb2][2]
            # beta lies in [0.3, 0.5]; the tiny epsilons avoid 0/0 when fitnesses tie.
            beta = 0.5 - 0.2 * exp(-(fi - f2 + 1e-99) / (fi - f1 + 1e-99))
            self.__population[i][1], self.__population[i][0] = self.__update(self.__population[i], x1, x2, beta)

            # Re-evaluate the moved particle: f(xi).
            self.__population[i][2] = self.obj_func(self.__population[i][0])
            updated_count += 1
            # Update gBest.
            if self.gBest > self.__population[i][2]:
                self.gBest = self.__population[i][2]

        self.updated.append(updated_count)

    def __update(self, p, x1, x2, beta: float):
        """
        Update one particle's velocity and position.

        :param p: current particle ([position, velocity, fitness])
        :param x1: position of the better exemplar
        :param x2: position of the worse exemplar
        :param beta: learning weight applied to the worse exemplar
        :return: (velocity, position); position is clipped to the search bounds
        """
        x, v = p[0], p[1]
        # Three independent U(0,1) vectors; row-major fill preserves the same
        # RNG draw order as three sequential size-d calls.
        r1, r2, r3 = np.random.uniform(low=0.0, high=1.0, size=(3, self.__d))
        v = r1 * v + r2 * (x1 - x) + beta * r3 * (x2 - x)
        # Vectorized bound clamp (replaces the per-dimension Python loop).
        x = np.clip(x + v, self.__bounds[0], self.__bounds[1])
        return v, x
