# _*_ coding: UTF-8 _*_

import numpy as np
from copy import deepcopy
from random import sample
from random import uniform

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt

from scripts.utils.operators import Lifecycle
from scripts.utils.operators import lhs_uniform

from data.CEC.CEC2017.cec17_functions import cec17_test_func

np.set_printoptions(linewidth=999, threshold=1e9)


class CALLSO:
    """
    Classifier-Assisted Level-Based Learning Swarm Optimizer (CA-LLSO).
    https://doi.org/10.1109/TEVC.2020.3017865

    A surrogate-assisted variant of LLSO for expensive optimization: a
    gradient-boosting classifier learns to predict a particle's level
    (layer) from its position, so most offspring are screened without real
    fitness evaluations; only Ns promising candidates per generation are
    evaluated with the true objective function and archived in a database.
    """

    def __init__(self, obj_func, dimension: int, bounds: tuple, NP: int, L: int, phi: float, Ns: int):
        """
        :param obj_func: real (expensive) objective function, minimized; called with a 1-D position vector.
        :param dimension: problem dimensionality d.
        :param bounds: (lower, upper) bound pair applied uniformly to every dimension.
        :param NP: swarm size.
        :param L: number of levels the swarm is partitioned into (level 1 = best).
        :param phi: control coefficient of the level-based velocity update.
        :param Ns: number of candidates really evaluated per generation.
        """
        self.obj_func = obj_func
        self.__d, self.__bounds, self.__NP, self.__L, self.__phi, self.__Ns = dimension, bounds, NP, L, phi, Ns

        # Initialize the swarm via Latin hypercube sampling inside the bounds.
        lhs = lhs_uniform(n=self.__NP, p=self.__d)  # a matrix of shape (NP, d) in [0, 1]
        x = self.__bounds[0] + lhs * (self.__bounds[1] - self.__bounds[0])  # positions of shape (NP, d)
        v = np.zeros(shape=(self.__NP, self.__d))  # velocities of shape (NP, d)
        # Particle structure: [position, velocity, fitness, layer].
        self.__swarm = [[x[i], v[i], 0, 1] for i in range(self.__NP)]
        for i, particle in enumerate(self.__swarm):
            self.__swarm[i][2] = self.obj_func(particle[0])
        self.gBest = min(self.__swarm, key=lambda p: p[2])[2]  # record only the best fitness

        # Database stores the individuals with real-evaluated fitness.
        # Deep-copied so in-place mutation of the swarm can never silently
        # corrupt the archive (the original aliased the very same lists).
        self.__database = deepcopy(self.__swarm)
        self.__classifier = GradientBoostingClassifier()  # GBC surrogate

        self.life = Lifecycle()
        self.life.update(eva=self.__NP)  # the initial swarm cost NP real evaluations
        self.acc = []  # per-generation classifier hold-out accuracy

    def optimize(self, iterations=None, evaluations=None, deadline=None):
        """
        Run the main generational loop until any configured stopping
        condition (iterations / evaluations / deadline) is met.

        :return: the Lifecycle object tracking iterations, evaluations and gBest.
        """
        self.life.set_stopping_condition(iterations=iterations, evaluations=evaluations, deadline=deadline)
        while self.life.check_stopping_condition() is False:
            self.__prepare_swarm()  # Choose the best NP particles from the database as the swarm and layer them.
            self.__train_classifier()  # Train the classifier by using the sorted swarm.

            self.__layer_learning()  # Generate the offspring using LLSO operators.
            self.__predict_layers()  # Predict the levels of the offspring.
            self.L1_exploitation()  # Evolve until every offspring is predicted level 1.
            self.__selection()  # Really evaluate Ns candidates and archive them.

            self.gBest = min(p[2] for p in self.__database)

            self.life.update(it=1, eva=self.__Ns, gBest=self.gBest)

            print(self.life.it, self.life.eva, len(self.__database), self.acc[-1], self.gBest)

        return self.life

    def __prepare_swarm(self):
        """
        1) Choose the best NP particles from the database as the swarm;
        2) Sort them into L levels (level 1 holds the fittest particles).
        """
        self.__database = sorted(self.__database, key=lambda p: p[2])
        self.__swarm = deepcopy(self.__database[:self.__NP])
        # Particle i belongs to level floor(i * L / NP) + 1.  Integer
        # arithmetic replaces the original float "grid" equality test, which
        # could skip level boundaries when NP is not divisible by L.
        for i in range(len(self.__swarm)):
            self.__swarm[i][3] = min(self.__L, i * self.__L // self.__NP + 1)

    def __train_classifier(self):
        """Fit the GBC on (position -> level) pairs and record hold-out accuracy."""
        X = np.array([p[0] for p in self.__swarm])
        y = np.array([p[3] for p in self.__swarm])
        # Stratified split so every level is represented in both partitions.
        X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                            test_size=0.2, random_state=0,
                                                            stratify=y)
        self.__classifier.fit(X_train, y_train)
        self.acc.append(self.__classifier.score(X_test, y_test))

    def __predict_layers(self):
        """Overwrite every particle's layer with the classifier's prediction."""
        X = np.array([p[0] for p in self.__swarm])
        pred = self.__classifier.predict(X)
        for i, layer in enumerate(pred):
            self.__swarm[i][3] = layer

    def __layer_learning(self):
        """
        Level-based learning: each particle learns from two exemplars drawn
        from strictly better (lower-numbered) levels; level-1 particles are
        left untouched.
        """
        self.__swarm = sorted(self.__swarm, key=lambda p: p[3])
        # Count the particles per level; index 0 is unused (levels are 1-based).
        L_counts = [0] * (self.__L + 1)
        for p in self.__swarm:
            L_counts[p[3]] += 1
        L = [level for level, count in enumerate(L_counts) if count != 0]  # levels actually present
        for i in range(self.__NP - 1, -1, -1):  # learning progress starts at the lowest level
            for level in range(self.__L, 2, -1):  # learning from the bottom
                if self.__swarm[i][3] != level:
                    continue
                Lb = [lv for lv in L if lv < level]  # levels better than the current one
                if len(Lb) == 1:
                    k1 = k2 = Lb[0]
                else:
                    k1, k2 = sorted(sample(Lb, k=2))  # randomly select 2 higher levels
                Lk1 = [p for p in self.__swarm if p[3] == k1]  # find Level k1
                Lk2 = [p for p in self.__swarm if p[3] == k2]  # find Level k2
                if len(Lk2) == 0:  # defensive: fall back to Level k1
                    Lk2 = Lk1
                x1, x2 = sample(Lk1, k=1)[0], sample(Lk2, k=1)[0]  # randomly sample from Lk1, Lk2
                if x1[2] > x2[2]:  # make x1 the fitter exemplar
                    x1, x2 = x2, x1
                x1, x2 = x1[0], x2[0]  # keep only the position vectors
                # update velocity and position
                self.__swarm[i][1], self.__swarm[i][0] = self.__update(self.__swarm[i], x1, x2)

            if self.__swarm[i][3] == 2:
                L1 = [p for p in self.__swarm if p[3] == 1]
                if len(L1) == 1:
                    # Degenerate but reachable after prediction: learn from the
                    # single level-1 particle twice (the original raised here).
                    x1 = x2 = L1[0]
                else:
                    # x1 is the fitter of two random level-1 particles.
                    x1, x2 = sorted(sample(L1, k=2), key=lambda p: p[2])
                x1, x2 = x1[0], x2[0]  # keep only the position vectors
                # update velocity and position
                self.__swarm[i][1], self.__swarm[i][0] = self.__update(self.__swarm[i], x1, x2)

    def __update(self, p, x1, x2):
        """
        Level-based learning velocity/position update with bound clamping.

        :param p: current particle [position, velocity, fitness, layer]
        :param x1: better exemplar position
        :param x2: worse exemplar position
        :return: (new velocity, new clamped position)
        """
        x, v = p[0], p[1]
        r1 = np.random.uniform(low=0.0, high=1.0, size=self.__d)
        r2 = np.random.uniform(low=0.0, high=1.0, size=self.__d)
        r3 = np.random.uniform(low=0.0, high=1.0, size=self.__d)
        v = r1 * v + r2 * (x1 - x) + self.__phi * r3 * (x2 - x)
        # Clamp every coordinate into [lower, upper].
        x = np.clip(x + v, self.__bounds[0], self.__bounds[1])
        return v, x

    def L1_exploitation(self):
        """
        Keep applying layer learning until the classifier predicts every
        particle as level 1, i.e. all offspring look promising.
        """
        while [p[3] for p in self.__swarm].count(1) < self.__NP:
            self.__layer_learning()
            self.__predict_layers()

    def __selection(self):
        """
        Select Ns candidates for real fitness evaluation and archive them:
        the Ns-1 offspring whose maximal Euclidean distance to the database's
        level-1 particles is smallest (exploitation), plus one offspring
        randomly drawn outside the level-1 slice (exploration).
        """
        PL1 = self.__database[:int(self.__NP / self.__L)]  # database particles in level 1
        dis = []
        for o in self.__swarm:
            # Maximal distance from offspring o to any particle in PL1.
            max_dis = max((np.sqrt(np.sum((o[0] - p[0]) ** 2)) for p in PL1), default=0)
            dis.append(max_dis)
        order = sorted(range(len(dis)), key=lambda k: dis[k])  # indices by ascending maximal distance
        candidates = [self.__swarm[i] for i in order[:self.__Ns - 1]]
        # One extra exploratory candidate drawn from outside the level-1 slice.
        j = sample(range(int(self.__NP / self.__L), self.__NP), k=1)[0]
        candidates.append(self.__swarm[j])
        for candidate in candidates:
            candidate[2] = self.obj_func(candidate[0])
            self.__database.append(candidate)

