# _*_ coding:utf-8 _*_
import numpy as np
from math import exp
from math import pow
from copy import deepcopy
from random import uniform

from optimizer import normalized_vec
from optimizer.mtsp.operators import Lifecycle, greedy_route
from optimizer.mtsp.TSP import TSP


class MOAT:
    """
    Mission-oriented ant-team ACO for min-max MTSP, modified with Ant Colony System.
    https://doi.org/10.1016/j.asoc.2018.11.048

    Recommended parameters (from the paper):
    alpha = 1.65, beta = 1.4, gamma = 1.4, lambda = 1.85, kappa = 0.15, rho = 0.01
    """

    def __init__(self, data, NP: int, Pc: float, Pm: float, greedy_init=True,
                 m=None, depot=27, alpha=1.65, beta=1.4, gamma=1.4,
                 lamb=1.85, kappa=0.15, rho=0.01):
        """
        :param data: problem instance exposing nodes, nodes_num, distances,
                     routes_length, x, y and draw_routes
        :param NP: number of ant troupes built per iteration
        :param Pc: crossover probability (kept for interface compatibility; unused here)
        :param Pm: mutation probability (kept for interface compatibility; unused here)
        :param greedy_init: kept for interface compatibility; the greedy tour is
                            always used to seed the path pheromone
        :param m: salesmen (routes) per troupe; defaults to data.m when present,
                  else 2 -- TODO confirm against the caller
        :param depot: index of the depot node (was hard-coded to 27)
        :param alpha: path pheromone exponent
        :param beta: heuristic (inverse distance) exponent
        :param gamma: mission-influence exponent
        :param lamb: sigmoid slope of the mission influence
        :param kappa: learning rate of the mission-direction update
        :param rho: path pheromone evaporation rate
        """
        self.__data, self.__NP, self.__Pc, self.__Pm = data, NP, Pc, Pm

        self.__nodes, self.__nodes_num = data.nodes, data.nodes_num
        self.__distances, self.__routes_length = data.distances, data.routes_length

        self.__depot = depot
        # number of salesmen: explicit argument wins, then data.m, then 2
        self.__m = m if m is not None else getattr(data, 'm', 2)

        # ACO parameters; previously these attributes were referenced by the
        # methods below but never initialised (AttributeError at runtime)
        self.__alpha, self.__beta, self.__gamma = alpha, beta, gamma
        self.__lamb, self.__kappa, self.__rho = lamb, kappa, rho

        # path pheromone, seeded ACS-style from the greedy tour length
        self.__greedy_dis = greedy_route(data, depot=self.__depot)[1]
        self.__path_pheromone = np.full((self.__nodes_num + 1, self.__nodes_num + 1),
                                        fill_value=1.0 / self.__greedy_dis)
        # mission pheromone template: one random 2-d unit vector (search
        # direction) per salesman, shared by all troupes
        self.__mission = [normalized_vec(np.random.uniform(size=2, low=-1, high=1))
                          for _ in range(self.__m)]

        self.__tabu = set()     # nodes visited during the current construction
        self.troupes = []       # routes of every troupe in the current iteration
        self.fitness = []       # total route length of every troupe
        self.pBest = [[], 1e9]  # iteration best: [routes, total length]
        self.gBest = [[], 1e9]  # global best: [routes, total length]

        self.life = Lifecycle()

    def optimize(self, iterations=None, evaluations=None, deadline=None):
        """Run the main ACO loop until a stopping condition is met.

        :param iterations: max iterations (None = unbounded)
        :param evaluations: max fitness evaluations (None = unbounded)
        :param deadline: wall-clock deadline (None = unbounded)
        """
        self.life.set_stopping_condition(iterations=iterations, evaluations=evaluations, deadline=deadline)
        while self.life.check_stopping_condition() is False:
            # construct ant troupes
            self.troupes.clear()
            self.fitness.clear()
            for i in range(self.__NP):
                routes, length = self.__build_routes(h=i)
                self.troupes.append(routes)
                self.fitness.append(length)
            # update iteration best (pBest) and global best (gBest)
            self.pBest[1] = min(self.fitness)
            self.pBest[0] = deepcopy(self.troupes[self.fitness.index(self.pBest[1])])
            if self.gBest[1] > self.pBest[1]:
                self.gBest = deepcopy(self.pBest)
            # pheromone update
            self.__path_pheromone_update()
            self.__mission_pheromone_update()

            # lifecycle update / progress report
            self.life.update(it=1, data=self.gBest)
            self.life.report(per_sec=True, graph=True, mapping=(self.gBest[0], self.__data.draw_routes))

    def __build_routes(self, h):
        """Construct one troupe (m routes) covering every node.

        :param h: troupe index
        :return: (routes, total length); every route starts and ends at the depot
        """
        candidates = self.__nodes.tolist()
        candidates.remove(self.__depot)
        self.__tabu.clear()
        self.__tabu.add(self.__depot)
        routes = [[self.__depot] for _ in range(self.__m)]  # all routes start at depot
        dis = [0] * self.__m
        while candidates:
            # max-min firing rule: always extend the currently shortest route
            k = dis.index(min(dis))
            curr = routes[k][-1]
            post = self.__transition(h=h, k=k, curr=curr, candidates=candidates)
            routes[k].append(post)
            dis[k] += self.__distances[curr][post]
            candidates.remove(post)
            self.__tabu.add(post)

        for i in range(self.__m):
            routes[i].append(self.__depot)  # all routes return to depot
            dis[i] += self.__distances[routes[i][-2]][routes[i][-1]]

        return routes, sum(dis)

    def __transition(self, h: int, k: int, curr: int, candidates):
        """Roulette-wheel selection of the next node for ant k.

        :param h: troupe index (kept for interface compatibility; unused since
                  the mission template became shared between troupes)
        :param k: index of the ant (route) inside the troupe
        :param curr: node the ant currently sits on
        :param candidates: unvisited nodes; must be non-empty
        :return: the chosen next node
        :raises RuntimeError: if candidates is empty
        """
        if not candidates:
            raise RuntimeError(f'{self.__class__.__name__}.__transition called with no candidates')

        weights = []
        for node in candidates:
            tau = pow(self.__path_pheromone[curr][node], self.__alpha)
            eta = pow(1 / self.__distances[curr][node], self.__beta)
            zee = pow(self.__mission_influence(k=k, node=node), self.__gamma)
            weights.append(tau * eta * zee)

        # roulette wheel on un-normalised weights; defaulting to the last
        # candidate guards against float-rounding fall-through (previously
        # that path printed an error and called quit(-1))
        rand = uniform(0, 1) * sum(weights)
        acc = 0.0
        post = candidates[-1]
        for node, weight in zip(candidates, weights):
            acc += weight
            if rand < acc:
                post = node
                break

        self.__update_mission(k=k, node=post)
        return post

    def __mission_influence(self, k, node):
        """
        :param k: the kth ant in the team
        :param node: the node about to reach
        :return: influence in (0, 1), a sigmoid of the directional net gain
        """
        coord = np.array([self.__data.x[node], self.__data.y[node]])  # target node coordinates
        depot = np.array([self.__data.x[self.__depot], self.__data.y[self.__depot]])  # depot coordinates
        w = normalized_vec(coord - depot)  # unit direction depot -> node
        gain = np.inner(self.__mission[k], w)  # alignment with own mission direction, gain points
        loss = 0.0
        for i in range(self.__m):
            if i != k:
                # alignment with other ants' mission directions counts against
                # this node; the original computed this inner product but
                # discarded the result, leaving loss always 0
                loss += np.inner(self.__mission[i], w)
        net = gain - loss  # net points on direction
        return 1 / (1 + exp(-self.__lamb * net))  # sigmoid func

    def __update_mission(self, k, node):
        """Pull ant k's mission direction towards the node it just chose.

        :param k: the kth ant
        :param node: the node just selected (city j)
        """
        coord = np.array([self.__data.x[node], self.__data.y[node]])
        depot = np.array([self.__data.x[self.__depot], self.__data.y[self.__depot]])
        w = coord - depot
        # exponential moving average with learning rate kappa, re-normalised
        self.__mission[k] = normalized_vec((1 - self.__kappa) * self.__mission[k] + self.__kappa * w)

    def __local_update_pheromone(self):
        # ACS local pheromone update -- not implemented yet
        pass

    def __path_pheromone_update(self):
        """Evaporate, then deposit pheromone from every troupe (symmetric)."""
        self.__path_pheromone = (1 - self.__rho) * self.__path_pheromone
        for i in range(self.__NP):
            delta = 1 / self.fitness[i]  # shorter troupes deposit more
            for route in self.troupes[i]:
                for j in range(1, len(route)):
                    a, b = route[j - 1], route[j]
                    self.__path_pheromone[a][b] += delta
                    self.__path_pheromone[b][a] += delta

    def __mission_pheromone_update(self):
        """Re-derive the mission direction vectors from the routes just built.

        NOTE(review): every troupe overwrites the shared mission template in
        turn, so only the last troupe effectively contributes -- this looks
        like a leftover from the per-troupe mission version; confirm intent.
        """
        for h in range(self.__NP):
            routes = self.troupes[h]
            for k, route in enumerate(routes):
                inner = route[1:-1]  # intermediate nodes, depot excluded
                if not inner:
                    # empty route: keep the previous direction
                    # (previously a ZeroDivisionError)
                    continue
                dx = [self.__data.x[n] - self.__data.x[self.__depot] for n in inner]
                dy = [self.__data.y[n] - self.__data.y[self.__depot] for n in inner]
                centroid = [sum(dx) / len(dx), sum(dy) / len(dy)]
                self.__mission[k] = normalized_vec(np.array(centroid))


if __name__ == "__main__":
    # Demo entry point. Previously these statements ran at import time and
    # crashed: MOAT(data=tsp) omitted the required NP, Pc and Pm arguments.
    tsp = TSP(path=r'D:\Workspace\PyCharm\TSP Research\data\TSPLIB\att48.tsp.txt')
    moat = MOAT(data=tsp, NP=10, Pc=0.6, Pm=0.1)
    moat.optimize(iterations=100)
