"""""
This code is taken from https://github.com/philtabor/Deep-Q-Learning-Paper-To-Code and modified as per our requirement
Author: Shahid Mohammed Shaikbepari
"""""
import numpy
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch as T
import random
from BasicDefinition import Network
from GeneticAlgorithm import Individual


# from util import plot_learning_curve

class LinearDeepQNetwork(nn.Module):
    """Two-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, lr, n_actions, input_dims):
        super(LinearDeepQNetwork, self).__init__()

        # Single hidden layer of width 256 between state and action heads.
        self.fc1 = nn.Linear(input_dims, 256)
        self.fc2 = nn.Linear(256, n_actions)

        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        # Run on GPU when one is available, otherwise fall back to CPU.
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        """Return the vector of Q-values for *state* (one entry per action)."""
        hidden = F.relu(self.fc1(state))
        return self.fc2(hidden)


class Experience:
    """Fixed-capacity replay buffer of (state, action, reward, next_state) tuples."""

    def __init__(self, MaximumSize):
        self.Pool = []                    # stored transitions, order not significant
        self.MaximumSize = MaximumSize    # hard capacity of the buffer

    def Size(self):
        """Return the number of transitions currently stored."""
        return len(self.Pool)

    def Save(self, state, action, reward, next_state):
        """Store one transition, evicting random entries first if the buffer is full.

        Fix: the original tested ``len(Pool) + 1 == MaximumSize``, which capped
        the buffer at MaximumSize - 1 and, being an exact-equality check, never
        fired at all if the pool somehow exceeded the limit. The ``while ... >=``
        loop enforces the stated capacity exactly.
        """
        while len(self.Pool) >= self.MaximumSize:
            self.Erase()
        self.Pool.append((state, action, reward, next_state))

    def Erase(self):
        """Drop one uniformly random transition; no-op when the pool is empty.

        Fix: ``np.random.randint(0)`` raises on an empty pool, so guard first.
        """
        if self.Pool:
            idx = np.random.randint(len(self.Pool))
            self.Pool.pop(idx)

    def Replay(self, number):
        """Return up to ``number`` distinct transitions sampled uniformly.

        Fix: the original shuffled the whole pool in place as a hidden side
        effect; ``random.sample`` leaves the stored order untouched and still
        samples without replacement.
        """
        return random.sample(self.Pool, min(number, len(self.Pool)))


class Agent:
    """Epsilon-greedy DQN agent wrapping a single LinearDeepQNetwork."""

    def __init__(self, input_dims, lr, gamma, epsilon, eps_dec, eps_min, action_space):
        self.LearningRate = lr
        self.InputLength = input_dims
        self.Gamma = gamma                # discount factor for bootstrapped targets
        self.Epsilon = epsilon            # current exploration probability
        self.EpsilonDec = eps_dec         # linear decay subtracted per Learn() call
        self.MinimumEpsilon = eps_min     # floor below which epsilon must not fall
        self.ActionSpace = action_space   # list of available actions (indices map into it)
        self.ActionNumber = len(self.ActionSpace)
        self.Q = LinearDeepQNetwork(self.LearningRate, self.ActionNumber, self.InputLength)

    def Choose_Action(self, state):
        """Return a greedy action index with probability 1 - epsilon, else a random one."""
        if np.random.random() > self.Epsilon:
            # Fix: T.as_tensor avoids the copy/UserWarning that T.tensor emits
            # when `state` is already a tensor (GetResourceList returns one).
            _state = T.as_tensor(state, dtype=T.float)
            ActionOutput = self.Q(_state)
            Action = T.argmax(ActionOutput).item()
        else:
            Action = np.random.randint(self.ActionNumber)
        return Action

    def Decrement_Epsilon(self):
        """Decay epsilon linearly, clamped at MinimumEpsilon.

        Fix: the original used ``min``, which made epsilon jump straight to the
        floor on the first call and then keep decreasing *below* it (eventually
        negative, disabling exploration entirely); ``max`` enforces the floor.
        """
        self.Epsilon = max(self.MinimumEpsilon, self.Epsilon - self.EpsilonDec)

    def Learn(self, state, action, reward, next_state):
        """Perform one TD(0) update toward reward + gamma * max_a Q(next_state, a)."""
        self.Q.optimizer.zero_grad()
        Q_pred = self.Q(state)[action]
        # Fix: detach the bootstrap target so gradients do not flow through it —
        # standard DQN semantics; the original back-propagated into the target.
        Q_next = self.Q(next_state).max().detach()
        Q_target = reward + self.Gamma * Q_next
        Loss = self.Q.loss(Q_pred, Q_target)
        Loss.backward()
        self.Q.optimizer.step()
        self.Decrement_Epsilon()


def GetResourceList(network: Network, cx, cy, weight):
    """Build the distance-weighted residual-resource feature vector around (cx, cy).

    Walks every orbit x within MaxCommunicationDistance of cx (clipped to the
    valid orbit range) and, per orbit, a symmetric band of satellites around cy
    (wrapped modulo SatelliteNumber). Each satellite contributes
    ``weight * (|x-cx| + |y-cy| + 1)**2 * ResidualResource()``.

    Fix: the original positive-dy branch appended ``W * weight * resource`` —
    multiplying by ``weight`` twice — while the negative-dy branch used just
    ``W``. Merging all three append paths into one loop removes the asymmetry.

    Returns a 1-D float torch tensor.
    """
    res = []
    L = network.MaxCommunicationDistance
    N = network.SatelliteNumber
    for x in range(cx - L, cx + L + 1):
        if x < 0 or x >= network.OrbitNumber:
            continue
        # Band width shrinks with orbit distance; C satellites on each side of cy.
        B = 2 * L + 1 - 2 * abs(x - cx)
        C = B // 2
        for dy in range(-C, C + 1):
            y = (cy + N + dy) % N
            # NOTE(review): |y - cy| on the wrapped index can exceed |dy| when dy
            # crosses the ring seam — kept as in the original; confirm whether
            # ring distance (|dy|) was actually intended.
            W = weight * (abs(x - cx) + abs(y - cy) + 1) ** 2
            res.append(W * network.SatelliteGroup[x][y].ResidualResource())
    res = T.tensor(res, dtype=T.float)
    return res


# def SurroundingResource(network: Network, cx, cy):
#     res = []
#     res.append(
#         network.SatelliteGroup[cx - 1][cy].ResidualResource() if network.CoordinationMapping(cx,
#                                                                                              cy) not in network.GetAdjSatellites(
#             cx,
#             cy) else 0)
#     res.append(
#         network.SatelliteGroup[cx][cy - 1].ResidualResource() if network.CoordinationMapping(cx,
#                                                                                              cy - 1) not in network.GetAdjSatellites(
#             cx,
#             cy) else 0)
#     res.append(
#         network.SatelliteGroup[cx][cy].ResidualResource())
#     res.append(
#         network.SatelliteGroup[cx][cy + 1].ResidualResource() if network.CoordinationMapping(cx,
#                                                                                              cy + 1) not in network.GetAdjSatellites(
#             cx,
#             cy) else 0)
#     res.append(
#         network.SatelliteGroup[cx + 1][cy].ResidualResource() if network.CoordinationMapping(cx + 1,
#                                                                                              cy) not in network.GetAdjSatellites(
#             cx,
#             cy) else 0)
#     return res


def SurroundingResourceExcluded(network: Network, cx, cy):
    """Collect residual resources of (cx, cy) and those 4-neighbours that are
    actually adjacent to it in the network topology.

    Order is fixed: left orbit, previous satellite, self, next satellite,
    right orbit; the centre satellite is always included.
    """
    gathered = []
    links = network.GetAdjSatellites(cx, cy)
    for dx, dy in ((-1, 0), (0, -1), (0, 0), (0, 1), (1, 0)):
        centre = (dx, dy) == (0, 0)
        if centre or network.CoordinationMapping(cx + dx, cy + dy) in links:
            gathered.append(network.SatelliteGroup[cx + dx][cy + dy].ResidualResource())
    return gathered


def GetAgent(network: Network, cx, cy, lr, gamma, epsilon, eps_dec, eps_min, sampling_size,
             maximum_size, Seq_Length, training_times):
    """Train and return a DQN Agent for the satellite at grid position (cx, cy).

    For `training_times` passes over this satellite's task blocks, it simulates
    offload sequences of length `Seq_Length` via an Individual, records
    (state, action, reward, next_state) transitions in a replay buffer, and
    trains the agent from random minibatches once the buffer holds more than
    `sampling_size` entries.
    """
    This_Satellite = network.SatelliteGroup[cx][cy]
    N = network.MaxCommunicationDistance
    # `bias` shrinks the input size when cx sits closer than N to either orbit
    # boundary, because GetResourceList skips out-of-range orbits there.
    bias = 0
    if cx < network.MaxCommunicationDistance:
        bias = network.MaxCommunicationDistance - cx
    if cx + network.MaxCommunicationDistance >= network.OrbitNumber:
        bias = cx + 1 - (network.OrbitNumber - network.MaxCommunicationDistance)
    # Feature-vector length of the diamond-shaped neighbourhood, minus the
    # bias*bias cells clipped at the boundary (must match GetResourceList).
    input_size = N * N * 2 + N * 2 + 1 - bias * bias
    ActionSpace = network.GetAdjSatellites(cx, cy)
    # print(cx, cy, input_size, len(ActionSpace))
    res_agent = Agent(input_size, lr, gamma, epsilon, eps_dec, eps_min, ActionSpace)
    experience = Experience(maximum_size)
    for _ in range(training_times):
        for block_idx in range(len(This_Satellite.TaskBlock)):
            # Simulation individual used to apply each step's effect on the network.
            Sim_ind = Individual(Seq_Length, 20, pow(10, 3), 1, cx, cy, network, block_idx)
            las = network.CoordinationMapping(cx, cy)  # previous hop, for the distance penalty
            for idx in range(Seq_Length):
                # NOTE(review): the executed hop `now` is chosen at random while
                # `action` below comes from the policy; the two are stored
                # together in the replay buffer. Looks intentional for
                # exploration, but confirm — the policy's action is never executed.
                now = random.choice(network.GetAdjSatellites(cx, cy))
                now_state = GetResourceList(network, cx, cy, 10)

                action = res_agent.Choose_Action(now_state)

                nowx, nowy = network.Inv_CoordinationMapping(now)

                Sim_ind.chromosome[idx] = now
                Sim_ind.ImplementSingle(idx)

                # Reward favours high residual resource at the chosen satellite,
                # penalises resource variance among its neighbours (load balance)
                # and the squared distance from the previous hop.
                reward = (
                        100 * network.Get(now).ResidualResource() ** 5
                        - 1000 * numpy.var(
                    [network.Get(ele).ResidualResource() for ele in network.GetAdjSatellites(nowx, nowy)])
                        - 10 * network.CalcDistance(las, now) ** 2
                )

                # print(reward)

                next_state = GetResourceList(network, cx, cy, 10)
                reward = T.tensor(reward, dtype=T.float)
                experience.Save(now_state, action, reward, next_state)
                # print(type(now_state), type(action), type(reward), type(next_state))
                # print(now_state, action,reward, next_state)
                # Train only once the buffer can supply a full minibatch.
                if experience.Size() > sampling_size:
                    for s, a, r, _s in experience.Replay(sampling_size):
                        res_agent.Learn(s, a, r, _s)
                las = now
                # print("Finish Training:", block_idx, idx)

        # if is_train:
        # Reset: unload this satellite's tasks from every neighbour before the
        # next training pass so each pass starts from the same network state.
        for ele in network.GetAdjSatellites(cx, cy):
            network.Get(ele).Offload([idx for idx in range(This_Satellite.TaskNumber)])
    return res_agent


def DQN_Decision(network: Network, cx, cy, block_idx, Seq_Length, Agent_Table: list[list[Agent]]):
    """Greedily roll out a Seq_Length-hop offloading path starting at (cx, cy).

    At each hop the local satellite's trained agent picks the argmax-Q action
    over the current resource features; the visited satellite ids become the
    chromosome of the returned Individual.
    """
    path = []
    current = network.CoordinationMapping(cx, cy)
    for _ in range(Seq_Length):
        px, py = network.Inv_CoordinationMapping(current)
        pilot = Agent_Table[px][py]
        # agent.Q.eval()
        features = GetResourceList(network, px, py, 10)
        # print(px, py, features.shape)
        best = T.argmax(pilot.Q(features)).item()
        path.append(current)
        current = pilot.ActionSpace[best]
    return Individual(path, 0, 0, 0, cx, cy, network, block_idx)

# network = Network(5, 5, 2, 3, 6, 20, 1, 50, 1, 500, 200, 2)
#
# Agent_List = [GetAgent(network, i, 0, 0.01, 0.95, 1, 0.01, 0.5, 10, 20, 3, 5) for i
#               in range(network.OrbitNumber)]
#
# for i in range(len(network.SatelliteGroup[2][2].TaskBlock)):
#     # for j in range(network.OrbitNumber):
#     #     for k in range(network.SatelliteNumber):
#     #         print("{: <20}".format(network.SatelliteGroup[j][k].ResidualResource()), end=" ")
#     #     print('\n')
#     # print("\n\n\n\n\n")
#     ind = DQN_Decision(network, 2, 2, i, 3, Agent_List)
#     print(ind.posx, ind.posy, ind.block_idx, ind.TaskIndices,
#           ind.chromosome, [network.SatelliteGroup[2][2].TaskList[idx].Slice for idx in ind.TaskIndices])
#     print(network.SatelliteGroup[2][2].ResidualResource())
#     ind.ImplementAll()
#     # for j in range(network.OrbitNumber):
#     #     for k in range(network.SatelliteNumber):
#     #         print("{: <20}".format(network.SatelliteGroup[j][k].ResidualResource()), end=" ")
#     #     print('\n')
#     # print("\n\n----------------------------------------\n\n\n")
