import torch
from torch.nn import functional as F
from network import GCN
from performance_evaluation import evaluate
import network_topology as nt
import task_generator
from network_topology import weight_update, weighted_adj
from const import EPISODE_SIZE, TASK_NUM, GROUP_SIZE, DEVICE, INIT_NUM, GRAPH_TAG
import networkx as nx
import time
from tensorboardX import SummaryWriter


class Anc:
    """Actor-critic agent that learns to retune the edge weights of a graph.

    The actor GCN proposes a per-edge weight change for the current task set;
    the critic GCN estimates the discounted return of a state. Training runs
    EPISODE_SIZE episodes; each episode draws GROUP_SIZE task groups, each
    holding TASK_NUM randomly generated tasks.

    Args:
        G: networkx graph produced by the topology builder.
        edge_index: edge index tensor matching G (builder return value).
        task_num: number of randomly generated tasks per task group.
        dropout: whether the GCNs apply dropout.
        hid_num: hidden-layer width of both GCNs.
        alpha: Adam learning rate shared by actor and critic.
    """

    def __init__(self, G, edge_index, task_num, dropout=True, hid_num=50, alpha=0.001):
        super().__init__()

        self.G = G
        self.edge_index = edge_index

        node_num = len(G.nodes)
        edge_num = len(G.edges)

        # Actor emits one weight delta per edge; critic emits a scalar value.
        self.actor = GCN(task_num, hid_num, node_num, edge_num, dropout).to(DEVICE)
        self.critic = GCN(task_num, hid_num, node_num, 1, dropout).to(DEVICE)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=alpha)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=alpha)

        # self.writer = SummaryWriter('./res')

    def update(self, taskset_1, taskset_2, G, edge_index, task_dic, capacity_matrix, v_matrix, gamma=0.8):
        """Run one actor-critic step: predict, apply, score, back-propagate.

        Args:
            taskset_1: task matrix for the current step (state n).
            taskset_2: task matrix for the next step (state n+1).
            G: graph whose edge weights are updated in place.
            edge_index: edge index tensor matching G.
            task_dic: task dictionary passed to the reward evaluator.
            capacity_matrix, v_matrix: environment matrices for `evaluate`.
            gamma: discount factor for the TD target.

        Returns:
            (actor_loss, critic_loss, G, weight_change, step_n_value,
            step_n_target) — losses and statistics are detached tensors; G is
            the graph with its weights already updated.
        """
        old_edge_weight = weighted_adj(G)

        # self.writer.add_graph(self.actor, [taskset_1, old_edge_weight])
        # self.writer.close()

        # Actor proposes a (softmax-normalised) change for every edge weight.
        weight_change = self.actor(taskset_1, edge_index, old_edge_weight)
        weight_change = torch.softmax(weight_change, dim=1)
        # weight_change = weight_change * torch.mean(old_edge_weight) / torch.sum(abs(weight_change))

        # Apply the change inline (rather than via weight_update()) so the
        # weight arithmetic stays inside the autograd graph.
        edge_labels = nx.get_edge_attributes(G, 'weight')
        current_weights = list(edge_labels.values())
        new_value = torch.tensor(current_weights, dtype=torch.float32).to(DEVICE) + weight_change

        # Guard against negative weights; only relevant when the softmax above
        # is skipped, since softmax output is already non-negative.
        min_val = torch.min(new_value.detach())
        if min_val < 0:
            new_value = new_value - min_val * 2

        new_edge_weight = (new_value / torch.sum(new_value))[0]

        # Write the normalised weights back onto the graph (add_edge on an
        # existing edge only updates its attributes).
        for idx, (u, v) in enumerate(G.edges):
            G.add_edge(u, v, weight=new_edge_weight[idx])

        # Environment reward r_n for the weight-update action just taken.
        reward = evaluate(G, task_dic, capacity_matrix, v_matrix)

        # Critic estimates V_n and the TD target r_n + gamma * V_{n+1}.
        step_n_value = self.critic(taskset_1, edge_index, old_edge_weight)
        step_n_target = torch.tensor(reward, dtype=torch.float32).to(DEVICE) + self.critic(
            taskset_2, edge_index, new_edge_weight) * gamma

        # Unlike the classic formulation where the actor outputs action
        # probabilities, here the actor outputs per-edge weight deltas:
        # a TD error > 0 means the change should be reinforced, a TD error
        # < 0 means it should be suppressed.
        actor_loss = torch.mean(
            -(step_n_target - self.critic(taskset_1, edge_index, old_edge_weight)).detach() * weight_change)

        critic_loss = F.mse_loss(self.critic(taskset_1, edge_index, old_edge_weight), step_n_target.detach())

        # Zero gradients, back-propagate both losses, step both optimizers.
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()

        actor_loss.backward()
        critic_loss.backward()

        self.actor_optimizer.step()
        self.critic_optimizer.step()

        return (actor_loss.detach(), critic_loss.detach(), G, weight_change.detach(),
                step_n_value.detach(), step_n_target.detach())

    def train(self):
        """Train the actor and critic for EPISODE_SIZE episodes.

        Each episode generates GROUP_SIZE task groups and performs one
        `update` per consecutive pair of groups; statistics are printed every
        10 episodes and checkpoints are saved every 50 episodes.
        """
        G, edge_index = self.G, self.edge_index
        graph_tag = GRAPH_TAG[0]
        # G, edge_index = nt.GEANT2()
        # graph_tag = GRAPH_TAG[1]

        capacity_matrix = nt.generate_capacity_matrix(G, 50, 100)

        v_matrix = nt.generate_v_matrix(G, 10, 30)

        generator = task_generator.taskGenerator(G)

        # Warm-start the edge weights with a previously trained actor so the
        # run does not converge from an unreasonable initial weighting.
        if graph_tag == 'GBN':
            net_path = 'selected_model/actor_episode300_193320.pth'
            # SECURITY NOTE: torch.load unpickles arbitrary objects — only
            # load checkpoints from trusted sources.
            good_net = torch.load(net_path)
            good_net.eval()  # eval mode so dropout does not perturb the warm start
            task_group = generator.generate_group(INIT_NUM, 50, 80)
            for j in range(INIT_NUM):
                old_edge_weight = nt.weighted_adj(G)
                task_matrix = torch.tensor(task_group[j]['task_lists']['task_matrix'], dtype=torch.float32).to(DEVICE)
                weight_change = good_net(task_matrix.T, edge_index, old_edge_weight)
                weight_change = torch.softmax(weight_change, dim=1)
                # weight_change = weight_change * torch.mean(old_edge_weight) / torch.sum(abs(weight_change))

                G = nt.weight_update(G, weight_change)

        # Main training loop.
        for i in range(EPISODE_SIZE):
            self.actor.train()
            self.critic.train()

            start = time.perf_counter()  # wall-clock timer per episode

            task_group = generator.generate_group(GROUP_SIZE, 20, 80)

            # Per-episode statistics.
            actor_losslist = torch.empty(GROUP_SIZE - 1)
            critic_losslist = torch.empty(GROUP_SIZE - 1)
            predict_valuelist = torch.empty(GROUP_SIZE - 1)
            true_valuelist = torch.empty(GROUP_SIZE - 1)
            weight_change = None

            for j in range(GROUP_SIZE - 1):
                task_matrix1 = torch.tensor(task_group[j]['task_lists']['task_matrix'], dtype=torch.float32).to(DEVICE)
                task_matrix2 = torch.tensor(task_group[j + 1]['task_lists']['task_matrix'], dtype=torch.float32).to(
                    DEVICE)

                task_dic = task_group[j]['task_dic']

                actor_loss, critic_loss, G, weight_change, step_n_value, true_value = self.update(
                    task_matrix1.T,
                    task_matrix2.T,
                    G, edge_index, task_dic, capacity_matrix, v_matrix)
                # Collect statistics for periodic reporting.
                actor_losslist[j] = actor_loss
                critic_losslist[j] = critic_loss
                predict_valuelist[j] = step_n_value
                true_valuelist[j] = true_value

            elapsed = time.perf_counter() - start

            print('episode ' + str(i + 1) + ' finished!, total time:' + str(elapsed))
            if (i + 1) % 10 == 0:
                # FIX: was str(i) — now consistent with the 1-based episode
                # number printed above.
                print("episode " + str(i + 1) + " :")
                print("actor_loss :")
                print(torch.mean(actor_losslist))
                print("critic_loss :")
                print(torch.mean(critic_losslist))
                print("mean predict :")
                print(torch.mean(predict_valuelist))
                print("mean truth :")
                print(torch.mean(true_valuelist))
                print("weight_info :")
                print(weighted_adj(G))
                print(weight_change)

                print(
                    "_________________________________________________________________________________________________")

            if (i + 1) % 50 == 0:
                # TODO: optionally evaluate before keeping a checkpoint;
                # ~500 episodes is usually enough.
                curr_time = time.strftime('%H%M%S', time.localtime())
                # FIX: tag checkpoints with the graph actually being trained
                # (was hard-coded GRAPH_TAG[1] while training on GRAPH_TAG[0]).
                path1 = 'D:\\ProgrammeProjects\\routing_GCN\\model\\actor_' + graph_tag + '_episode' + str(
                    i + 1) + '_' + curr_time + '.pth'
                path2 = 'D:\\ProgrammeProjects\\routing_GCN\\model\\critic_' + graph_tag + '_episode' + str(
                    i + 1) + '_' + curr_time + '.pth'
                # torch.save creates the files itself; no need to pre-touch them.
                torch.save(self.actor, path1)
                torch.save(self.critic, path2)
