import numpy as np
import random
import torch
from torch.nn import Module, Parameter
from torch.nn import functional as F
import network
from performance_evaluation import group_reward
import network_topology as nt
import task_generator
import networkx as nx
from network_topology import weight_update, weighted_adj, draw_graph
from const import EPISODE_SIZE, TASK_NUM, GROUP_SIZE, DEVICE
from tensorboardX import SummaryWriter


class Anc:
    """Actor-critic ("ANC") agent built on GAT networks.

    The actor proposes per-edge weight changes for the topology graph; the
    critic produces a scalar state-value estimate used to form a TD(0) target.
    """

    def __init__(self, node_num, edge_num, task_num, head_num, dropout, alpha=0.01):
        # Actor head outputs `edge_num` values (per-edge weight changes);
        # critic head outputs a single scalar value estimate.
        # NOTE(review): `alpha` is passed both as the GAT construction argument
        # and as the Adam learning rate — confirm this sharing is intentional.
        self.actor = network.GAT_with_fc(task_num, edge_num, 1, node_num, edge_num, head_num, dropout, alpha).to(DEVICE)
        self.critic = network.GAT_with_fc(task_num, edge_num, 1, node_num, 1, head_num, dropout, alpha).to(DEVICE)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=alpha)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=alpha)

    def update(self, taskset_1, taskset_2, reward, G, gamma=0.7):
        """Run one TD(0) actor-critic update step.

        Args:
            taskset_1: task feature matrix for the current step.
            taskset_2: task feature matrix for the next step.
            reward: scalar reward observed between the two steps.
            G: networkx graph whose edge weights the actor adjusts.
            gamma: discount factor for the bootstrapped target.

        Returns:
            Tuple ``(actor_loss, critic_loss, G, weight_change, confidence,
            step_n_value, step_n_target)`` — all tensors detached.
        """
        # Bug fix: the original reached through the module-level global
        # `actor_n_critic` instead of `self`, which breaks for any other
        # instance of this class.
        self.actor.train()
        self.critic.train()

        old_adj = torch.tensor(weighted_adj(G), dtype=torch.double).to(DEVICE)

        # One actor forward pass serves both the weight update and the
        # confidence term in the loss (the original recomputed it at L47).
        weight_change = self.actor(taskset_1, old_adj)
        # Bug fix: `confidence` was never defined before being returned
        # (NameError). The original loss used `actor(...)[1]` as confidence,
        # so bind that element here — assumes the actor output's index 1 is
        # the confidence component; TODO confirm against `network.GAT_with_fc`.
        confidence = weight_change[1]

        G = weight_update(G, weight_change)
        new_adj = torch.tensor(weighted_adj(G), dtype=torch.double).to(DEVICE)

        # Single critic pass reused everywhere below; the original ran the
        # same deterministic forward three times.
        step_n_value = self.critic(taskset_1, old_adj)
        reward_t = torch.tensor(reward, dtype=torch.double).to(DEVICE)
        step_n_target = reward_t + self.critic(taskset_2, new_adj) * gamma

        # Positive TD error reinforces confidence; negative suppresses it.
        td_error = (step_n_target - step_n_value).detach()
        actor_loss = -td_error * confidence
        critic_loss = F.mse_loss(step_n_value, step_n_target.detach())

        # Clear optimizer gradients before backprop.
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        # Removed `requires_grad_(True)` on the losses: losses computed through
        # the networks already carry grad; forcing the flag on non-leaf tensors
        # only masks an accidentally-detached graph (and can raise).
        actor_loss.backward()
        critic_loss.backward()
        # Apply parameter updates.
        self.actor_optimizer.step()
        self.critic_optimizer.step()

        return (actor_loss.detach(), critic_loss.detach(), G,
                weight_change.detach(), confidence.detach(),
                step_n_value.detach(), step_n_target.detach())


# ---------------------------------------------------------------------------
# Training driver: build the topology, instantiate the actor-critic agent,
# then run EPISODE_SIZE episodes of GROUP_SIZE-1 TD updates each, printing
# diagnostics every 100 episodes.
# ---------------------------------------------------------------------------
G, _ = nt.GBN()
nodenum = len(G.nodes)   # idiom fix: was G.nodes.__len__()
edgenum = len(G.edges)   # idiom fix: was G.edges.__len__()
capacity_matrix = nt.generate_capacity_matrix(G, 80, 100)
actor_n_critic = Anc(nodenum, edgenum, TASK_NUM, head_num=2, dropout=False, alpha=0.01)
v_matrix = nt.generate_v_matrix(G, 10, 30)

generator = task_generator.taskGenerator(G)

for i in range(EPISODE_SIZE):
    # Generate one task group and its per-transition rewards for this episode.
    task_group = generator.generate_group(GROUP_SIZE, 50, 80)
    reward_list = group_reward(G, task_group, capacity_matrix, v_matrix)
    reward_list = torch.tensor(reward_list, dtype=torch.double).to(DEVICE)

    # Per-step diagnostics collected over the episode (removed the unused
    # `episode_return` accumulator the original declared but never updated).
    actor_losslist = torch.empty(GROUP_SIZE - 1)
    critic_losslist = torch.empty(GROUP_SIZE - 1)
    predict_valuelist = torch.empty(GROUP_SIZE - 1)
    true_valuelist = torch.empty(GROUP_SIZE - 1)
    weight_change = None
    confidence = None

    for j in range(GROUP_SIZE - 1):
        # Consecutive task matrices form one (state, next-state) transition.
        task_matrix1 = torch.tensor(task_group[j]['task_lists']['task_matrix'], dtype=torch.double).to(DEVICE)
        task_matrix2 = torch.tensor(task_group[j + 1]['task_lists']['task_matrix'], dtype=torch.double).to(DEVICE)

        actor_loss, critic_loss, G, weight_change, confidence, step_n_value, true_value = actor_n_critic.update(
            task_matrix1.T,
            task_matrix2.T,
            reward_list[j], G)

        # Record the step's losses and value estimates for episode reporting.
        actor_losslist[j] = actor_loss
        critic_losslist[j] = critic_loss
        predict_valuelist[j] = step_n_value
        true_valuelist[j] = true_value

    # Periodic progress report.
    if i % 100 == 0:
        print("episode " + str(i) + " :")
        print("network_weight :")
        print(actor_n_critic.actor.last_layer.W)
        print("actor_loss :")
        print(torch.mean(actor_losslist))
        print("critic_loss :")
        print(torch.mean(critic_losslist))
        print("confidence :")
        print(confidence)
        print("mean predict :")
        print(torch.mean(predict_valuelist))
        print("mean truth :")
        print(torch.mean(true_valuelist))
        print("weight_info :")
        print(weighted_adj(G))
        print(weight_change)

        print("_________________________________________________________________________________________________")