"""
因为使用了multiprocessing,所以设备标识将不得不换成cpu
gpu版本不会搞
"""
import networkx as nx
import multiprocessing as mp

import numpy as np
import torch
import copy
import time

from matplotlib import pyplot as plt

import network_topology as nt

from task_generator import taskGenerator
from const import DEVICE_CPU, TASK_NUM, HID_NUM, INIT_NUM, DEVICE, GROUP_SIZE, EPISODE_SIZE
from network import GCN
from performance_evaluation import evaluate
from torch.nn import functional as F


def push_and_pull(l_anc, g_anc, task_matrix1, task_matrix2, edge_index, task_dic, capacity_matrix, v_matrix, actor_opt,
                  critic_opt, g_num):
    """Run one local A3C step and synchronize with the global network.

    The local net (l_anc) performs the forward pass and computes the losses;
    the resulting gradients are pushed onto the shared global net (g_anc),
    the shared optimizers step, and the updated global weights are pulled
    back into the local net.

    Returns (environment reward, detached actor loss, detached critic loss).
    """
    reward, actor_loss, critic_loss = l_anc.forward_and_lossfunc(task_matrix1.T, task_matrix2.T, edge_index, task_dic,
                                                                 capacity_matrix, v_matrix, gamma=0.8)

    # `+=` on a multiprocessing.Value is NOT atomic; take the Value's lock so
    # concurrent workers cannot lose increments, and capture the counter once
    # so the checkpoint condition below is not racy.
    with g_num.get_lock():
        g_num.value += 1
        step_count = g_num.value

    actor_opt.zero_grad()
    critic_opt.zero_grad()

    actor_loss.backward()
    critic_loss.backward()

    # Push: hand the locally computed gradients to the shared global parameters.
    for lp, gp in zip(l_anc.actor.parameters(), g_anc.actor.parameters()):
        gp._grad = lp.grad

    for lp, gp in zip(l_anc.critic.parameters(), g_anc.critic.parameters()):
        gp._grad = lp.grad

    actor_opt.step()
    critic_opt.step()

    # Pull: resynchronize the local net with the freshly updated global weights.
    l_anc.actor.load_state_dict(g_anc.actor.state_dict())
    l_anc.critic.load_state_dict(g_anc.critic.state_dict())

    if step_count % (100 * GROUP_SIZE) == 0:
        # Periodically checkpoint the global actor.
        curr_time = time.strftime('%H%M%S', time.localtime())
        # `//` keeps the episode index an integer so the file name does not
        # contain a float suffix like "2900.0".
        path1 = 'D:\\ProgrammeProjects\\routing_GCN\\model\\A3C_GEANT2_actor_episode' + str(
            step_count // GROUP_SIZE) + '_' + curr_time + '.pth'
        # torch.save creates/overwrites the file itself; the previous
        # open(path1, 'w').close() pre-touch was redundant.
        torch.save(g_anc.actor, path1)

        print("save point")

    return reward, actor_loss.detach(), critic_loss.detach()


# Share the optimizer's per-parameter statistics across processes so that all
# workers optimize the master network through one common set of moments.
class SharedAdam(torch.optim.Adam):
    """Adam variant for A3C training.

    Eagerly creates every parameter's state (normally built lazily on the
    first step()) and moves the exponential-moving-average buffers into
    shared memory, so worker processes update the same global statistics.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.99), eps=1e-8,
                 weight_decay=0):
        super().__init__(params, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Pre-initialize state and place the moment tensors in shared memory.
        for group in self.param_groups:
            for param in group['params']:
                st = self.state[param]
                st['step'] = 0
                st['exp_avg'] = torch.zeros_like(param.data)
                st['exp_avg_sq'] = torch.zeros_like(param.data)
                st['exp_avg'].share_memory_()
                st['exp_avg_sq'].share_memory_()


class Net:
    """Actor-critic pair built on GCNs for the routing problem.

    Not an nn.Module itself; it bundles the actor and critic GCNs together
    with the topology graph so they can be shared between worker processes.
    """

    def __init__(self, G, edge_index, task_num, dropout=True, hid_num=HID_NUM):
        super().__init__()

        self.G = G                    # networkx topology graph
        self.edge_index = edge_index  # edge-index tensor consumed by the GCNs

        nodenum = G.nodes.__len__()
        edgenum = G.edges.__len__()

        # Actor outputs one value per edge (proposed weight changes);
        # the critic outputs a single scalar state value.
        self.actor = GCN(task_num, hid_num, nodenum, edgenum, dropout, DEVICE_CPU).to(DEVICE_CPU)
        self.critic = GCN(task_num, hid_num, nodenum, 1, dropout, DEVICE_CPU).to(DEVICE_CPU)

    def share_memory(self):
        # Move both networks' parameters into shared memory so all worker
        # processes operate on the same global weights.
        self.actor.share_memory()
        self.critic.share_memory()

    '''
    在每个worker的run里面做这个工作或者是push&pull内做，做了反向传播的过程而且顺便求了一下lossfunc
    使用求出的loss进行反向传播会得到梯度，这个梯度是属于l_net的，我们把这个赋给g_net()，然后再调用g_opt调整参数
    这部分代码和单个网络的actor_critic是一致的，结束在了loss反向传播，opt开始更新参数之前
    '''
    # (Translation of the note above:) this work happens in each worker's
    # run(), via push_and_pull: forward pass plus loss computation. Backprop
    # on these losses yields gradients belonging to the LOCAL net; the caller
    # copies them onto the global net and then steps the shared optimizers.
    # The logic matches a single-network actor-critic up to (but not
    # including) the optimizer update.

    def forward_and_lossfunc(self, taskset_1, taskset_2, edge_index, task_dic, capacity_matrix, v_matrix, gamma=0.8):
        """Forward pass of actor and critic plus A3C loss computation.

        taskset_1/taskset_2 are the task matrices for step n and n+1
        (assumed already transposed by the caller — confirm against
        push_and_pull). Returns (reward, actor_loss, critic_loss) with the
        losses still attached to the autograd graph.
        """
        # NOTE(review): anomaly detection slows training noticeably and is
        # normally only enabled while debugging autograd issues.
        with torch.autograd.set_detect_anomaly(True):
            # Forward and backward pass of the model.
            self.actor.train()
            self.critic.train()

            G = self.G
            old_edge_weight = nt.weighted_adj(G, device=DEVICE_CPU)

            # self.writer.add_graph(self.actor, [taskset_1, old_edge_weight])
            # self.writer.close()

            # Actor proposes a change for every edge weight.
            weight_change = self.actor(taskset_1, edge_index, old_edge_weight)

            '''
            下部分代码试图加上噪声
            '''
            # (Translation:) the block below adds exploration noise.
            mean = np.mean(weight_change.detach().numpy())  # mean of the proposed changes
            variance = 1  # variance assumed to be 1; tune as needed

            # Draw Gaussian noise with the same shape as the actor output.
            noise = np.random.normal(mean, np.sqrt(variance), weight_change.detach().numpy().shape)
            weight_change += torch.tensor(noise, dtype=torch.float32).to(DEVICE_CPU)

            # weight_change = torch.softmax(weight_change, dim=1)
            # Standardize the noisy changes; mean/var are detached so the
            # normalization constants receive no gradient.
            mean = torch.mean(weight_change.detach())
            var = torch.var(weight_change.detach())
            weight_change = (weight_change - mean) / torch.sqrt(var)
            # Apply the changes inline (this is the body of weight_update())
            # so the weight arithmetic stays inside the autograd graph.
            edge_labels = nx.get_edge_attributes(G, 'weight')
            val_list = list(edge_labels.values())
            new_value = torch.tensor(val_list, dtype=torch.float32).to(DEVICE_CPU) + weight_change

            # Shift to avoid negative weights when the softmax above is
            # skipped (NOTE(review): not equivalent to softmax normalization).
            min_val = torch.min(new_value.detach())
            if min_val < 0:
                new_value = new_value - min_val * 2
            #
            new_edge_weight = (new_value / torch.sum(new_value))[0]

            # Write the new weights back into the graph.
            edges = G.edges
            i = 0
            for item in edges:
                G.add_edge(item[0], item[1], weight=new_edge_weight[i])
                i = i + 1

            # With the updated weights, get the environment's reward for this action.
            reward = evaluate(G, task_dic, capacity_matrix, v_matrix)

            # Critic estimates the state values Vn and Vn+1.
            # NOTE(review): step_n_value is never used below — the TD target
            # comparison re-evaluates the critic instead. Confirm intent.
            step_n_value = self.critic(taskset_1, edge_index, old_edge_weight)
            step_n_target = torch.tensor(reward, dtype=torch.float32).to(DEVICE_CPU) + self.critic(taskset_2,
                                                                                                   edge_index,
                                                                                                   new_edge_weight, ) * gamma
            # te_delta = step_n_target - step_n_value = (rn + γ * Vn+1) - Vn
            '''
            这里很重要
            这里计算actor_loss的逻辑不太一样
            原版中，将每个输出对应一个动作采取的可能性，当 TD 误差值大于 0 时增强，当 TD 误差值小于 0 时减小
            这里，actor输出的是每个边权值的改变值，如果TD大于0，意味着这样的改变应当被鼓励，否则应当被抑制
            '''
            # (Translation:) important — the actor_loss differs from the
            # classic formulation. Classically each output is an action
            # probability, reinforced when the TD error is > 0 and suppressed
            # otherwise. Here the actor outputs per-edge weight CHANGES: a
            # positive TD error means the change should be encouraged,
            # a negative one means it should be discouraged.
            # actor_loss = -(step_n_target - self.critic(taskset_1, edge_index, old_edge_weight)).detach() * torch.sum(
            #     weight_change)
            actor_loss = - (step_n_target - self.critic(taskset_1, edge_index, old_edge_weight)).detach() * torch.var(
                weight_change)
            critic_loss = F.mse_loss(self.critic(taskset_1, edge_index, old_edge_weight), step_n_target.detach())

        return reward, actor_loss, critic_loss


class Worker(mp.Process):
    """One A3C worker process.

    Owns a local copy of the actor-critic net, generates task batches, and
    repeatedly pushes gradients to / pulls weights from the shared global
    network via push_and_pull.
    """

    def __init__(self, task_num, g_anc, g_res_queue, g_r, g_num, actor_opt, critic_opt, capacity_matrix, v_matrix, idx,
                 G=None):
        super(Worker, self).__init__()

        self.idx = idx
        self.capacity_matrix = capacity_matrix
        self.v_matrix = v_matrix
        self.g_r = g_r
        self.g_num = g_num

        if G is None:
            # No topology supplied: fall back to the GBN topology.
            self.G, self.edge_index = nt.GBN(DEVICE_CPU)
        else:
            self.G = G
            _, self.edge_index = nt.GEANT2(DEVICE_CPU)

        self.generator = taskGenerator(self.G)

        # BUG FIX: the local net must be built from self.G — the original
        # passed the raw `G` argument, which is None when no topology is
        # given and would crash inside Net.__init__ (G.nodes on None).
        self.l_anc = Net(self.G, self.edge_index, task_num)

        self.g_anc = g_anc
        self.actor_opt = actor_opt
        self.critic_opt = critic_opt
        self.res_queue = g_res_queue

    def run(self):
        """Process entry point (invoked by start()).

        For each episode: generate a group of tasks, then for every pair of
        consecutive task matrices run push_and_pull, which does the local
        forward/backward pass and updates the global network. A single None
        sentinel is put on the result queue when this worker finishes.
        """
        for i in range(EPISODE_SIZE):
            task_group = self.generator.generate_group(GROUP_SIZE, 20, 100)
            print("worker " + str(self.idx) + ", running episode " + str(i))

            for j in range(GROUP_SIZE - 1):
                task_matrix1 = torch.tensor(task_group[j]['task_lists']['task_matrix'], dtype=torch.float32).to(
                    DEVICE_CPU)
                task_matrix2 = torch.tensor(task_group[j + 1]['task_lists']['task_matrix'], dtype=torch.float32).to(
                    DEVICE_CPU)

                task_dic = task_group[j]['task_dic']

                # Return values (reward/losses) are currently unused here.
                push_and_pull(self.l_anc, self.g_anc, task_matrix1, task_matrix2, self.edge_index, task_dic,
                              self.capacity_matrix, self.v_matrix, self.actor_opt, self.critic_opt, self.g_num)

        print("GOOD!")
        self.res_queue.put(None)


def a3c_train():
    """Build the global A3C nets/optimizers, spawn worker processes and train.

    Everything stays on the CPU: the workers share the global network's
    parameters through shared memory, which does not mix with CUDA here.
    """
    # G, edge_index = nt.GBN(DEVICE_CPU)
    G, edge_index = nt.GEANT2(DEVICE_CPU)
    need_init = False

    # Optionally warm-start G's edge weights with a previously trained actor.
    if need_init:
        net_path = 'selected_model/A3C_GEANT2_actor_episode2900.0_161235.pth'
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from a trusted source.
        good_net = torch.load(net_path)
        good_net = good_net.cpu()
        good_net.eval()  # eval mode disables dropout etc. for deterministic init

        generator = taskGenerator(G)

        task_group = generator.generate_group(INIT_NUM, 50, 80)
        for j in range(INIT_NUM):
            old_edge_weight = nt.weighted_adj(G, device=DEVICE_CPU)
            task_matrix = torch.tensor(task_group[j]['task_lists']['task_matrix'], dtype=torch.float32).to(DEVICE_CPU)
            weight_change = good_net(task_matrix.T, edge_index, old_edge_weight)
            weight_change = torch.softmax(weight_change, dim=1)

            # G's weights must stay on the CPU; the worker processes use them later.
            G = nt.weight_update(G, weight_change, DEVICE_CPU)

    # Global (shared) actor-critic plus shared counters and result queue.
    g_anc = Net(G, edge_index, TASK_NUM)
    g_anc.share_memory()

    g_res_queue = mp.Queue()
    g_r = mp.Value('d', 0)
    g_num = mp.Value('i', 0)

    g_capacity_matrix = nt.generate_capacity_matrix(G, 50, 100)
    g_v_matrix = nt.generate_v_matrix(G, 5, 10)

    g_actor_opt = SharedAdam(g_anc.actor.parameters(), lr=1e-3)
    g_critic_opt = SharedAdam(g_anc.critic.parameters(), lr=1e-3)

    # Each worker gets its own deep copy of the graph to mutate independently.
    workers = [
        Worker(TASK_NUM, g_anc, g_res_queue, g_r, g_num, g_actor_opt, g_critic_opt, g_capacity_matrix, g_v_matrix, i,
               copy.deepcopy(G)) for i in range(3)]

    for w in workers:
        w.start()

    r_res = []

    # Drain the queue until EVERY worker has sent its None sentinel. The
    # original broke on the first None, which left the other workers'
    # sentinels buffered and ignored their results. Emptying the queue
    # before join() also avoids the classic deadlock where a child cannot
    # exit while items it put are still buffered.
    finished = 0
    while finished < len(workers):
        r = g_res_queue.get()
        if r is None:
            finished += 1
        else:
            r_res.append(r)
            print("get one!")

    print("waiting for sub_p")
    for w in workers:
        w.join()

    # plt.plot(r_res)
    # plt.ylabel('Moving accumulated reward')
    # plt.xlabel('episode')
    # plt.show()


if __name__ == "__main__":
    # mp.freeze_support()
    a3c_train()
