import networkx as nx
import numpy as np
import torch
from matplotlib import pyplot as plt

import network_topology as nt
from task_generator import taskGenerator
import copy
from const import DEVICE
from performance_evaluation import evaluate

'''
The 7 curves produced here are:
initial weights; one neural network's prediction, stopped after ~50 rounds;
and neural networks 1-5.
'''
if __name__ == "__main__":

    # Actor checkpoints in training order; the last entry is the hand-picked
    # model from selected_model/.
    net_paths = [
        'model/A3C_GEANT2_actor_episode100.0_154103.pth',
        'model/A3C_GEANT2_actor_episode200.0_155257.pth',
        'model/A3C_GEANT2_actor_episode300.0_160432.pth',
        'model/A3C_GEANT2_actor_episode400.0_161710.pth',
        'selected_model/A3C_GEANT2_actor_episode2900.0_161235.pth',
    ]
    actor_net = []
    for net_path in net_paths:
        # map_location keeps loading robust on machines without the GPU the
        # checkpoint was saved on; eval() switches off dropout/batch-norm
        # training behaviour so inference is deterministic (safest choice).
        net = torch.load(net_path, map_location=DEVICE)
        net.eval()
        net.to(DEVICE)
        actor_net.append(net)

    # Alternative topology: G, edge_index = nt.GBN()
    G, edge_index = nt.GEANT2()

    nodenum = len(G.nodes)
    edgenum = len(G.edges)
    capacity_matrix = nt.generate_capacity_matrix(G, 50, 100)
    v_matrix = nt.generate_v_matrix(G, 5, 10)
    generator = taskGenerator(G)

    '''
    Test plan:
    1. random weights vs. updated weights
    2. fixed weights vs. updated weights

    Metric: the reward of every round, scored by evaluate().
    '''

    # One independent graph per curve so each strategy's weight updates
    # cannot interfere with the others.
    G_list = []
    for _ in range(7):
        # Alternative topology: G, _ = nt.GBN()
        G, _ = nt.GEANT2()
        G_list.append(G)

    test_num = 200
    acc_r = np.zeros(7)                   # running reward sum per strategy
    reward_list = [[] for _ in range(7)]  # running average per strategy
    label = ['default', 'with init', 'net1', 'net2', 'net3', 'net4', 'net5']
    color = ['b', 'b', 'g', 'r', 'c', 'm', 'y']

    # Phase 1: light task load (20-30 tasks per group).
    test_group = generator.generate_group(test_num, 20, 30)

    # NOTE(review): both phases iterate range(test_num - 1), so the last
    # generated group of each phase is never evaluated. The running-average
    # denominators below are consistent with that, but it looks like an
    # off-by-one worth confirming against generate_group().
    with torch.no_grad():  # pure inference — no autograd bookkeeping needed
        for k in range(test_num - 1):
            task_dic = test_group[k]['task_dic']
            for s in range(7):
                # s == 0: static default weights (never updated).
                # s == 1: weights updated by actor_net[-1] (the selected net)
                #         for the first 31 groups only, then frozen.
                # s >= 2: weights updated by actor_net[s - 2] every group.
                if s >= 2 or (s == 1 and k <= 30):
                    task_matrix = torch.tensor(
                        test_group[k]['task_lists']['task_matrix'],
                        dtype=torch.float32).to(DEVICE)
                    old_edge_weight = nt.weighted_adj(G_list[s])
                    weight_change = actor_net[s - 2](task_matrix.T, edge_index, old_edge_weight)
                    weight_change = torch.softmax(weight_change, dim=1)
                    # weight_change = weight_change * torch.mean(old_edge_weight) / torch.sum(abs(weight_change))
                    G_list[s] = nt.weight_update(G_list[s], weight_change)

                acc_r[s] += evaluate(G_list[s], task_dic, capacity_matrix, v_matrix)
                reward_list[s].append(acc_r[s] / (k + 1))

        # Phase 2: switch to a heavier task load (60-90 tasks per group).
        test_group = generator.generate_group(test_num, 60, 90)
        for k in range(test_num - 1):
            task_dic = test_group[k]['task_dic']

            for s in range(7):
                # In phase 2 only the per-net strategies keep updating;
                # "with init" (s == 1) stays frozen from phase 1.
                if s >= 2:
                    task_matrix = torch.tensor(
                        test_group[k]['task_lists']['task_matrix'],
                        dtype=torch.float32).to(DEVICE)
                    old_edge_weight = nt.weighted_adj(G_list[s])
                    weight_change = actor_net[s - 2](task_matrix.T, edge_index, old_edge_weight)
                    weight_change = torch.softmax(weight_change, dim=1)
                    # weight_change = weight_change * torch.mean(old_edge_weight) / torch.sum(abs(weight_change))
                    G_list[s] = nt.weight_update(G_list[s], weight_change)

                acc_r[s] += evaluate(G_list[s], task_dic, capacity_matrix, v_matrix)
                # Denominator = (test_num - 1) phase-1 groups + (k + 1)
                # phase-2 groups = k + test_num.
                reward_list[s].append(acc_r[s] / (k + test_num))

    plt.ylabel('Average task reward')
    plt.xlabel('Task group')

    for i in range(7):
        # Dashed line distinguishes "with init", which shares the color
        # blue with "default".
        style = '--' if i == 1 else '-'
        plt.plot(reward_list[i], label=label[i], linewidth=1,
                 color=color[i], linestyle=style)

    plt.legend()
    plt.show()
