import gc
import time

import torch

from DQN_FILE import DQN
import network
from network import ENV
import numpy as np
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")
# Run a greedy-policy evaluation every EVAL_GAP training episodes (see train()).
EVAL_GAP = 1
# Environment task index, 0~6
TASK_NUM = 1
# Environment topology (graph) index, 0~3
GRAPH_NUM = 0
# Learn-iterations per batch; currently only referenced from commented-out code.
time_epoch_learn = 10
# Sync the target network with the eval network every this many episodes.
# NOTE(review): name looks like a typo for TARGET_UPDATE_EPOCH; kept as-is
# because train() references this exact name.
TRAGET_UPDATE_EPOCH = 1


def normalize_reward(random, dff, reward):
    """Rescale *reward* linearly so the random baseline maps to 0 and the
    D+FF baseline maps to 1."""
    baseline_span = dff - random
    return (reward - random) / baseline_span


def eval_GA(gene, env):
    """Play one episode driven by a GA chromosome *gene* (one slot index
    per step) and return the total episode reward."""

    def pick(slot_gene, alloc_flag_next):
        # A gene is usable only if its slot is currently allocatable.
        idx = int(slot_gene)
        if alloc_flag_next[idx]:
            return idx, True
        return -1, False

    env.reset()
    rewards = []
    step_idx = 0
    done = False
    while not done:
        _link_msg, link_msg_next, alloc_flag_next = env.observe()
        action, choose_flag = pick(gene[step_idx], alloc_flag_next)
        reward, done = env.step(link_msg_next, action, choose_flag)
        rewards.append(reward)
        step_idx += 1
    return np.sum(rewards)


def eval_random(env, dqn):
    """Average total episode reward of the random policy over 10 episodes."""
    episode_scores = []
    for _ in range(10):
        env.reset()
        rewards = []
        done = False
        while not done:
            _link_msg, link_msg_next, alloc_flag_next = env.observe()
            action, choose_flag = dqn.choose_action_random(alloc_flag_next)
            reward, done = env.step(link_msg_next, action, choose_flag)
            rewards.append(reward)
        episode_scores.append(np.sum(rewards))
    return np.average(episode_scores)


def eval_DFF(env, dqn):
    """Play one episode with the D+FF (K1) heuristic policy and return the
    total episode reward."""
    env.reset()
    rewards = []
    while True:
        link_msg, link_msg_next, alloc_flag_next, _task, _task_next = env.observe()
        action, choose_flag = dqn.choose_actions_K1(alloc_flag_next)
        reward, done, _state_next, _allocate_next = env.step(
            link_msg, link_msg_next, action, choose_flag)
        rewards.append(reward)
        if done:
            return np.sum(rewards)


def eval_kFF(env, dqn):
    """Play one episode with the k-FF heuristic policy, print and return the
    total episode reward.

    Fix: the original ``break`` left the function without a ``return``, so
    the caller (``k_ff = eval_kFF(env, dqn)``) always received ``None``.
    """
    reward_list = []
    env.reset()
    while True:
        cur, state_next = env.observe()
        action = dqn.choose_actions_KFF(state_next)
        reward, done, state_next_next = env.step(state_next, action)
        reward_list.append(reward)
        if done:
            score = np.sum(reward_list)
            print('kff=%.4f' % score)
            return score


def eval(env, dqn):
    """Play one greedy (evaluation-mode) episode, print and return the total
    episode reward.

    Fix: the original ``break`` left the function without a ``return``, so
    train() collected a list of ``None`` and run()'s ``np.average(eval_score)``
    could not compute a meaningful value. Also dropped the unused local
    ``adj = env.link_adj``.

    NOTE(review): this function shadows the ``eval`` builtin; the name is kept
    because callers in this file use it.
    """
    dqn.eval_net.training = False  # switch network to evaluation mode
    env.reset()
    reward_list = []
    while True:
        cur, state_next = env.observe()
        action = dqn.choose_action_eval(cur, state_next, True)
        reward, done, state_next_next = env.step(state_next, action)
        reward_list.append(reward)
        if done:
            score = np.sum(reward_list)
            print('%.4f' % score)
            return score


def train(env, dqn, store_flag, epoch):
    """Train *dqn* on *env* for *epoch* episodes.

    Each episode: optionally hard-update the target network, anneal epsilon,
    run a greedy evaluation every EVAL_GAP episodes, then roll out one
    episode storing transitions and learning every 5 steps after a 100-step
    warm-up.

    Fix: ``dqn.eval_net.training = True`` was assigned twice back-to-back in
    the original; one assignment suffices (no behavior change).

    :param store_flag: when True, persist weights to 'eval_task.pth' at the end.
    :param epoch: number of training episodes.
    :return: list of evaluation scores gathered during training.
    """
    dqn.eval_net.training = True
    eval_score = []
    step = 0
    adj = env.link_adj
    for episode in range(epoch):
        # Hard-update the target network on a fixed episode schedule.
        if (episode % TRAGET_UPDATE_EPOCH) == 0:
            dqn._replace_target_params()
        # Anneal epsilon towards epsilon_max.
        dqn.epsilon = dqn.epsilon + dqn.epsilon_increment if dqn.epsilon < dqn.epsilon_max else dqn.epsilon_max

        # NOTE(review): eval() flips eval_net.training to False and it is not
        # restored here, so with EVAL_GAP == 1 the net trains in eval mode —
        # confirm whether that is intended.
        if (episode % EVAL_GAP) == 0:
            eval_score.append(eval(env, dqn))
        env.reset()
        reward_list = []
        while True:
            cur, state_next = env.observe()
            action = dqn.choose_action_code(cur, state_next, False)
            reward, done, state_next_next = env.step(state_next, action)

            reward_list.append(reward)
            dqn.store_transition(cur, reward, action, state_next[action], state_next_next, 1 if done else 0)
            # Learn only after a 100-step warm-up, then every 5th step.
            if (step > 100) and (step % 5 == 0):
                dqn.learn(adj)
            if done:
                score = np.sum(reward_list)
                print('train=%.4f' % score)
                break
            step += 1
    if store_flag:
        dqn.store_exp('eval_task.pth')
    return eval_score


def run(k):
    """Run 50 rounds of training on a fresh environment/agent pair; after
    the first round, warm-start each agent from the stored 'eval_task.pth'
    weights. Prints the evaluation score normalized between the random and
    D+FF baselines."""
    for round_idx in range(50):
        env = ENV(task_num=0, graph_num=0)
        env.create_task()
        edge_count = env.graph.number_of_edges()
        dqn = DQN(network.K, network.SLOT_NUM,
                  node_size=edge_count,
                  learning_rate=0.0001,
                  reward_decay=0.95,
                  e_greedy=0.99,
                  replace_target_iter=40,
                  memory_size=50,
                  batch_size=5,
                  e_greedy_increment=None
                  )
        # Warm-start from the previous round's stored weights.
        if round_idx != 0:
            dqn.construct_GAT_RNN(node_size=edge_count, str='eval_task.pth')
        eval_score = train(env, dqn, True, k)
        d_ff = eval_DFF(env, dqn)
        random_score = eval_random(env, dqn)
        # Flag the anomaly of random beating the D+FF heuristic.
        if random_score > d_ff:
            print('+++++++++++')
        print('%.3f' % normalize_reward(random_score, d_ff, np.average(eval_score)))


def scratch():
    """Train an agent from scratch on the stored 'cur' task, printing the
    D+FF baseline score first."""
    env = ENV(task_num='cur', graph_num=0)
    env.load_task()
    edge_count = env.graph.number_of_edges()
    dqn = DQN(network.K, network.SLOT_NUM,
              node_size=edge_count,
              learning_rate=0.0005,
              reward_decay=1,
              e_greedy=0.98,
              replace_target_iter=40,
              memory_size=500,
              batch_size=200,
              e_greedy_increment=None
              )
    d_ff = eval_DFF(env, dqn)
    print('%s拓扑的的D+FF得分是%s' % (env.graph_num, d_ff))
    train(env, dqn, False, 2000)
    print('++++++++++++')


def adapt(env, dqn):
    """Fine-tune an existing agent on *env*: print the D+FF baseline,
    rebuild the GAT-RNN head, unfreeze all parameters, then train briefly."""
    d_ff = eval_DFF(env, dqn)
    print('%s拓扑的的D+FF得分是%s' % (env.graph_num, d_ff))
    dqn.construct_GAT_RNN(node_size=0)
    dqn.unfrozen()
    train(env, dqn, False, 200)
    print("++++++++++++")


def train_and_store(graph):
    """Train an agent on topology *graph* and persist its weights to
    'eval_<graph>.pth'."""
    env = ENV(task_num='cur', graph_num=graph)
    env.create_task()
    edge_count = env.graph.number_of_edges()
    dqn = DQN(network.K, network.SLOT_NUM,
              node_size=edge_count,
              learning_rate=0.03,
              reward_decay=1,
              e_greedy=0.999,
              replace_target_iter=40,
              memory_size=100,
              batch_size=50,
              e_greedy_increment=None
              )
    d_ff = eval_DFF(env, dqn)
    print('%s拓扑的的D+FF得分是%s' % (env.graph_num, d_ff))
    train(env, dqn, False, 200)
    dqn.store_exp('eval_' + str(graph) + '.pth')
    print("++++++++++++")


def transfer(origin_graph, env, dqn):
    """Transfer-learn on *env* from weights trained on *origin_graph*:
    load the checkpoint, re-initialize the recurrent head, freeze the GAT
    layers, then fine-tune."""
    state = torch.load('eval_' + str(origin_graph) + '.pth', map_location="cuda:0")
    dqn.eval_net.load_state_dict(state)
    # Reset only the RNN head; transferred GAT features are kept intact.
    for pname, tensor in dqn.eval_net.rnn.named_parameters():
        if 'weight' in pname:
            torch.nn.init.xavier_normal_(tensor.data)
        elif 'bias' in pname:
            torch.nn.init.constant_(tensor.data, 0)
    dqn.target_copy()
    dqn.frozen_gat()
    d_ff = eval_DFF(env, dqn)
    print('%s拓扑的的D+FF得分是%s' % (env.graph_num, d_ff))
    train(env, dqn, False, 200)
    print('++++++++++++++++')


def transfer_task(env, dqn):
    """Transfer-learn on *env* from the stored task-transfer checkpoint:
    load the weights, re-initialize the recurrent head, freeze the GAT
    layers, then fine-tune."""
    state = torch.load('eval_task_transfer.pth', map_location="cuda:0")
    dqn.eval_net.load_state_dict(state)
    # Reset only the RNN head; transferred GAT features are kept intact.
    for pname, tensor in dqn.eval_net.rnn.named_parameters():
        if 'weight' in pname:
            torch.nn.init.xavier_normal_(tensor.data)
        elif 'bias' in pname:
            torch.nn.init.constant_(tensor.data, 0)
    dqn.target_copy()
    dqn.frozen_gat()
    d_ff = eval_DFF(env, dqn)
    print('%s拓扑的的D+FF得分是%s' % (env.graph_num, d_ff))
    train(env, dqn, False, 500)
    print('++++++++++++++++')


if __name__ == "__main__":
    # run(5)

    # Validation phase: pick one of the experiment entry points below
    # (adapt / scratch / train_and_store / transfer / transfer_task).
    # adapt()
    # scratch()
    # train_and_store(0)
    # train_and_store(1)
    # train_and_store(2)
    # transfer()
    env = ENV(task_num='cur', graph_num=0)
    env.load_task()
    node_size = env.graph.number_of_edges()
    # NOTE(review): the K+1 / SLOT_NUM+14 offsets are unexplained here —
    # confirm they match the network's expected input dimensions.
    dqn = DQN(network.K+1, network.SLOT_NUM+14,
              node_size=node_size,
              learning_rate=0.000001,
              reward_decay=1,
              e_greedy=1,
              replace_target_iter=100,
              memory_size=100,
              batch_size=20,
              e_greedy_increment=None
              )
    # dqn.construct_GAT_RNN(0,'eval_task.pth')
    # d_ff = eval_DFF(env, dqn)
    # Score of the k-FF heuristic baseline (printed inside eval_kFF).
    k_ff = eval_kFF(env, dqn)
    # print('%s拓扑的的D+FF得分是%.4f' %  (env.graph_num, d_ff))
    eval_score = train(env, dqn, False, 20000)
    # dqn.plot_cost()
    # transfer_task(env,dqn)
    # adapt(env, dqn)
    # transfer(0,env,dqn)
    # transfer(1,env,dqn)
    # transfer(2,env,dqn)
    # print('全局训练下%s拓扑的的得分是%s' % (env.graph_num, eval_score))
    # plt.figure()
    # plt.plot(np.arange(len(eval_score)), eval_score)
    # plt.show()
