import datetime
import os
import time
import numpy as np
import random
import sys
import torch
import matplotlib.dates as mdates
import tqdm
import pickle
sys.path.append(r'E:\项目\rf_computing')
from env.network_playground import NetworkEnv
from algorithm.DQN import DQN
import matplotlib.pyplot as plt
from config.DQNConfig import DQNConfig
from config.ENVConfig import ENVConfig
from config.TRAINConfig import TrainConfig
from baseline import optimal_time_matching
"""
两个个agent,一个main,子agent共享
"""


def env_agent_config(*k):
    """Create the network environment and the two DQN agents (main + sub).

    Args:
        *k: positional arguments; ``k[0]`` is the task-generation speed used
            to tag the visdom windows.

    Returns:
        Tuple of ``(nenv, agent_main, agent_sub, cfg)``.
    """
    generate_speed = k[0]

    # Network environment initialisation.
    cfg = ENVConfig(visenv=f'DQN_g_spedd_{generate_speed}')
    nenv = NetworkEnv(cfg)

    # Main agent chooses an edge group: 4 features per edge + 2 task features.
    main_cfg = DQNConfig(
        n_states=cfg.edge_num * 4 + 2,
        n_actions=cfg.edge_num,
        hidden_dim=256,
        lr=0.0001,
        buffer_size=int(2e5),
        main_win=f'DQN_maintrain_gspeed{generate_speed}',
    )
    print(main_cfg.__dict__)

    # Sub agent chooses a node inside the selected edge group.
    sub_cfg = DQNConfig(
        n_states=cfg.nodes_of_edge * 4 + 2,
        n_actions=cfg.nodes_of_edge,
        hidden_dim=256,
        lr=0.0002,
        buffer_size=int(2e5),
        sub_win=f'DQN_subtrain_gspeed{generate_speed}',
    )

    agent_main = DQN(main_cfg)
    agent_sub = DQN(sub_cfg)

    n_states = nenv.observation_space.shape[0]
    n_actions = nenv.action_space.n
    print(f"0级环境状态空间维度：{n_states}，动作空间维度：{n_actions}")
    print(nenv.info())

    return nenv, agent_main, agent_sub, cfg

def train(cfg,nenv,agent_main, agent_sub):
    """Two-level DQN training loop.

    The main agent assigns each queued task to an edge group; the sub agent
    (shared by all groups) assigns it to a node within that group.  A task's
    true reward is only known once it finishes processing, so transitions are
    parked in temporary dicts keyed by task id and pushed into the replay
    buffers after the final reward arrives.

    Args:
        cfg: config object providing totalStep, GenerateTaskSpeed, policy
            ('agent' / 'random' / 'optimal_time_matching'), train_eps,
            random_seed, TaskInitID, edge_num, nodes_of_edge.
        nenv: NetworkEnv instance.
        agent_main: DQN agent for edge-group selection.
        agent_sub: DQN agent for in-group node selection.

    Returns:
        (rewards, w1_returns): per-episode average reward per task, and a
        second list that is currently never appended to (always empty).
    """
    totalStep =cfg.totalStep
    GenerateTaskSpeed = cfg.GenerateTaskSpeed
    policy = cfg.policy
    eps =cfg.train_eps
    print(f'policy:{policy},i_eps:{eps}')
    random.seed(cfg.random_seed)
    rewards = []
    w1_returns = []  # NOTE(review): returned but never filled anywhere below
    cfg_str = '\n'.join([f"{key}: {value}" for key, value in cfg.__dict__.items()])
    print("Parsed arguments:\n" + cfg_str)
    for i_ep in tqdm.tqdm(range(cfg.train_eps)):
        taskID = cfg.TaskInitID
        tempbuffer ={}  # temporary buffer: task id -> [s, a, r, s', done] awaiting the final reward
        tempbuffer_work1 = {}  # same, for the sub-agent (in-group) transitions
        ep_return = 0
        w1_return = 0

        print(f'{i_ep+1}/{cfg.train_eps}')
        nenv.reset()

        for step in range(int(totalStep)):  # simulation steps in one episode
            # if i_ep%49==0:
            #     nenv.render(i_ep,cfg.policy)
            if step % GenerateTaskSpeed == 0:
                for i in range(len(nenv.user_list)):
                    random_user = random.choice(nenv.user_list)  # pick a user at random
                    task = random_user.generate_task(taskID,mode='Train')
                    taskID += 1
                    nenv.add_task(task)


            if nenv.task_queue.qsize()>0:  # dispatch while tasks are queued
                task_len = nenv.task_queue.qsize()

                for i in range(task_len):  # assign every queued task in turn
                    # print(f'分配任务{i+1}/{task_len}')
                    state = nenv.get_state()
                    # print(f'全局状态：{state}')
                    if policy=='agent':
                        action = agent_main.sample_action(state)  # epsilon-greedy action
                    elif policy=='random':
                        _nodes = cfg.edge_num
                        action = random.randint(0, _nodes-1)
                    elif policy=='optimal_time_matching':

                        action = optimal_time_matching(state)

                    state_next,r,done,tid = nenv.step1(action)  # global (edge-level) step
                    # if ep_step==9990 and i ==task_len-1:
                    #     print('本回合最后一次分配')
                    #     done = True
                    tempbuffer[tid]=[state,action,r,state_next,done]


            for group_i in range(len(nenv.work_groups)):  # in-group task assignment
                if nenv.task_queue_edge_list[group_i].qsize()>0:
                    task1_len = nenv.task_queue_edge_list[group_i].qsize()
                    for i in range(task1_len):
                        gstate = nenv.get_group_state(group_i)
                        # print(f'组{group_i}状态',gstate)
                        if policy == 'agent':
                            action = agent_sub.sample_action(gstate)
                        elif policy == 'random':
                            _nodes = cfg.nodes_of_edge
                            action = random.randint(0, _nodes - 1)
                        elif policy == 'optimal_time_matching':

                            action = optimal_time_matching(gstate)

                        state_next, r, done, taskid = nenv.stepbyworker1(group_i,action)   # group-internal step

                        tempbuffer_work1[taskid] =[gstate,action,r,state_next,done]
                    # Record per-node compute/bandwidth load from the last observed group state.
                    # NOTE(review): state_next here is whatever the final loop iteration produced.
                    for i in range(int(len(state_next[2:])/4)):
                        nenv.cl[group_i][i].append(state_next[2+4*i+2])
                        nenv.bl[group_i][i].append(state_next[2+4*i+3])# worker bandwidth load



            for group_i in  range(len(nenv.work_groups)) : # task processing
                for worker in nenv.work_groups[group_i]:
                    worker.process_computing_task()
                    task_done = worker.process_transmit_task()
                    if len(task_done)>0:
                        for done in task_done:  # NOTE(review): `done` shadows the step-flag above
                            taskid,reward = done[0],done[1]
                            sasrd_w1 = tempbuffer_work1[taskid]
                            sasrd_w1[2] = reward  # patch in the true (final) reward
                            sasrd_main = tempbuffer[taskid]
                            sasrd_main[2] = reward
                            if policy == 'agent':
                                agent_sub.memory.push(tuple(sasrd_w1))
                                agent_main.memory.push(tuple(sasrd_main))  # feed the main-agent replay buffer
                            w1_return += reward
                            ep_return += reward

                            # print(f'写入缓冲池：{tuple(sasrd)}')
                if policy=='agent':
                    agent_sub.update()
            if policy=='agent':
                if step%1==0:  # throttle main-agent update rate (currently every step)
                    agent_main.update()
        print('任务分配结束，处理未完成任务')
        # Drain phase: keep processing until all compute and bandwidth queues are empty.
        while 1:
            cqs = 0
            bqs = 0
            for w in nenv.edge_list:
                _, _, cq, bq = w.get_all_node_state()
                cqs += cq
                bqs += bq
            if cqs == 0 and bqs == 0:
                print('任务全部处理完成，回合结束')
                break
            else:
                for group_i in range(len(nenv.work_groups)):  # task processing
                    for worker in nenv.work_groups[group_i]:
                        worker.process_computing_task()
                        task_done = worker.process_transmit_task()
                        if len(task_done) > 0:
                            for done in task_done:
                                taskid, reward = done[0], done[1]

                                sasrd_w1 = tempbuffer_work1[taskid]
                                sasrd_w1[2] = reward
                                sasrd_main = tempbuffer[taskid]
                                sasrd_main[2] = reward
                                if policy == 'agent':
                                    agent_sub.memory.push(tuple(sasrd_w1))
                                    agent_main.memory.push(tuple(sasrd_main))  # feed the main-agent replay buffer
                                ep_return += reward

        if i_ep % 10 == 0:
            print('main buffer 大小：',len(agent_main.memory))

        task_num = taskID - cfg.TaskInitID
        av_task_speed = ep_return/task_num  # average reward per generated task
        rewards.append(av_task_speed)



        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        print(f"回合：{i_ep + 1}/{cfg.train_eps}，policy:{policy}  总奖励：{ep_return:.2f}; 任务数:{task_num},平均任务速度：{av_task_speed:.2f} mianEpislon：{agent_main.epsilon:.3f},subEpislon：{agent_sub.epsilon:.3f} Time:{now}")
        nenv.info()

    print('完成训练！')
    if policy=='agent':
        # Persist both agents with pickle (trusted, local files only).
        model_dir = 'E:\\项目\\rf_computing\\models\\'
        main_model_name = 'dqn_main_model_continue_task.pkl'
        sub_model_name = 'dqn_sub_model_continue_task.pkl'
        with open(model_dir+main_model_name, 'wb') as f1:
            pickle.dump(agent_main, f1)
        with open(model_dir+sub_model_name, 'wb') as f2:
            pickle.dump(agent_sub, f2)

        print(f'保存模型{main_model_name}、{sub_model_name}，路径为：{model_dir}')
    return rewards,w1_returns

def test(cfg, nenv,agent_main=None,agent_sub=None):
    """Evaluate a dispatch policy on the network environment.

    Mirrors train() but uses predict_action (greedy, no exploration) and
    performs no replay-buffer pushes or network updates.

    Args:
        cfg: config object; cfg.policy selects 'agent', 'random' or
            'optimal_time_matching'.
        nenv: NetworkEnv instance.
        agent_main: trained main DQN (required when policy == 'agent').
        agent_sub: trained sub DQN (required when policy == 'agent').

    Returns:
        List of per-episode average rewards per task.
    """
    totalStep = cfg.totalStep
    GenerateTaskSpeed = cfg.GenerateTaskSpeed
    policy = cfg.policy
    random.seed(cfg.random_seed)
    if policy=='agent':
        assert agent_main is not None
        assert agent_sub is not None
    eps = cfg.train_eps
    print(f'policy:{policy},i_eps:{eps}')

    rewards = []

    cfg_str = '\n'.join([f"{key}: {value}" for key, value in cfg.__dict__.items()])
    print("Parsed arguments:\n" + cfg_str)
    for i_ep in range(cfg.test_eps):
        taskID = cfg.TaskInitID
        tempbuffer = {}  # temporary buffer: task id -> transition awaiting the final reward
        tempbuffer_work1 = {}  # same, for the in-group transitions
        ep_return = 0
        print(f'{i_ep + 1}/{cfg.test_eps}')
        nenv.reset()

        for step in range(int(totalStep)):  # simulation steps in one episode
            # if i_ep%49==0:
            #     nenv.render(i_ep,cfg.policy)
            if step % GenerateTaskSpeed == 0:
                for i in range(len(nenv.user_list)):
                    random_user = random.choice(nenv.user_list)  # pick a user at random
                    task = random_user.generate_task(taskID)  # NOTE(review): no mode='Train' here, unlike train()
                    taskID += 1
                    nenv.add_task(task)

            if nenv.task_queue.qsize() > 0:  # dispatch while tasks are queued
                task_len = nenv.task_queue.qsize()

                for i in range(task_len):  # assign every queued task in turn

                    state = nenv.get_state()

                    if policy == 'agent':
                        action = agent_main.predict_action(state)  # greedy action (no exploration)
                    elif policy == 'random':
                        _nodes = cfg.edge_num
                        action = random.randint(0, _nodes - 1)
                    elif policy == 'optimal_time_matching':

                        action = optimal_time_matching(state)

                    state_next, r, done, tid = nenv.step1(action)  # global (edge-level) step

                    tempbuffer[tid] = [state, action, r, state_next, done]

            for group_i in range(len(nenv.work_groups)):  # in-group task assignment
                if nenv.task_queue_edge_list[group_i].qsize() > 0:
                    task1_len = nenv.task_queue_edge_list[group_i].qsize()
                    for i in range(task1_len):
                        gstate = nenv.get_group_state(group_i)

                        if policy == 'agent':
                            action = agent_sub.predict_action(gstate)
                        elif policy == 'random':
                            _nodes = cfg.nodes_of_edge
                            action = random.randint(0, _nodes - 1)
                        elif policy == 'optimal_time_matching':

                            action = optimal_time_matching(gstate)

                        state_next, r, done, taskid = nenv.stepbyworker1(group_i, action)  # group-internal step

                        tempbuffer_work1[taskid] = [gstate, action, r, state_next, done]
                    # Record per-node compute/bandwidth load from the last observed group state.
                    # NOTE(review): state_next here is whatever the final loop iteration produced.
                    for i in range(int(len(state_next[2:]) / 4)):
                        nenv.cl[group_i][i].append(state_next[2 + 4 * i + 2])
                        nenv.bl[group_i][i].append(state_next[2 + 4 * i + 3])  # worker bandwidth load

            for group_i in range(len(nenv.work_groups)):  # task processing
                for worker in nenv.work_groups[group_i]:
                    worker.process_computing_task()
                    task_done = worker.process_transmit_task()
                    if len(task_done) > 0:
                        for done in task_done:  # NOTE(review): `done` shadows the step-flag above
                            taskid, reward = done[0], done[1]
                            sasrd_w1 = tempbuffer_work1[taskid]
                            sasrd_w1[2] = reward  # patch in the true (final) reward
                            sasrd_main = tempbuffer[taskid]
                            sasrd_main[2] = reward
                            ep_return += reward
        print('任务分配结束，处理未完成任务')
        # Drain phase: keep processing until all compute and bandwidth queues are empty.
        while 1:
            cqs = 0
            bqs = 0
            for w in nenv.edge_list:
                _, _, cq, bq = w.get_all_node_state()
                cqs += cq
                bqs += bq
            if cqs == 0 and bqs == 0:
                print('任务全部处理完成，回合结束')
                break
            else:
                for group_i in range(len(nenv.work_groups)):  # task processing
                    for worker in nenv.work_groups[group_i]:
                        worker.process_computing_task()
                        task_done = worker.process_transmit_task()
                        if len(task_done) > 0:
                            for done in task_done:
                                taskid, reward = done[0], done[1]

                                sasrd_w1 = tempbuffer_work1[taskid]
                                sasrd_w1[2] = reward
                                sasrd_main = tempbuffer[taskid]
                                sasrd_main[2] = reward
                                ep_return += reward
        task_num = taskID - cfg.TaskInitID
        av_task_speed = ep_return / task_num  # average reward per generated task
        rewards.append(av_task_speed)


        now = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')

        print(
            f"回合：{i_ep + 1}/{cfg.test_eps}，policy:{policy},总奖励：{ep_return:.2f}  任务数:{task_num},平均任务速度：{av_task_speed:.2f}  Time:{now}")
        nenv.info()

    print('完成测试！')

    return rewards




def smooth(data, weight=0.9):
    """Exponentially smooth a curve, like TensorBoard's smoothing slider.

    Args:
        data: sequence of numeric values.
        weight: smoothing factor in [0, 1); higher means smoother.

    Returns:
        A new list of the same length as ``data``; empty input returns [].
    """
    if not data:  # guard: original raised IndexError on data[0] for empty input
        return []
    last = data[0]
    smoothed = []
    for point in data:
        smoothed_val = last * weight + (1 - weight) * point  # running EMA
        smoothed.append(smoothed_val)
        last = smoothed_val
    return smoothed

def plot_rewards(cfg, tag='train', **kw):
    """Plot one or more reward curves and save the figure.

    Args:
        cfg: config object providing ``policy`` and ``env_name`` for the title.
        tag: label used in the plot title and in the output filename
            (figure is written to ``../images/{tag}.png``).
        **kw: mapping of curve name -> sequence of reward values.
    """
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    today = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    ax.set_title(f"{tag} curve of {cfg.policy} for {cfg.env_name} -T:{today}")
    ax.set_xlabel('episodes')  # fixed typo: was 'epsiodes'
    for name, values in kw.items():
        ax.plot(values, label=name)
    ax.legend()
    os.makedirs('../images', exist_ok=True)  # don't fail if the output dir is missing
    plt.savefig(f'../images/{tag}.png')
    plt.close(fig)  # release the figure so repeated calls don't accumulate memory
def train_agent():
    """Train the hierarchical DQN agents and plot the per-policy reward curves."""
    g_speed = 5
    env, main_agent, sub_agent, cfg = env_agent_config(g_speed)
    cfg.GenerateTaskSpeed = g_speed

    curves = {}
    for policy_name in ['agent']:
        cfg.policy = policy_name
        episode_rewards, _ = train(cfg, env, main_agent, sub_agent)
        curves[policy_name] = episode_rewards

    print(curves)
    plot_rewards(cfg, tag=f'g_speed_{g_speed}_debug_state', **curves)
def test_agent(main_model_name='dqn_main_model.pkl', sub_model_name='dqn_sub_model.pkl'):
    """Evaluate saved DQN agents against the baseline policies and plot results.

    Fix: the two parameters were previously shadowed by hard-coded lists and
    silently ignored; they are now used as the first checkpoint pair (the
    defaults reproduce the old behaviour exactly).

    Args:
        main_model_name: filename of the first main-agent checkpoint to load.
        sub_model_name: filename of the first sub-agent checkpoint to load.
    """
    g_speed = 5
    nenv, agent_main, agent_sub, cfg = env_agent_config(g_speed)
    model_dir = 'E:\\项目\\rf_computing\\models\\'
    # First entry honours the caller-supplied names; second is the continue-task checkpoint.
    main_model_names = [main_model_name, 'dqn_main_model_continue_task.pkl']
    sub_model_names = [sub_model_name, 'dqn_sub_model_continue_task.pkl']
    dic = {}
    for i, p in enumerate(['agent', 'agent']):
        # SECURITY: pickle.load executes arbitrary code; only load trusted local model files.
        with open(model_dir + main_model_names[i], 'rb') as f1:
            agent_main = pickle.load(f1)
        with open(model_dir + sub_model_names[i], 'rb') as f2:
            agent_sub = pickle.load(f2)
        cfg.policy = p
        reds = test(cfg, nenv, agent_main, agent_sub)
        dic[p + str(i)] = reds

    # Baseline policies for comparison (agents are ignored by these policies).
    for p in ['random', 'optimal_time_matching']:
        cfg.policy = p
        reds = test(cfg, nenv, agent_main, agent_sub)
        dic[p] = reds
    print(dic)
    plot_rewards(cfg, tag=f'test_on_4_alo', **dic)

    
    

if __name__=='__main__':
    # Removed redundant local `import os` — os is already imported at the top of the file.
    # Disable HTTP(S) proxies so local services (e.g. visdom) are reached directly.
    os.environ['http_proxy'] = ''
    os.environ['https_proxy'] = ''
    # train_agent()
    test_agent()


