import datetime
import os
import time
import numpy as np
import random
from arguments import get_args
from network_playground_v2 import NetworkEnv
from algorithm.DQN import DQN
import matplotlib.pyplot as plt
from config.DQNConfig import DQNConfig
from config.ENVConfig import ENVConfig
from config.TRAINConfig import TrainConfig
"""
两个个agent,一个main,子agent共享，分布训练，先训练子agent再训练main agent
"""

def env_agent_config():
    """Build the network environment and the two DQN agents.

    The main agent dispatches tasks at the environment level
    (n_states=10 = 4*2+2, 2 actions); the sub agent is shared by all
    worker groups (n_states=14 = 4*3+2, 3 actions).

    Returns:
        tuple: (nenv, agent_main, agent_sub, cfg) — the environment, the
        main dispatcher agent, the shared worker-group agent, and the
        environment config.
    """
    # Network environment initialisation
    cfg = ENVConfig(visenv='fenbu train')
    nenv = NetworkEnv(cfg)

    # Main agent config: state dim 10 (4*2+2), binary action space
    dqncfg = DQNConfig(n_states=10, n_actions=2, hidden_dim=256, lr=0.001, buffer_size=int(5e5))
    print(dqncfg.__dict__)
    # Sub agent config: state dim 14 (4*3+2), three actions
    dqn_cfgsub = DQNConfig(n_states=14, n_actions=3, hidden_dim=256, lr=0.002, buffer_size=int(5e5))
    agent_main = DQN(dqncfg)
    agent_sub = DQN(dqn_cfgsub)

    # Log the environment's own declared spaces (informational only — the
    # agents above use hard-coded dimensions).
    n_states = nenv.observation_space.shape[0]
    n_actions = nenv.action_space.n
    print(f"0级环境状态空间维度：{n_states}，动作空间维度：{n_actions}")
    print(nenv.info())

    return nenv, agent_main, agent_sub, cfg

def train(cfg,nenv,agent_main, agent_sub):
    """Two-phase training loop: train the shared sub-agent first, then the main agent.

    While ``train_sub`` is True only ``agent_sub`` learns (the main dispatcher
    acts randomly).  Once ``agent_sub.epsilon`` decays to <= 0.1 the loop flips
    to training ``agent_main`` for the remaining episodes.

    Rewards are delayed: each assignment transition is parked in a temp buffer
    keyed by task id, and is only pushed into the agent's replay memory once
    the task finishes and its reward arrives.  Transitions whose reward never
    arrives within the episode are discarded.

    Args:
        cfg: training/env config (uses totalStep, GenerateTaskSpeed,
            train_eps, TaskInitID).
        nenv: the NetworkEnv instance.
        agent_main: DQN for top-level task dispatch (nenv.step1).
        agent_sub: DQN shared by all worker groups (nenv.stepbyworker1).

    Returns:
        tuple: (rewards, w1_returns) — per-episode returns of the main agent
        and of the worker-group (sub) agent respectively.
    """
    totalStep =cfg.totalStep
    GenerateTaskSpeed = cfg.GenerateTaskSpeed
    # NOTE(review): the configured value is immediately overridden by this experiment constant
    GenerateTaskSpeed = 10 # 10->5

    rewards = []
    w1_returns = []
    cfg_str = '\n'.join([f"{key}: {value}" for key, value in cfg.__dict__.items()])
    print("Parsed arguments:\n" + cfg_str)
    train_main = False
    train_sub = True
    for i_ep in range(cfg.train_eps):
        taskID = cfg.TaskInitID
        tempbuffer ={}  # temp buffer: main-agent transitions awaiting their delayed reward
        tempbuffer_work1 = {}  # same, for the sub (worker-group) agent
        ep_return = 0
        w1_return = 0

        print(f'{i_ep+1}/{cfg.train_eps}')
        nenv.reset()

        for step in range(int(totalStep)):  # fixed-length rollout, no terminal state; unrewarded tempbuffer entries are dropped at episode end
            if i_ep%20==0:
                nenv.render(i_ep)
            # print(step)
            con_state = nenv.get_state() # continuous state, used for plotting
            # print(con_state)

            # Periodically generate correlated tasks

            if step % GenerateTaskSpeed == 0:
                # Tasks are generated from each user's attributes
                for index, user in enumerate(nenv.user_list):
                    task = user.generate_task(taskID)
                    taskID += 1
                    nenv.add_task(task)
                # for i in range(len(nenv.user_list)):
                #     random_user = random.choice(nenv.user_list)
                #     task = random_user.generate_task(taskID)
                #     taskID += 1
                #     nenv.add_task(task)


            if nenv.task_queue.qsize()>0:  # dispatch while there are queued tasks
                task_len = nenv.task_queue.qsize()
                if train_main:
                    for i in range(task_len):  # dispatch every queued task in turn
                        # print(f'分配任务{i+1}/{task_len}')
                        state = nenv.get_state()
                        action = agent_main.sample_action(state)  # choose an action
                        state_next,r,done,tid = nenv.step1(action)  # global reward
                        # if ep_step==9990 and i ==task_len-1:
                        #     print('本回合最后一次分配')
                        #     done = True
                        tempbuffer[tid]=[state,action,r,state_next,done]
                else:
                    # Main agent not training yet: dispatch randomly
                    action = random.choice([0, 1])
                    nenv.step1(action)  # global reward

            for group_i in range(len(nenv.work1_group)):  # intra-group task assignment
                if nenv.task_queue_w1_list[group_i].qsize()>0:
                    task1_len = nenv.task_queue_w1_list[group_i].qsize()
                    for i in range(task1_len):
                        gstate = nenv.get_group_state(group_i)
                        action = agent_sub.sample_action(gstate)
                        state_next, r, done, taskid = nenv.stepbyworker1(group_i,action)   # internal (group) reward

                        tempbuffer_work1[taskid] =[gstate,action,r,state_next,done]
                    # Record per-worker load traces from the last post-assignment state.
                    # NOTE(review): assumes group state layout = 2 header values then
                    # 4 values per worker, with compute load at offset +2 and
                    # bandwidth load at offset +3 — confirm against get_group_state.
                    for i in range(int(len(state_next[2:])/4)):
                        nenv.cl[group_i][i].append(state_next[2+4*i+2])
                        nenv.bl[group_i][i].append(state_next[2+4*i+3])# worker bandwidth load



            for group_i in  range(len(nenv.work1_group)) : # task processing
                for worker in nenv.work1_group[group_i]:
                    worker.process_computing_task()
                    task_done = worker.process_transmit_task()
                    if len(task_done)>0:
                        for done in task_done:
                            taskid,reward = done[0],done[1]
                            if train_main==False:
                                # Reward arrived: patch it into the stored transition and push
                                sasrd_w1 = tempbuffer_work1[taskid]
                                sasrd_w1[2] = reward
                                agent_sub.memory.push(tuple(sasrd_w1))
                                w1_return += reward
                            else:
                                sasrd_main = tempbuffer[taskid]
                                sasrd_main[2] = reward
                                ep_return += reward
                                agent_main.memory.push(tuple(sasrd_main))  # update main agent's replay buffer
                            # print(f'写入缓冲池：{tuple(sasrd)}')
            if train_main==False:
                agent_sub.update()
            else:
                agent_main.update()

            if agent_sub.epsilon<=0.1  and train_sub==True:  # sub-agent exploration decayed: switch to training the main agent
                train_main = True
                train_sub = False
                print('开始更新主agent')
                break



        if i_ep % 10 == 0:
            print('main buffer 大小：',len(agent_main.memory))
            print('sub buffer 大小：',len(agent_sub.memory))
        rewards.append(ep_return)
        w1_returns.append(w1_return)
        cl_str = np.array_str(np.array(nenv.cl))
        bl_str = np.array_str(np.array(nenv.bl))
        # Persist the per-worker load traces to the log file
        now = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
        with open('./log/log.txt', 'a') as f:
            f.write(now+f' {i_ep+1}_cl_bl|'+cl_str+'| '+bl_str+'\n')
        if train_main:
            print(f"回合：{i_ep + 1}/{cfg.train_eps}，奖励：{ep_return:.2f};{w1_return} mainEpislon：{agent_main.epsilon:.3f}")
        else:
            print(f"回合：{i_ep + 1}/{cfg.train_eps}，奖励：{ep_return:.2f};{w1_return} subEpislon：{agent_sub.epsilon:.3f}")


    print('完成训练！')
    return rewards,w1_returns
def test(cfg, env, agent):
    print("开始测试！")
    rewards = []  # 记录所有回合的奖励
    steps = []
    for i_ep in range(cfg.test_eps):
        ep_reward = 0  # 记录一回合内的奖励
        ep_step = 0
        state,_ = env.reset()  # 重置环境，返回初始状态
        for _ in range(cfg.ep_max_steps):
            ep_step+=1
            action = agent.predict_action(state)  # 选择动作
            next_state, reward, done, _,_ = env.step(action)  # 更新环境，返回transition
            state = next_state  # 更新下一个状态
            ep_reward += reward  # 累加奖励
            if done:
                break
        steps.append(ep_step)
        rewards.append(ep_reward)
        print(f"回合：{i_ep+1}/{cfg.test_eps}，奖励：{ep_reward:.2f}")
    print("完成测试")
    env.close()
    return {'rewards':rewards}




def smooth(data, weight=0.9):
    """Exponentially smooth a curve, TensorBoard-style.

    Each output point is ``last * weight + (1 - weight) * point``, seeded
    with the first data value.

    Args:
        data: sequence of numbers.
        weight: smoothing factor in [0, 1); higher means smoother.

    Returns:
        list: smoothed values, same length as ``data`` ([] for empty input).
    """
    if not data:  # guard: original crashed with IndexError on empty input
        return []
    last = data[0]
    smoothed = []
    for point in data:
        smoothed_val = last * weight + (1 - weight) * point
        smoothed.append(smoothed_val)
        last = smoothed_val
    return smoothed

def plot_rewards(rewards, w1_returns, cfg, tag='train'):
    """Plot raw, smoothed, and worker-group reward curves on one axes.

    Args:
        rewards: per-episode returns of the main agent.
        w1_returns: per-episode returns of the worker-group (sub) agent.
        cfg: config providing device / algo_name / env_name for the title.
        tag: 'train' or 'test', used in the title.
    """
    plt.ioff()  # non-interactive: only draw when plt.show() is called
    figure, axis = plt.subplots(1, 1, figsize=(10, 7))

    axis.set_title(f"{tag}ing curve on {cfg.device} of {cfg.algo_name} for {cfg.env_name}-  main-sub speed: 1:2,speed 5 ")
    axis.set_xlabel('epsiodes')
    axis.plot(rewards, label='rewards')
    axis.plot(smooth(rewards), label='smoothed')
    axis.plot(w1_returns, label='w1')
    axis.legend()

    plt.show()

if __name__=='__main__':
    # Disable HTTP proxies so local environment/visualisation traffic is not
    # routed through an external proxy.  (Redundant `import os` removed —
    # os is already imported at the top of the file.)
    os.environ['http_proxy'] = ''
    os.environ['https_proxy'] = ''

    nenv, agent_main, agent_sub, cfg = env_agent_config()

    # Phase 1 trains the sub-agent, phase 2 the main agent (see train()).
    rewards, w1_returns = train(cfg, nenv, agent_main, agent_sub)

    tcfg = TrainConfig()
    print(rewards)
    plot_rewards(rewards, w1_returns, tcfg)