import time

from arguments import get_args
from network_playground import NetworkEnv
from algorithm.DQN import DQN,ReplayBuffer
import matplotlib.pyplot as plt
import seaborn as sns
# TODO: 1. record task completion time
# TODO: 2. add the replay buffer
# TODO: 3. plug in the reinforcement-learning algorithm

def env_agent_config(args):
    """Build the simulation environment and the DQN agent from parsed arguments.

    Args:
        args: parsed command-line configuration (see arguments.get_args).

    Returns:
        (env, agent) tuple ready to be passed to train().
    """
    env = NetworkEnv(args)
    dqn_agent = DQN(args)

    # Report observation/action dimensionality as a quick sanity check.
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    print(f"状态空间维度：{state_dim}，动作空间维度：{action_dim}")

    return env, dqn_agent
def train(args, nenv, agent):
    """Run the DQN training loop on the network environment.

    Each episode resets the environment and advances it for ``args.totalStep``
    simulation steps.  Tasks are generated periodically from each user profile,
    assigned one by one by the agent, and the resulting transitions are parked
    in a temporary buffer until a worker reports the task finished (which is
    when the true reward becomes known).  Transitions whose reward never
    arrives before the episode ends are simply dropped.

    Args:
        args: parsed configuration (totalStep, GenerateTaskSpeed, train_eps,
            TaskInitID, ...).
        nenv: the network environment instance.
        agent: DQN agent exposing sample_action / memory.push / update / epsilon.

    Returns:
        List of cumulative rewards, one entry per episode.
    """
    totalStep = args.totalStep
    GenerateTaskSpeed = args.GenerateTaskSpeed

    rewards = []
    args_str = '\n'.join([f"{key}: {value}" for key, value in vars(args).items()])
    print("Parsed arguments:\n" + args_str)
    for i_ep in range(args.train_eps):
        taskID = args.TaskInitID
        tempbuffer = {}  # transitions awaiting their delayed reward, keyed by task id
        ep_return = 0
        ep_step = 0
        print(f'{i_ep+1}/{args.train_eps}')
        nenv.reset()
        for step in range(int(totalStep)):
            con_state = nenv.get_state()  # continuous state, recorded for plotting
            nenv.cl.append(con_state[4])  # worker 0 compute load — assumes state layout; TODO confirm
            nenv.bl.append(con_state[5])  # worker 0 bandwidth load

            # Periodically generate tasks from every user's profile.
            if ep_step % GenerateTaskSpeed == 0:
                for user in nenv.user_list:
                    task = user.generate_task(taskID)
                    taskID += 1
                    nenv.add_task(task)

            # Assign every queued task, one agent decision per task.
            if nenv.task_queue.qsize() > 0:
                task_len = nenv.task_queue.qsize()
                for _ in range(task_len):
                    state = nenv.get_state()
                    action = agent.sample_action(state)
                    state_next, r, done, tid = nenv.step(action)
                    # r here is only a placeholder; the real reward is written in
                    # once the worker finishes the task (see below).
                    tempbuffer[tid] = [state, action, r, state_next, done]

            # Advance every worker; finished tasks come back as (task_id, reward).
            for worker in nenv.worker_list:
                worker.process_computing_task()
                task_done = worker.process_transmit_task()
                # NOTE: loop variable renamed from `done` — the original shadowed
                # the transition `done` flag stored in tempbuffer above.
                for finished in task_done:
                    taskid, reward = finished[0], finished[1]
                    # Single pop replaces the original lookup-then-pop pair; a
                    # KeyError here would mean a task finished that was never assigned.
                    sasrd = tempbuffer.pop(taskid)
                    sasrd[2] = reward
                    ep_return += reward
                    agent.memory.push(tuple(sasrd))
            ep_step += 1
            agent.update()

        rewards.append(ep_return)
        print(f"回合：{i_ep + 1}/{args.train_eps}，奖励：{ep_return:.2f}，Epislon：{agent.epsilon:.3f}")

    print('完成训练！')
    return rewards
def test(cfg, env, agent):
    """Evaluate a trained agent greedily for ``cfg.test_eps`` episodes.

    Args:
        cfg: configuration with test_eps and ep_max_steps.
        env: gym-style environment (reset/step/close).
        agent: trained agent exposing predict_action.

    Returns:
        dict with the per-episode reward list under 'rewards'.
    """
    print("开始测试！")
    episode_rewards = []  # cumulative reward of every evaluation episode
    episode_lengths = []  # number of steps taken in every episode
    for ep in range(cfg.test_eps):
        total = 0
        length = 0
        obs, _ = env.reset()  # reset the environment; returns the initial state
        for _ in range(cfg.ep_max_steps):
            length += 1
            act = agent.predict_action(obs)  # greedy action, no exploration
            obs, reward, finished, _, _ = env.step(act)
            total += reward
            if finished:
                break
        episode_lengths.append(length)
        episode_rewards.append(total)
        print(f"回合：{ep+1}/{cfg.test_eps}，奖励：{total:.2f}")
    print("完成测试")
    env.close()
    return {'rewards': episode_rewards}




def smooth(data, weight=0.9):
    """Exponential moving average, similar to TensorBoard's smoothed curves.

    Args:
        data: sequence of numbers to smooth.
        weight: smoothing factor in [0, 1); larger values smooth more.

    Returns:
        List of smoothed values, same length as ``data`` ([] for empty input).
    """
    if not data:
        return []  # original crashed with IndexError on data[0] for empty input
    last = data[0]
    smoothed = []
    for point in data:
        smoothed_val = last * weight + (1 - weight) * point  # EMA update
        smoothed.append(smoothed_val)
        last = smoothed_val
    return smoothed

def plot_rewards(rewards, cfg, tag='train'):
    """Plot the raw and smoothed reward curves for one run.

    Args:
        rewards: per-episode reward list.
        cfg: config carrying device / algo_name / env_name for the title.
        tag: 'train' or 'test'; only used in the figure title.
    """
    plt.ioff()  # disable interactive mode so plt.show() blocks until closed
    figure, axis = plt.subplots(1, 1, figsize=(10, 7))

    axis.set_title(f"{tag}ing curve on {cfg.device} of {cfg.algo_name} for {cfg.env_name}")
    axis.set_xlabel('epsiodes')
    axis.plot(rewards, label='rewards')
    axis.plot(smooth(rewards), label='smoothed')
    axis.legend()
    plt.show()
if __name__ == '__main__':
    # Entry point: build env + agent, train, then plot the learning curve.
    config = get_args()  # parse configuration from arguments.py
    env, dqn_agent = env_agent_config(config)
    episode_rewards = train(config, env, dqn_agent)
    print(episode_rewards)
    plot_rewards(episode_rewards, config)

