import time
import configparser
from arguments import get_args
from network_playground import NetworkEnv
from algorithm.A2C import ActorCritic
import matplotlib.pyplot as plt
import torch
import numpy as np
from tqdm import tqdm
# Read environment space dimensions. The values parsed from env_config.ini are
# deliberately overridden with hard-coded constants below (kept from the
# original code) -- remove the override lines to honour the config file.
# fallback= makes a missing file/section/option non-fatal instead of raising.
config = configparser.ConfigParser()
config.read('env_config.ini')
new_obs_dim = config.getint('space', 'new_obs_dim', fallback=22)
new_obs_dim = 22
new_action_dim = config.getint('space', 'new_action_dim', fallback=5)
new_action_dim = 5
# NOTE: advantages are computed episodically (per-episode, PPO-style rollout).

class A2CConfig:
    """Hyper-parameter container for the A2C agent."""

    def __init__(self):
        # Discounting and hardware placement.
        self.gamma = 0.99
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        # Space sizes come from the module-level values parsed above.
        self.n_states, self.n_actions = new_obs_dim, new_action_dim
        # Network widths and optimiser step sizes (actor/critic use the same).
        self.actor_hidden_dim = self.critic_hidden_dim = 256
        self.actor_lr = self.critic_lr = 0.0003


def env_agent_config(args, cfg):
    """Build the network environment and the A2C agent.

    Args:
        args: parsed command-line arguments, forwarded to NetworkEnv.
        cfg: accepted for interface compatibility but currently unused --
            the hyper-parameters below are hard-coded. NOTE(review): they
            also disagree with A2CConfig (lr, gamma); unify intentionally.

    Returns:
        (nenv, agent): the environment and the ActorCritic agent.
    """
    # Network environment initialisation.
    nenv = NetworkEnv(args)

    # Hard-coded hyper-parameters (values kept from the original code).
    actor_lr = 2e-4
    critic_lr = 5e-4
    hidden_dim = 256
    gamma = 0.98
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
        "cpu")

    torch.manual_seed(0)  # fixed seed for reproducibility

    state_dim = nenv.observation_space.shape[0]
    action_dim = nenv.action_space.n
    agent = ActorCritic(state_dim, hidden_dim, action_dim, actor_lr, critic_lr,
                        gamma, device)

    print(f"状态空间维度：{state_dim}，动作空间维度：{action_dim}")

    return nenv, agent
def train_onpolicy(args, nenv, agent):
    """Train the agent on-policy: collect one full episode of transitions,
    then call ``agent.update`` once per episode.

    An episode ends when the environment signals ``done`` (task generation
    stops) and every generated task has been completed by a worker.

    Args:
        args: parsed arguments; uses ``GenerateTaskSpeed`` and ``TaskInitID``.
        nenv: NetworkEnv instance.
        agent: agent exposing ``take_action(state)`` and ``update(dict)``.

    Returns:
        List of per-episode cumulative rewards.
    """
    GenerateTaskSpeed = args.GenerateTaskSpeed

    num_episodes = 200
    args_str = '\n'.join([f"{key}: {value}" for key, value in vars(args).items()])
    print("Parsed arguments:\n" + args_str)
    rewards_all = []

    with tqdm(total=int(num_episodes), desc='Iteration ') as pbar:
        for i_episode in range(int(num_episodes)):
            episode_return = 0

            transition_dict = {'states': [], 'actions': [], 'next_states': [], 'rewards': [], 'dones': []}
            nenv.reset()
            done_gen = 0   # environment signalled "stop generating tasks"
            done_all = 0   # episode finished (all generated tasks completed)
            donetask = 0   # count of completed tasks
            ep_step = 0
            taskID = args.TaskInitID
            # Transitions keyed by task id; reward (index 3) is a placeholder
            # until the worker reports the task as finished.
            tempbuffer = {}
            while not done_all:
                con_state = nenv.get_state()  # continuous state, used for plotting
                nenv.cl.append(con_state[4])  # worker 0 computing load
                nenv.bl.append(con_state[5])  # worker 0 bandwidth load
                # Periodically generate tasks from user attributes.
                if not done_gen:  # stop assigning tasks after the stop signal
                    if ep_step % GenerateTaskSpeed == 0:
                        for index, user in enumerate(nenv.user_list):
                            task = user.generate_task(taskID)
                            taskID += 1
                            nenv.add_task(task)
                if nenv.task_queue.qsize() > 0:  # dispatch every queued task
                    task_len = nenv.task_queue.qsize()
                    for i in range(task_len):
                        state = nenv.get_state()
                        action = agent.take_action(state)  # select action
                        next_state, reward, done, tid = nenv.step(action)
                        if done:
                            done_gen = 1
                        # Store with done-flag 0; reward is overwritten later
                        # when the task actually completes.
                        tempbuffer[tid] = [state, action, next_state, reward, 0]
                    if done_gen:
                        # NOTE(review): this is a no-op (the flag is already
                        # 0). The last transition was presumably meant to be
                        # marked terminal with 1 -- confirm before changing,
                        # as it affects bootstrapping in agent.update.
                        tempbuffer[tid][4] = 0

                # Advance each worker one tick; finished tasks report their
                # final reward, which replaces the placeholder in tempbuffer.
                for index, worker in enumerate(nenv.worker_list):
                    worker.process_computing_task()
                    task_done = worker.process_transmit_task()
                    if len(task_done) > 0:
                        for finished in task_done:
                            taskid, reward = finished[0], finished[1]
                            tempbuffer[taskid][3] = reward  # task done: set reward
                            donetask += 1

                if done_gen:
                    # NOTE(review): equality assumes taskID counts from 0
                    # (args.TaskInitID == 0); otherwise this never fires.
                    if taskID == donetask:
                        # Flush transitions to the training buffer in task-id
                        # order so trajectories stay chronological.
                        sorted_dict = dict(sorted(tempbuffer.items(), key=lambda x: x[0]))
                        for k, v in sorted_dict.items():
                            state, action, next_state, reward, done = v
                            transition_dict['states'].append(state)
                            transition_dict['actions'].append(action)
                            transition_dict['next_states'].append(next_state)
                            transition_dict['rewards'].append(reward)
                            transition_dict['dones'].append(done)
                            episode_return += reward
                        done_all = 1  # end this episode

                ep_step += 1

            # Record the return before computing the running mean so the
            # 10-episode average includes the current episode.
            rewards_all.append(episode_return)
            print(f"回合：{i_episode+1}/{num_episodes}，奖励：{episode_return:.2f},缓存长度：{len(transition_dict['states'])}")
            pbar.update(1)
            agent.update(transition_dict)
            if (i_episode + 1) % 10 == 0:
                pbar.set_postfix({'episode': '%d' % (i_episode),
                                  'return': '%.3f' % np.mean(rewards_all[-10:])})
    print('完成训练！')
    return rewards_all
def test(cfg, env, agent):
    print("开始测试！")
    rewards = []  # 记录所有回合的奖励
    steps = []
    for i_ep in range(cfg.test_eps):
        ep_reward = 0  # 记录一回合内的奖励
        ep_step = 0
        state,_ = env.reset()  # 重置环境，返回初始状态
        for _ in range(cfg.ep_max_steps):
            ep_step+=1
            action = agent.predict_action(state)  # 选择动作
            next_state, reward, done, _,_ = env.step(action)  # 更新环境，返回transition
            state = next_state  # 更新下一个状态
            ep_reward += reward  # 累加奖励
            if done:
                break
        steps.append(ep_step)
        rewards.append(ep_reward)
        print(f"回合：{i_ep+1}/{cfg.test_eps}，奖励：{ep_reward:.2f}")
    print("完成测试")
    env.close()
    return {'rewards':rewards}




def smooth(data, weight=0.9):
    """Exponentially smooth a curve, like TensorBoard's smoothing slider.

    Args:
        data: sequence of numbers to smooth.
        weight: smoothing factor in [0, 1); larger means smoother.

    Returns:
        A list of smoothed values with the same length as ``data``. An empty
        input returns an empty list (the original raised IndexError on
        ``data[0]``).
    """
    if not data:
        return []
    smoothed = []
    last = data[0]  # seed with the first raw value
    for point in data:
        last = last * weight + (1 - weight) * point  # exponential moving average
        smoothed.append(last)
    return smoothed

def plot_rewards(rewards, cfg, tag='train'):
    """Plot the raw and smoothed reward curves for one run.

    Args:
        rewards: sequence of per-episode rewards.
        cfg: config object; reads ``cfg.device`` and ``cfg.env_name``.
            NOTE(review): __main__ passes argparse ``args`` here -- confirm
            those attributes exist on it, otherwise this raises AttributeError.
        tag: 'train' or 'test', used in the plot title.
    """
    plt.ioff()  # ensure interactive mode is off before the blocking show()
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))

    ax.set_title(f"{tag}ing curve on {cfg.device} of A2C for {cfg.env_name}")
    ax.set_xlabel('episodes')  # fixed typo: was 'epsiodes'
    ax.plot(rewards, label='rewards')
    ax.plot(smooth(rewards), label='smoothed')
    ax.legend()
    plt.show()

if __name__=='__main__':
    args = get_args()  # parse run configuration from arguments.py

    # cfg argument is unused by env_agent_config, so an empty string is passed.
    nenv,agent = env_agent_config(args,'')
    rewards = train_onpolicy(args, nenv, agent)
    print(rewards)
    # NOTE(review): plot_rewards reads cfg.device and cfg.env_name; argparse
    # args may not define env_name -- verify, or pass an A2CConfig instead.
    plot_rewards(rewards, args)

