import random

import time
import configparser
from arguments import get_args
from network_playground import NetworkEnv
from algorithm.SAC_d import SAC
import matplotlib.pyplot as plt
import torch
from tqdm import tqdm
from utils import rl_utils
import numpy as np
# Read environment-space dimensions from env_config.ini; SAConfig below
# consumes these module-level values.
# NOTE(review): if 'env_config.ini' is missing, config.read returns silently
# and config.get raises NoSectionError — confirm the file ships with the repo.
config = configparser.ConfigParser()
config.read('env_config.ini')
new_obs_dim = int(config.get('space', 'new_obs_dim'))  # observation-space size
new_action_dim = int(config.get('space', 'new_action_dim'))  # action-space size
# NOTE(review): the string below says "PPO episodic advantage computation",
# but this file trains a discrete SAC agent — looks like a leftover from an
# earlier PPO script; confirm and remove if so.
"""
PPO 回合法计算优势
"""

class SAConfig:
    """Hyper-parameter container for a SAC agent.

    State/action sizes come from the module-level ``new_obs_dim`` /
    ``new_action_dim`` values parsed from ``env_config.ini``.
    """

    def __init__(self):
        # Run on GPU whenever torch can see one.
        if torch.cuda.is_available():
            self.device = 'cuda'
        else:
            self.device = 'cpu'
        self.gamma = 0.99  # reward discount factor
        # Space dimensions (read from env_config.ini at import time).
        self.n_states = new_obs_dim
        self.n_actions = new_action_dim
        # Network widths and learning rates for actor / critic.
        self.actor_hidden_dim = 256
        self.critic_hidden_dim = 256
        self.actor_lr = 0.0003
        self.critic_lr = 0.0003

def env_agent_config(args, ent):
    """Build the network environment and a discrete SAC agent for it.

    Args:
        args: parsed command-line arguments, forwarded to NetworkEnv.
        ent: target entropy for SAC temperature tuning.  For continuous
            actions the convention is -|A|; for discrete actions it is
            typically a fraction of log(|A|) — TODO confirm which the
            SAC implementation expects.

    Returns:
        tuple (nenv, agent): the environment instance and the SAC agent.
    """
    # Network environment initialization.
    nenv = NetworkEnv(args)

    # SAC hyper-parameters.
    actor_lr = 5e-4
    critic_lr = 1e-3
    alpha_lr = 1e-3        # temperature (entropy coefficient) learning rate
    hidden_dim = 256
    gamma = 0.99
    tau = 0.005            # soft-update rate for target networks
    target_entropy = ent

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
        "cpu")

    # Derive the space sizes once (the original computed them twice).
    state_dim = nenv.observation_space.shape[0]
    action_dim = nenv.action_space.n
    print(f"状态空间维度：{state_dim}，动作空间维度：{action_dim}")

    agent = SAC(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, alpha_lr,
                target_entropy, tau, gamma, device)

    return nenv, agent
def train_offpolicy(nenv, agent, num_episodes, replay_buffer, minimal_size, batch_size):
    """Off-policy training loop for the SAC agent on the network environment.

    Runs ``num_episodes`` episodes split across 10 tqdm progress bars.  Each
    episode rolls the environment for ``totalStep`` steps, periodically
    injecting user-generated tasks.  Because a task's reward is only known
    when a worker finishes it, transitions are parked in ``tempbuffer``
    keyed by task id and moved into ``replay_buffer`` once the reward
    arrives.  The agent is updated every step once the buffer exceeds
    ``minimal_size``.

    NOTE(review): this function reads the module-level ``args``
    (GenerateTaskSpeed, TaskInitID) instead of taking it as a parameter —
    it only works when the ``__main__`` block has defined ``args`` first.

    Args:
        nenv: NetworkEnv instance.
        agent: SAC agent exposing take_action() and update().
        num_episodes: total number of training episodes.
        replay_buffer: buffer exposing add(), sample(), size().
        minimal_size: minimum buffer size before updates begin.
        batch_size: sample size per update.

    Returns:
        list of per-episode returns (sum of rewards credited that episode).
    """
    totalStep = 10000
    GenerateTaskSpeed = args.GenerateTaskSpeed  # env steps between task batches

    return_list = []
    args_str = '\n'.join([f"{key}: {value}" for key, value in vars(args).items()])
    print("Parsed arguments:\n" + args_str)

    for tn in range(10):
        with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % tn) as pbar:
            for i_episode in range(int(num_episodes/10)):
                taskID = args.TaskInitID
                # Temporary buffer: transitions waiting for their delayed reward.
                # NOTE(review): entries are never popped (pop is commented out
                # below), so this grows for the whole episode — confirm whether
                # finished task ids can recur before worrying about memory.
                tempbuffer ={}  #临时bufffer存放奖励

                ep_return = 0


                nenv.reset()

                # Roll out a fixed number of steps to collect samples.  There is
                # no terminal state; tempbuffer entries that never received a
                # reward by the end of the episode are simply discarded.
                for step in range(int(totalStep)):  #训练一定步长，取得样本，没有终止，步长完成后没有得到奖励的tempbuffer抛弃
                    # nenv.render()

                    con_state = nenv.get_state() # continuous state, used for plotting
                    # print(con_state)
                    nenv.cl.append(con_state[4])  # worker 0 computing load
                    nenv.bl.append(con_state[5])  # worker 0 bandwidth load
                    # Periodically generate correlated tasks.
                    if step % GenerateTaskSpeed == 0:
                        # Each user generates one task from its own attributes.
                        for index, user in enumerate(nenv.user_list):
                            task = user.generate_task(taskID)
                            taskID += 1
                            nenv.add_task(task)
                    if nenv.task_queue.qsize()>0:  # dispatch only when tasks are queued
                        task_len = nenv.task_queue.qsize()


                    #
                        # Assign every currently-queued task in turn.
                        for i in range(task_len):
                            # print(f'分配任务{i+1}/{task_len}')
                            state = nenv.get_state()

                            action = agent.take_action(state)
                            # action = random.randint(0,4)
                            #
                            #
                            # tid identifies the task so the delayed reward can
                            # be matched back to this transition later.
                            next_state,reward,done,tid = nenv.step(action)

                            tempbuffer[tid]=[state, action, reward, next_state, done]

                    # Advance every worker; finished tasks yield (task_id, reward).
                    for index, worker in enumerate(nenv.worker_list):
                        worker.process_computing_task()
                        task_done = worker.process_transmit_task()
                        if len(task_done)>0:
                            # NOTE(review): loop variable 'done' shadows the
                            # transition's done-flag; the flag is re-read from
                            # tempbuffer below so behavior is correct, but the
                            # name is confusing.
                            for done in task_done:
                                taskid,reward = done[0],done[1]

                                # Patch the real (delayed) reward into the stored
                                # transition, then commit it to the replay buffer.
                                tempbuffer[taskid][2] = reward
                                # tempbuffer.pop(taskid)
                                state, action, reward, next_state, done = tempbuffer[taskid][0],tempbuffer[taskid][1],tempbuffer[taskid][2],tempbuffer[taskid][3],tempbuffer[taskid][4]
                                replay_buffer.add(state, action, reward, next_state, done)
                                ep_return +=reward

                                # print(f'写入缓冲池：{tuple(sasrd)}')

                    # One gradient update per env step once enough samples exist.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r,
                                           'dones': b_d}

                        agent.update(transition_dict)


                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({'episode': '%d' % (num_episodes / 10 * tn + i_episode + 1),
                                      'return': '%.3f' % np.mean(return_list[-10:])})
                pbar.update(1)

                return_list.append(ep_return)

                print(f"回合：{num_episodes / 10 * tn + i_episode + 1}/{num_episodes}，奖励：{ep_return:.2f},缓存长度：{replay_buffer.size()}")



    print('完成训练！')
    return return_list
def test(cfg, env, agent):
    """Evaluate a trained agent for ``cfg.test_eps`` episodes.

    Uses the agent's deterministic ``predict_action`` (no exploration).
    Each episode runs at most ``cfg.ep_max_steps`` steps or until the
    environment reports done.

    Returns:
        dict with key 'rewards' mapping to the per-episode reward list.
    """
    print("开始测试！")
    rewards = []  # total reward of every evaluated episode
    steps = []    # step count of every evaluated episode
    for i_ep in range(cfg.test_eps):
        ep_reward, ep_step = 0, 0
        state, _ = env.reset()
        while ep_step < cfg.ep_max_steps:
            ep_step += 1
            action = agent.predict_action(state)
            # Gymnasium-style 5-tuple step; info/truncation fields unused here.
            state, reward, done, _, _ = env.step(action)
            ep_reward += reward
            if done:
                break
        steps.append(ep_step)
        rewards.append(ep_reward)
        print(f"回合：{i_ep+1}/{cfg.test_eps}，奖励：{ep_reward:.2f}")
    print("完成测试")
    env.close()
    return {'rewards':rewards}




def smooth(data, weight=0.9):
    """Exponentially smooth a curve, like TensorBoard's smoothing slider.

    Each output point is ``last * weight + (1 - weight) * point``, seeded
    with the first raw value (so the first smoothed value equals data[0]).

    Args:
        data: sequence of raw values.
        weight: smoothing factor in [0, 1); higher means smoother.

    Returns:
        list of smoothed values, same length as ``data``; [] for empty input.
    """
    if not data:
        return []  # fix: original raised IndexError on data[0] for empty input
    last = data[0]
    smoothed = []
    for point in data:
        smoothed_val = last * weight + (1 - weight) * point
        smoothed.append(smoothed_val)
        last = smoothed_val
    return smoothed

def plot_rewards(rewards, ent, tag='train'):
    """Plot the raw and smoothed reward curves for one run.

    Args:
        rewards: per-episode returns to plot.
        ent: target-entropy value, shown in the figure title.
        tag: 'train' or 'test'; used in the title ("...ing curve").
    """
    # sns.set()
    plt.ioff()  # non-interactive mode so plt.show() blocks until closed
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))

    # Fix: title/label typos 'entropt' -> 'entropy', 'epsiodes' -> 'episodes'.
    ax.set_title(f"{tag}ing curve on entropy={ent} of SAC ")
    ax.set_xlabel('episodes')
    ax.plot(rewards, label='rewards')
    ax.plot(smooth(rewards), label='smoothed')
    ax.legend()
    plt.show()
    # time.sleep(1000)

if __name__=='__main__':
    # NOTE: ``args`` must stay a module-level global — train_offpolicy reads
    # it directly instead of receiving it as a parameter.
    args = get_args()  # get configuration parameters from arguments.py
    # Replay-buffer / training hyper-parameters.
    buffer_size = 100000
    minimal_size = 100  # minimum buffered transitions before updates start
    batch_size = 64
    num_episodes = 10
    replay_buffer = rl_utils.ReplayBuffer(buffer_size)

    # 0.8 is the target entropy handed to SAC's temperature tuning; it is
    # reused below purely as a label in the plot title.
    nenv, agent = env_agent_config(args, 0.8)
    return_list = train_offpolicy(nenv, agent, num_episodes,
                                  replay_buffer, minimal_size,
                                  batch_size)

    plot_rewards(return_list, 0.8)
