import time

import einops
from tqdm import tqdm
from agents.AgentMSAC import MSACAgent
from config.cfg import Config
from environment.ElectricScheduleEnv import build_env
import torch
import numpy as np

from utils.misc import env_info, print_net, split_action
from utils.rl_utils import ReplayBuffer, moving_average
import wandb
import matplotlib.pyplot as plt


plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese characters render in plots
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when a CJK font is active


def train_multi_agent(args: Config):
    """Train a multi-agent SAC (MASAC) model on the electric-schedule environment.

    Runs 10 tqdm-wrapped iterations of ``int(args.episode / 10)`` episodes each,
    collecting transitions into a replay buffer and updating the agent networks
    once the buffer holds more than ``args.minial_size`` samples.

    Args:
        args: training configuration (buffer/batch sizes, episode count,
            wandb and model-saving flags, model path, ...).

    Returns:
        list: per-episode total return (summed over all agents).
    """
    torch.manual_seed(1277)
    np.random.seed(1223)
    # NOTE(review): the env module appears to build the environment at import
    # time; imported after seeding so construction sees the fixed seeds -- confirm.
    from environment.ElectricScheduleEnv import env
    replay_buffer = ReplayBuffer(capacity=args.buffer_size)
    start_time = time.time()
    return_list = []
    rewards_list = []
    # print environment information
    env_info(env)
    msacAgent = MSACAgent(
        nagents=2,
        net_dims=[256, 256, 256],
        state_dim=env.state_dim,
        action_dim=env.action_dim,
        args=args
    )

    # show the network structure
    print_net(msacAgent)

    # hoist the per-iteration episode count (was recomputed every loop)
    episodes_per_iter = int(args.episode / 10)

    # train the networks
    for i in range(10):
        with tqdm(total=episodes_per_iter, desc="Iteration %d" % i) as pbar:
            for i_episode in range(episodes_per_iter):
                episode_return = []
                state = env.reset()
                done = False
                while not done:
                    actions = msacAgent.take_actions(state)
                    actionA, actionB = split_action(
                        actions,
                        actions_dims_dict={
                            'fire': [env.A.FireStationsSize, env.B.FireStationsSize],
                            'storage': [env.A.StorageStationsSize, env.B.StorageStationsSize],
                            "total": env.action_dim
                        },
                        ts=env.CurrentTime
                    )
                    next_state, rewards, dones = env.step(actionA, actionB)
                    actions = np.hstack(actions)
                    replay_buffer.add(
                        state, actions, rewards, next_state, dones)
                    state = next_state
                    # all agents terminate together, so agent 0's flag suffices
                    done = dones[0]
                    episode_return.append(rewards)
                    # start learning only once enough transitions are buffered
                    if replay_buffer.size() > args.minial_size:
                        s_state, s_actions, s_rewards, s_nextstate, s_done = replay_buffer.sample(
                            args.batch_size)
                        msacAgent.update_network(
                            sample=(s_state, s_actions, s_rewards, s_nextstate, s_done))
                # per-agent episode return: sum step rewards over time
                # (plain NumPy replaces the einops 'b a -> () a' reduction)
                episode_return = np.asarray(episode_return).sum(axis=0)
                rewards_list.append(episode_return)
                # total return summed over all agents (was hard-coded [0] + [1])
                return_list.append(float(np.sum(episode_return)))
                if args.use_wandb:
                    wandb.log(data={  # log per-agent episode returns
                        "reward-A": episode_return[0],
                        "reward-B": episode_return[1],
                    })
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({'episode': '%d' % (args.episode / 10 * i + i_episode + 1),
                                      'return': '%.3f' % np.mean(return_list[-10:])})
                pbar.update(1)
    end_time = time.time()
    print("training model cost %.2f min" % ((end_time-start_time)/60.0))
    # save model
    if args.is_save_model:
        actors_net_dict = {}
        for a in msacAgent.agents:
            actors_net_dict.update(a.get_params())
        torch.save(actors_net_dict, args.model_path)
    return return_list


if __name__ == '__main__':
    args = Config()
    # initialize wandb to record training information
    if args.use_wandb:
        wandb.init(
            project=args.project_name,
            entity=args.entity,
            config=vars(args),
            save_code=True,
            tags=['masac', 'electric power schedule']
        )
    returns = train_multi_agent(args)
    # Plot results. Use len(returns) for the x-axis: the training loop runs
    # 10 * int(args.episode / 10) episodes, so range(args.episode) would raise
    # a shape mismatch whenever args.episode is not a multiple of 10.
    plt.plot(range(len(returns)), returns)
    plt.xlabel('训练次数')
    plt.ylabel('奖励结果')
    plt.title('MASAC on {}'.format("Day-ahead Schedule"))
    plt.show()
