import logging

import gym
import numpy as np
from torch.utils.tensorboard import SummaryWriter

from agent.AgentBase import AgentBase
from buffer.Buffer import Buffer


class Trainer:
    """Drives the interaction loop between an agent and a gym environment.

    Supports both off-policy training (one minibatch update per environment
    step, sampled from a replay buffer) and on-policy training (one update
    per episode from the trajectory just collected).

    NOTE: uses the pre-0.26 gym API — ``env.reset()`` returns the state and
    ``env.step()`` returns a ``(next_state, reward, done, info)`` 4-tuple.
    """

    def __init__(self, env: gym.Env, agent: AgentBase, replay_buffer: Buffer, args):
        # Environment the agent interacts with.
        self.env = env
        # Agent being trained.
        self.agent = agent
        # Replay buffer (only consumed by the off-policy loop).
        self.replayBuffer = replay_buffer
        # Run configuration (argparse-style namespace).
        self.args = args

    def train_off_policy(self):
        """Off-policy training loop (DQN/DDPG-style agents).

        Stores every transition in the replay buffer and, once the buffer
        holds more than ``args.minimal_size`` transitions, performs one
        gradient update per environment step from a sampled minibatch.
        """
        logger, writer = self.init_logging(self.args)
        self.agent.set_writer(writer)

        num_episode = 0       # episodes completed so far
        total_step = 0        # environment steps across all episodes
        max_return = None     # best episode return observed so far
        episode_reward_list = []

        while num_episode < self.args.num_episodes:
            state = self.env.reset()
            done = False
            episode_step = 0
            episode_reward = 0
            while not done:
                action = self.agent.take_action(state)
                next_state, reward, done, info = self.env.step(action)
                episode_reward += reward
                self.replayBuffer.add(state, action, reward, next_state, done)
                state = next_state
                episode_step += 1
                # Advance the global counter every step so agent.update
                # receives a step index that moves WITHIN an episode.
                # (Previously total_step was only refreshed between
                # episodes, collapsing all intra-episode updates onto the
                # same step value.)
                total_step += 1
                # Off-policy: start learning only once the buffer holds
                # enough transitions for decorrelated minibatches.
                if self.replayBuffer.size() > self.args.minimal_size:
                    b_s, b_a, b_r, b_ns, b_d = self.replayBuffer.sample(self.args.batch_size)
                    transition_dict = {
                        'states': b_s,
                        'actions': b_a,
                        'next_states': b_ns,
                        'rewards': b_r,
                        'dones': b_d
                    }
                    self.agent.update(transition_dict, total_step)

            max_return = self._post_episode(logger, writer, num_episode, episode_step,
                                            episode_reward, total_step,
                                            episode_reward_list, max_return)
            num_episode += 1

        logger.info("max_return:%s", max_return)
        writer.close()
        # NOTE(review): set_writer presumably stores this same writer on the
        # agent, making this second close a harmless no-op
        # (SummaryWriter.close is idempotent) — confirm against AgentBase.
        self.agent.writer.close()

    def train_on_policy(self):
        """On-policy training loop (REINFORCE/A2C-style agents).

        Collects a full episode trajectory and performs a single agent
        update from it once the episode terminates.
        """
        logger, writer = self.init_logging(self.args)
        self.agent.set_writer(writer)

        num_episode = 0       # episodes completed so far
        total_step = 0        # environment steps across all episodes
        max_return = None     # best episode return observed so far
        episode_reward_list = []

        while num_episode < self.args.num_episodes:
            state = self.env.reset()
            done = False
            episode_step = 0
            episode_reward = 0
            transition_dict = {'states': [], 'actions': [], 'next_states': [], 'rewards': [], 'dones': []}
            while not done:
                action = self.agent.take_action(state)
                next_state, reward, done, info = self.env.step(action)
                transition_dict['states'].append(state)
                transition_dict['actions'].append(action)
                transition_dict['next_states'].append(next_state)
                transition_dict['rewards'].append(reward)
                transition_dict['dones'].append(done)
                state = next_state
                episode_reward += reward
                episode_step += 1
                # Keep the global counter in per-step sync with the
                # off-policy loop.
                total_step += 1

            # One update per episode, from the freshly collected trajectory.
            self.agent.update(transition_dict)

            max_return = self._post_episode(logger, writer, num_episode, episode_step,
                                            episode_reward, total_step,
                                            episode_reward_list, max_return)
            num_episode += 1

        logger.info("max_return:%s", max_return)
        writer.close()
        # NOTE(review): see train_off_policy — likely a redundant close of
        # the same SummaryWriter; harmless because close() is idempotent.
        self.agent.writer.close()

    def _post_episode(self, logger, writer, num_episode, episode_step,
                      episode_reward, total_step, episode_reward_list, max_return):
        """Shared per-episode bookkeeping for both training loops.

        Records the episode return, logs progress every
        ``args.internal_log`` episodes, writes tensorboard scalars, and
        checkpoints the agent whenever a new best episode return appears.

        :param episode_reward_list: mutated in place (current return appended).
        :param max_return: best return seen so far, or ``None`` before the
            first episode finishes.
        :return: the (possibly updated) best episode return.
        """
        episode_reward_list.append(episode_reward)
        if num_episode % self.args.internal_log == 0:
            # Lazy %-args: the message is only rendered if actually emitted.
            logger.info(
                "episode:{}, episode_step:{}, episode_reward:{}, total_step:{}, average_reward:{}".
                format(num_episode, episode_step, episode_reward, total_step,
                       np.mean(episode_reward_list[-50:])))

        # Per-episode tensorboard curves.
        writer.add_scalar("episode/reward", episode_reward, num_episode)
        writer.add_scalar("episode/step", episode_step, num_episode)

        # Evaluate the learned policy every evaluate_internal episodes.
        if num_episode % self.args.evaluate_internal == 0:
            # TODO: evaluate the model
            pass

        # Checkpoint on every new best episode return. The very first
        # episode always saves because max_return starts as None.
        if max_return is None or max_return < episode_reward:
            self.save()
            max_return = episode_reward
        return max_return

    def save(self):
        """Persist the agent's model (delegated to the agent itself)."""
        self.agent.save()

    def load(self):
        """Restore the agent's model from the path configured in args."""
        self.agent.load(self.args)

    def init_logging(self, args):
        """Set up the Python logger and the tensorboard writer.

        :param args: run configuration; must provide ``agent_name`` and
            ``record_path``, and may provide ``log_level``.
        :return: a ``(logger, SummaryWriter)`` tuple.
        """
        logging.basicConfig(level=logging.DEBUG)
        logger = logging.getLogger(args.agent_name)
        # Fall back to INFO when no explicit log_level was configured.
        level = getattr(args, 'log_level', logging.INFO)
        logger.setLevel(level)
        # Tensorboard event files are written under args.record_path.
        writer = SummaryWriter(args.record_path)
        return logger, writer
