import os

import numpy as np
from matplotlib import pyplot as plt

from Examples.DQNExamples.HollowKnight2D.HK2DRPM import Experience


class Training:
    """DQN-style training/evaluation driver for the Hollow Knight agent.

    Holds a running history of test-episode rewards (``self.rewards``) so the
    reward curve accumulates across repeated ``test()`` calls.
    """

    def __init__(self):
        # Per-test-episode rewards, appended to on every test() call.
        self.rewards = []

    def _maybe_restore(self, agent, path):
        """Restore a checkpoint into ``agent`` if ``path`` exists; no-op otherwise."""
        if os.path.exists(path):
            agent.restore(path)
            print('模型加载成功')

    def train(self, env, agent, rpm,
              max_episodes=10000,
              max_steps=2000,
              memory_warmup_size=64,
              batch_size=32,
              test_freq=100,
              target_update_freq=5,
              load_model=True,
              save_model=True,
              path='./model/hk_move.ckpt'):
        """Run the main training loop.

        Args:
            env: environment with ``reset()`` and ``step(action)`` returning
                ``(next_obs, reward, done, key_point)``.
            agent: agent exposing ``sample``, ``learn``, ``save``, ``restore``,
                ``deal_obs`` and an ``alg`` with ``predict``/``lr``.
            rpm: replay memory with ``recent_obs``, ``append``, ``size``,
                ``sample_batch``.
            max_episodes: total episodes to train.
            max_steps: per-episode step cap.
            memory_warmup_size: minimum replay size before learning starts.
            batch_size: minibatch size for ``agent.learn``.
            test_freq: run an evaluation episode every this many episodes.
            target_update_freq: learn every this many environment steps.
            load_model: restore from ``path`` before training if it exists.
            save_model: checkpoint (plus ``.bak`` backup) after every episode.
            path: checkpoint file path.
        """
        total_episode = 0
        if load_model:
            self._maybe_restore(agent, path)
        while total_episode < max_episodes:
            obs = env.reset()
            total_step = 0
            episode_reward = 0
            self.loss = []
            for i in range(max_steps):
                # Stack the most recent frames with the current observation to
                # form the network input context.
                context = rpm.recent_obs()
                context.append(obs)
                context = np.stack(context, axis=0)
                action = agent.sample(context)
                next_obs, reward, done, key_point = env.step(action)
                rpm.append(Experience(obs, action, reward, key_point))
                obs = next_obs
                episode_reward += reward
                total_step += 1
                if i % 100 == 0:
                    # Periodic debug output of the current Q-value estimates.
                    print(f'action: {action}, reward: {reward}')
                    context = agent.deal_obs(context)
                    print(agent.alg.predict(context))
                # Learn only after warmup, and only every target_update_freq steps.
                if (rpm.size() > memory_warmup_size) and (total_step % target_update_freq == 0):
                    (batch_all_obs, batch_action, batch_reward, batch_done) = rpm.sample_batch(batch_size)
                    # The sampled stack holds context+1 frames; obs is the first
                    # 4 frames, next_obs the last 4 (shifted by one).
                    batch_obs = batch_all_obs[:, :4, :, :]
                    batch_next_obs = batch_all_obs[:, 1:, :, :]
                    loss_act = agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done)
                    self.loss.append(loss_act)
                if done:
                    break
            # Guard against an empty loss list (episode finished before any
            # learning step) — np.mean([]) would warn; keep the printed 'nan'.
            mean_loss = np.mean(self.loss) if self.loss else float('nan')
            print(
                f'Ep: {total_episode} Re: {episode_reward} Loss: {mean_loss},lr: {agent.alg.lr}, eg: {agent.e_greed}')
            if total_episode % test_freq == 0 or total_episode <= 1:
                self.test(env, agent, rpm, load_model=True, path=path, max_steps=max_steps)
            total_episode += 1
            if save_model:
                agent.save(path)
                # Keep a backup copy in case the primary checkpoint is corrupted.
                agent.save(path + '.bak')

    def test(self, env, agent, rpm,
             max_steps=2000,
             load_model=True,
             path='./model/hk_act.ckpt'):
        """Run one greedy evaluation episode and update the reward plot.

        Args:
            env: environment (same protocol as in ``train``); also exposes
                ``knight_mp``, passed to ``agent.predict``.
            agent: agent exposing ``predict`` and ``restore``.
            rpm: replay memory providing ``recent_obs`` / ``append``.
            max_steps: per-episode step cap.
            load_model: restore from ``path`` before evaluating if it exists.
            path: checkpoint file path.
        """
        if load_model:
            self._maybe_restore(agent, path)
        obs = env.reset()
        print('reset done')
        total_step = 0
        episode_reward = 0
        for i in range(max_steps):
            context = rpm.recent_obs()
            context.append(obs)
            context = np.stack(context, axis=0)
            action = agent.predict(context, env.knight_mp)
            next_obs, reward, done, key_point = env.step(action)
            rpm.append(Experience(obs, action, reward, key_point))
            obs = next_obs
            episode_reward += reward
            total_step += 1
            if done:
                break
        print(f'test_reward {episode_reward}')
        # Plot the reward curve. Save BEFORE show(): with interactive backends
        # show() releases the figure and a subsequent savefig writes a blank image.
        self.rewards.append(episode_reward)
        Y = np.array(self.rewards)
        X = np.arange(len(self.rewards))
        plt.plot(X, Y)
        plt.savefig('./model/reward.png')
        plt.show()
        plt.close()
