import os

import numpy as np


class Dim2DTraining():
    """DQN-style training loop for environments with 2D (image) observations.

    Observations come from the environment in the [0, 255] range and are
    scaled to [0, 1] exactly once per frame before being fed to the agent
    or stored in the replay memory.

    Expected collaborator interfaces (duck-typed):
        env:   gym-style ``reset() -> obs``, ``step(act) -> (obs, r, done, info)``,
               ``render(mode=...)``.
        agent: ``sample(obs)``, ``predict(obs)``, ``learn(...)``,
               ``save(path)``, ``restore(path)``, attribute ``e_greed``.
        rpm:   replay memory with ``size()``, ``append(obs, act, r, next_obs, done)``,
               ``sample_batch(batch_size)``.
    """

    def train(self, env, agent, rpm,
              max_episodes=1000,
              max_steps=200,
              max_test_steps=200,
              memory_warmup_size=2000,
              batch_size=32,
              target_update_freq=5,
              render=False,
              test_render=True,
              test_freq=200,
              load_model=True,
              save_model=True,
              frame_skipping=1,
              save_path='./model/dim_2D_model.ckpt'):
        """Run the full training loop: warm-up, then episodic training.

        Args:
            env: environment (see class docstring for the expected API).
            agent: learning agent (see class docstring).
            rpm: replay memory (see class docstring).
            max_episodes: number of training episodes to run.
            max_steps: per-episode cap on action selections during training.
            max_test_steps: step cap passed to :meth:`test`.
            memory_warmup_size: minimum replay size before learning starts.
            batch_size: replay batch size for each ``agent.learn`` call.
            target_update_freq: step interval between ``agent.learn`` calls.
                NOTE(review): despite its name, this gates learning frequency,
                not a target-network sync — confirm against the agent.
            render / test_render: whether to render during train / test.
            test_freq: run an evaluation episode every ``test_freq`` episodes.
            load_model / save_model: restore from / checkpoint to ``save_path``.
            frame_skipping: repeat each chosen action this many frames.
            save_path: checkpoint file path.
        """
        # --- Warm-up: pre-fill the replay memory with experience. ---
        while rpm.size() < memory_warmup_size:
            obs = env.reset()
            # Normalize exactly once per frame.  (Bug fix: the previous code
            # re-divided an already-normalized obs by 255 on every iteration
            # after the first, storing 1/255^2-scaled observations, and fed
            # agent.sample a raw observation on the first step.)
            obs = obs / 255.0
            for _ in range(max_steps):
                act = agent.sample(obs)
                next_obs, reward, done, _ = env.step(act)
                next_obs = next_obs / 255.0
                rpm.append(obs, act, reward, next_obs, done)
                obs = next_obs
                if done:
                    break
        print('预存经验结束')

        # --- Training ---
        total_episode = 0
        if load_model and os.path.exists(save_path):
            agent.restore(save_path)
            print('模型加载成功')
        while total_episode < max_episodes:
            obs = env.reset()
            obs = obs / 255.0
            total_step = 0
            episode_reward = 0
            for _ in range(max_steps):
                act = agent.sample(obs)
                done = False
                # Repeat the chosen action for `frame_skipping` frames,
                # storing every intermediate transition.
                for _ in range(frame_skipping):
                    total_step += 1
                    if render:
                        env.render(mode="rgb_array")
                    next_obs, reward, done, _ = env.step(act)
                    next_obs = next_obs / 255.0
                    rpm.append(obs, act, reward, next_obs, done)
                    episode_reward += reward
                    obs = next_obs
                    # Learn every `target_update_freq` steps once the step
                    # count exceeds the warm-up threshold.
                    if total_step > memory_warmup_size and total_step % target_update_freq == 0:
                        (batch_obs, batch_act, batch_reward,
                         batch_next_obs, batch_terminal) = rpm.sample_batch(batch_size)
                        agent.learn(batch_obs, batch_act, batch_reward,
                                    batch_next_obs, batch_terminal)
                    if done:
                        break
                if done:
                    break
            total_episode += 1
            print('episode {}, episode_reward {}, e_greed: {}'.format(
                total_episode, episode_reward, agent.e_greed))
            if save_model:
                agent.save(save_path)
            if total_episode % test_freq == 0:
                self.test(env, agent, max_steps=max_test_steps, render=test_render)

    def test(self, env, agent, max_steps=200, render=False, load_model=True,
             save_path='./model/dim_2D_model.ckpt'):
        """Run one greedy (``agent.predict``) evaluation episode.

        Args:
            env: environment (see class docstring).
            agent: agent providing ``predict`` and optionally ``restore``.
            max_steps: cap on the episode length.
            render: whether to render each frame.
            load_model: restore the checkpoint at ``save_path`` if it exists.
            save_path: checkpoint file path.
        """
        obs = env.reset()
        obs = obs / 255.0
        episode_reward = 0
        if load_model and os.path.exists(save_path):
            agent.restore(save_path)
            print('模型加载成功')
        for _ in range(max_steps):
            if render:
                env.render(mode="rgb_array")
            act = agent.predict(obs)
            next_obs, reward, done, _ = env.step(act)
            next_obs = next_obs / 255.0
            episode_reward += reward
            obs = next_obs
            if done:
                break
        # Bug fix: the original passed episode_reward twice for one placeholder.
        print('test_reward {}'.format(episode_reward))
