import os
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

from Examples.DQNExamples.PingPong2D.PingPong2DRPM import Experience


class PingPongTraining:
    """Training and evaluation loop for the 2D ping-pong DQN agent.

    Gradient updates (``learn``) are offloaded to a single-worker thread
    pool so the environment can keep stepping while the network trains;
    with one worker, updates never overlap each other.
    """

    def train(self, env, agent, rpm,
              max_episodes=1000,
              max_steps=200,
              max_test_steps=200,
              memory_warmup_size=2000,
              batch_size=32,
              target_update_freq=5,
              render=False,
              test_render=True,
              test_freq=200,
              load_model=True,
              save_model=True,
              frame_skipping=1,
              save_path='./model/dim_one_model.ckpt'):
        """Run the training loop for ``max_episodes`` episodes.

        Args:
            env: Gym-style environment (``reset``/``step``/``render``).
            agent: agent exposing ``sample``/``learn``/``save``/``restore``
                and an ``e_greed`` attribute.
            rpm: replay memory exposing ``append``/``recent_obs``/
                ``sample_batch``/``size``.
            max_episodes: number of training episodes.
            max_steps: max outer environment steps per episode.
            max_test_steps: step budget for periodic evaluation (currently
                unused — the periodic test call is disabled).
            memory_warmup_size: minimum replay size before learning starts.
            batch_size: mini-batch size per learn step.
            target_update_freq: learn every this many skipped steps.
            render: render the environment during training.
            test_render: render during evaluation (unused while disabled).
            test_freq: evaluate every this many episodes (unused while disabled).
            load_model: restore weights from ``save_path`` if it exists.
            save_model: checkpoint weights after every episode.
            frame_skipping: extra env steps repeating the same action.
            save_path: checkpoint file path.
        """
        executor = ThreadPoolExecutor(max_workers=1)
        try:
            total_episode = 0
            if load_model and os.path.exists(save_path):
                agent.restore(save_path)
                print('模型加载成功')
            while total_episode < max_episodes:
                obs = env.reset()
                total_step = 0
                episode_reward = 0
                loss_lst = []
                futures = []
                for _ in range(max_steps):
                    # Stack the most recent stored frames plus the current
                    # observation to form the network's input context.
                    context = rpm.recent_obs()
                    context.append(obs)
                    context = np.stack(context, axis=0)

                    action = agent.sample(context)
                    next_obs, reward, done, _ = env.step(action)
                    rpm.append(Experience(obs, action, reward, done))
                    obs = next_obs
                    episode_reward += reward
                    # Frame skipping: repeat the chosen action for a few more
                    # steps, storing every transition.  (Renamed the loop
                    # variable — it previously shadowed the outer loop's `i`.)
                    for _ in range(frame_skipping):
                        total_step += 1
                        if render:
                            env.render()
                        next_obs, reward, done, _ = env.step(action)
                        rpm.append(Experience(obs, action, reward, done))
                        episode_reward += reward
                        obs = next_obs
                        if (rpm.size() > memory_warmup_size) and (total_step % target_update_freq == 0):
                            # sample_batch returns stacked 5-frame contexts;
                            # split into s (first 4 frames) and s' (last 4).
                            (batch_all_obs, batch_action, batch_reward,
                             batch_done) = rpm.sample_batch(batch_size)
                            batch_obs = batch_all_obs[:, :4, :, :]
                            batch_next_obs = batch_all_obs[:, 1:, :, :]

                            # Hand the update to the worker thread, but cap
                            # the backlog so the main loop can't run
                            # arbitrarily far ahead of training.
                            pending = [f for f in futures if not f.done()]
                            if len(pending) <= 4:
                                futures.append(executor.submit(
                                    self.learn, agent, loss_lst,
                                    batch_obs, batch_action, batch_reward,
                                    batch_next_obs, batch_done))
                        if done:
                            break
                    if done:
                        break
                total_episode += 1
                # Wait for every outstanding learn() call BEFORE reporting
                # and checkpointing: previously the loss mean was printed
                # (and the model saved) while futures could still be running,
                # so loss_lst was racily incomplete.
                for _ in as_completed(futures):
                    pass
                # Guard the empty case: np.mean([]) yields nan plus a
                # RuntimeWarning during replay warm-up.
                loss_mean = np.mean(loss_lst) if loss_lst else float('nan')
                print('episode {}, episode_reward: {},lost_mean: {}, e_greed: {}'.format(
                    total_episode, episode_reward, loss_mean, agent.e_greed))
                if save_model:
                    agent.save(save_path)
        finally:
            # Release the worker thread; the executor previously leaked
            # (it was never shut down).
            executor.shutdown(wait=True)

    def learn(self, agent, loss_lst, batch_obs, batch_act, batch_reward, batch_next_obs, batch_terminal):
        """Run one gradient step on the agent and record the loss.

        Executed on the worker thread; ``list.append`` is atomic under the
        GIL, so appending to the shared ``loss_lst`` is safe.
        """
        train_loss = agent.learn(batch_obs, batch_act, batch_reward, batch_next_obs, batch_terminal)
        loss_lst.append(train_loss)

    def test(self, env, agent, max_steps=200, render=False, load_model=True,
             save_path='./model/dim_one_model.ckpt'):
        """Run one greedy evaluation episode and print its total reward.

        Args:
            env: Gym-style environment.
            agent: agent exposing ``predict`` (greedy action) and ``restore``.
            max_steps: step budget for the episode.
            render: render the environment each step.
            load_model: restore weights from ``save_path`` if it exists.
            save_path: checkpoint file path.
        """
        obs = env.reset()
        # Initialize the 4-frame context by repeating the first observation.
        context = np.stack([obs] * 4, axis=0)
        episode_reward = 0
        if load_model and os.path.exists(save_path):
            agent.restore(save_path)
            print('模型加载成功')
        for _ in range(max_steps):
            if render:
                env.render()
            act = agent.predict(context)
            next_obs, reward, done, _ = env.step(act)
            episode_reward += reward
            # Slide the context window: drop the oldest frame, append next_obs.
            context = np.stack([context[1], context[2], context[3], next_obs], axis=0)
            if done:
                break
        # Fixed: the format string has one placeholder but was given two
        # arguments; the stray duplicate is removed (output unchanged).
        print('test_reward {}'.format(episode_reward))
