import os
from concurrent.futures import ThreadPoolExecutor

import numpy as np

from Examples.DQNExamples.HollowKnight2D.HK2DRPM import Experience


class Training2D():
    """DQN training/evaluation driver for the 2D Hollow Knight environment.

    ``train`` interacts with the environment on the calling thread and
    offloads gradient updates to a single-worker thread pool; ``test`` runs
    greedy evaluation with separate "move" and "act" agents.
    """

    def train(self, env, agent, rpm,
              max_episodes=1000,
              max_steps=200,
              memory_warmup_size=64,
              batch_size=32,
              target_update_freq=5,
              load_model=True,
              save_model=True,
              path='./model/hk_act.ckpt'):
        """Run the training loop.

        Args:
            env: environment exposing ``reset()`` and ``step(action)``.
            agent: agent exposing ``sample``/``learn``/``save``/``restore``.
            rpm: replay memory (``recent_obs``/``append``/``size``/``sample_batch``).
            max_episodes: number of episodes to run.
            max_steps: step cap per episode.
            memory_warmup_size: minimum replay size before learning starts.
            target_update_freq: learn every this many environment steps.
                NOTE(review): despite the name, this gates the learn
                frequency, not a target-network sync — confirm with Agent.
            load_model / save_model: restore/persist checkpoint at ``path``.
        """
        total_episode = 0
        if load_model and os.path.exists(path):
            agent.restore(path)
            print('模型加载成功')
        # One worker: learning runs off the interaction thread but stays
        # strictly sequential, so gradient updates never overlap.
        executor = ThreadPoolExecutor(max_workers=1)
        try:
            while total_episode < max_episodes:
                obs = env.reset()
                print(obs.shape)
                total_step = 0
                episode_reward = 0
                # NOTE(review): reset per episode, but learn() tasks queued in
                # the previous episode may still append to it after this reset.
                self.loss = []
                exe_list = []
                for _ in range(max_steps):
                    # Stack recent frames with the current one to build the
                    # network input context.
                    context = rpm.recent_obs()
                    context.append(obs)
                    context = np.stack(context, axis=0)
                    action = agent.sample(context)

                    next_obs, reward, done, _ = env.step(action)
                    rpm.append(Experience(obs, action, reward, done))
                    obs = next_obs
                    episode_reward += reward
                    total_step += 1
                    # Learn tasks still in flight (only the count is used).
                    pending = [f for f in exe_list if not f.done()]

                    if rpm.size() > memory_warmup_size and total_step % target_update_freq == 0:
                        # Batch layout: s, a, r, s', done
                        (batch_all_obs, batch_action, batch_reward,
                         batch_done) = rpm.sample_batch(batch_size)

                        # First 4 frames form the state, frames 1..end the next
                        # state — assumes a context length of 4; TODO confirm
                        # against the replay memory's CONTEXT_LEN.
                        batch_obs = batch_all_obs[:, :4, :, :]
                        batch_next_obs = batch_all_obs[:, 1:, :, :]
                        # Back-pressure: stop queueing while the learner lags.
                        if len(pending) <= 8:
                            exe_list.append(executor.submit(
                                self.learn, agent, batch_obs, batch_action,
                                batch_reward, batch_next_obs, batch_done))
                    if done:
                        break

                total_episode += 1
                # Guard empty-list mean: prints 'nan' without the
                # RuntimeWarning np.mean([]) would raise.
                mean_loss = np.mean(self.loss) if self.loss else float('nan')
                print()
                print(
                    f'Ep: {total_episode} Re: {episode_reward} Loss: {mean_loss},lr: {agent.alg.lr}, e_g: {agent.e_greed}')

                if save_model:
                    agent.save(path)
                    # Keep a backup copy of the checkpoint.
                    agent.save(path + '.bak')
        finally:
            # Fix: the executor was never shut down; drain queued learn tasks
            # before returning so no update is silently abandoned.
            executor.shutdown(wait=True)

    def learn(self, agent, batch_obs, batch_action, batch_reward, batch_next_obs, batch_done):
        """Run one gradient update on the agent and record its loss.

        Executed on the executor's worker thread; ``list.append`` is atomic
        under the GIL, so no extra locking is needed for ``self.loss``.
        """
        loss = agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done)
        self.loss.append(loss)

    def test(self, env, agent_move, agent_act, rpm_act, rpm_move,
             max_steps=200,
             load_model=True,
             act_path='./model/hk_act.ckpt',
             move_path='./model/hk_move.ckpt'):
        """Evaluate a move-agent / act-agent pair for one episode.

        Args:
            env: environment whose ``step`` takes ``(action_move, action_act)``.
            agent_move / agent_act: agents exposing ``sample`` and ``restore``.
            rpm_act / rpm_move: replay memories providing ``recent_obs()``.
            max_steps: step cap for each of the two evaluation loops.
            load_model: restore checkpoints from ``act_path``/``move_path``.
        """
        if load_model:
            if os.path.exists(act_path):
                agent_act.restore(act_path)
                print('模型加载成功')
            if os.path.exists(move_path):
                agent_move.restore(move_path)
                print('模型加载成功')
        obs = env.reset()
        print(obs.shape)
        total_step = 0
        episode_reward = 0
        for _ in range(max_steps):
            # Build each agent's frame-stacked context from its own memory.
            context_act = rpm_act.recent_obs()
            context_act.append(obs)
            context_act = np.stack(context_act, axis=0)
            action_act = agent_act.sample(context_act)

            context_move = rpm_move.recent_obs()
            context_move.append(obs)
            context_move = np.stack(context_move, axis=0)
            action_move = agent_move.sample(context_move)

            next_obs, reward, done, _ = env.step(action_move, action_act)

            obs = next_obs
            episode_reward += reward
            total_step += 1

            if done:
                break
        # NOTE(review): this second loop feeds the raw observation (no frame
        # stacking) to sample(); looks inconsistent with the loop above —
        # confirm whether it is intentional.
        for _ in range(max_steps):
            total_step += 1
            act = agent_act.sample(obs)
            move = agent_move.sample(obs)
            next_obs, reward, done, _ = env.step(move, act)
            episode_reward += reward
            obs = next_obs
            if done:
                break
        # Fix: the format string has one placeholder; the original passed
        # episode_reward twice.
        print('test_reward {}'.format(episode_reward))
