import os
import numpy as np
import paddle

from Examples.DQNExamples.HollowKnight2D.HK2DRPM import Experience


class HKTrainFing2D():
    """Training/evaluation driver for a dual-agent DQN on a 2D Hollow Knight env.

    Two agents learn side by side from the same frames: ``agent_act`` chooses
    attack actions and ``agent_move`` chooses movement actions.  Each agent
    keeps its own replay memory (``rpm_act`` / ``rpm_move``) whose most recent
    frames are stacked with the current frame to form the observation context
    fed to the network.
    """

    def train(self, env, agent_move, agent_act, rpm_act, rpm_move,
              max_episodes=1000,
              max_steps=200,
              memory_warmup_sizeF=64,
              batch_size=32,
              target_update_freq=5,
              load_model=True,
              save_model=True,
              act_path='./model/hk_act.ckpt',
              move_path='./model/hk_move.ckpt'):
        """Run the training loop: play episodes, then learn from replay.

        Args:
            env: game environment; ``step(action_move, action_act)`` is
                expected to return ``(next_obs, reward_m, reward_a, done,
                key_point)``, and to expose ``knight_mp`` / ``is_sprint``.
            agent_move: DQN agent selecting movement actions.
            agent_act: DQN agent selecting attack actions.
            rpm_act: attack replay memory (``recent_obs`` / ``append`` /
                ``sample_batch``).
            rpm_move: movement replay memory, same interface as ``rpm_act``.
            max_episodes: number of episodes to play.
            max_steps: step cap per episode.
            memory_warmup_sizeF: unused; kept (typo and all) so existing
                keyword callers do not break.
            batch_size: mini-batch size passed to ``sample_batch``.
            target_update_freq: unused by the current post-episode learning
                scheme; kept for backward compatibility.
            load_model: restore checkpoints from disk before training.
            save_model: persist checkpoints after every episode.
            act_path: checkpoint path for the attack agent.
            move_path: checkpoint path for the movement agent.
        """
        total_episode = 0
        # Resume from checkpoints of a previous run when they exist on disk.
        if load_model:
            if os.path.exists(act_path):
                agent_act.restore(act_path)
                print('模型加载成功')
            if os.path.exists(move_path):
                agent_move.restore(move_path)
                print('模型加载成功')
        # Human-readable action labels, used only in the log lines below.
        # Attack ids: 0 no-op, 1 attack-left, 2 attack-right, 3 attack-up,
        # 4 attack-down, 5 down-slam, 6 shout, 7 wave-left, 8 wave-right,
        # 9 dash-left, 10 dash-right.
        actions = ['无动作', '左攻击', '右攻击', '上攻击', '下攻击', '下砸', '吼', '左波', '右波', '左冲', '右冲']
        # Move ids: 0 no-op, 1 jump, 2 walk-left, 3 walk-right, 4 jump-left,
        # 5 jump-right, 6 double-jump.
        moves = ['无动作', '跳跃', '往左走', '往右走', '往左跳', '往右跳', '双跳']
        while total_episode < max_episodes:
            obs = env.reset()
            print(obs.shape)
            total_step = 0
            episode_reward = 0
            self.loss = []
            # Counters for the post-episode learning phase; train_total starts
            # at 1 so the loop condition below compares against completed
            # updates without a zero special case.
            train_total = 1
            train_better = 0
            train_worse = 0
            for _ in range(max_steps):
                # Build each agent's observation context: the replay memory's
                # most recent frames plus the current frame, stacked on axis 0.
                context_act = rpm_act.recent_obs()
                context_act.append(obs)
                context_act = np.stack(context_act, axis=0)
                action_act = agent_act.sample(context_act, env.knight_mp, env.is_sprint)

                context_move = rpm_move.recent_obs()
                context_move.append(obs)
                context_move = np.stack(context_move, axis=0)
                action_move = agent_move.sample(context_move)

                next_obs, reward_m, reward_a, done, key_point = env.step(action_move, action_act)
                rpm_act.append(Experience(obs, action_act, reward_a, key_point))
                rpm_move.append(Experience(obs, action_move, reward_m, key_point))
                # Log the Q-values whenever a non-zero reward arrives (and on
                # the very first step) so training progress stays visible.
                if reward_a != 0 or total_step == 0:
                    if isinstance(context_act, np.ndarray) and len(context_act.shape) == 3:
                        # predict() expects a leading batch dimension.
                        context_act = np.expand_dims(context_act, axis=0)
                    context_act = paddle.to_tensor(context_act, dtype='float32')
                    print(f'奖励: {reward_a}, 动作: {actions[action_act]}, Q: {agent_act.alg.predict(context_act)}')
                if reward_m != 0 or total_step == 0:
                    if isinstance(context_move, np.ndarray) and len(context_move.shape) == 3:
                        context_move = np.expand_dims(context_move, axis=0)
                    context_move = paddle.to_tensor(context_move, dtype='float32')
                    print(f'奖励: {reward_m}, 动作: {moves[action_move]}, Q: {agent_move.alg.predict(context_move)}')
                obs = next_obs
                episode_reward += reward_m
                episode_reward += reward_a
                total_step += 1
                if done:
                    break
            # Learning phase: once the episode ends, replay mini-batches until
            # at least max(total_step // 5, 100) updates have run.
            while train_total < max(total_step // 5, 100):
                (batch_all_obs_act, batch_action_act, batch_reward_act,
                 batch_done_act) = rpm_act.sample_batch(batch_size)
                (batch_all_obs_move, batch_action_move, batch_reward_move, batch_done_move) = rpm_move.sample_batch(
                    batch_size)
                # Each sampled item stacks 5 consecutive frames: frames 0-3
                # form the observation, frames 1-4 the next observation.
                batch_obs_act = batch_all_obs_act[:, :4, :, :]
                batch_next_obs_act = batch_all_obs_act[:, 1:, :, :]
                batch_obs_move = batch_all_obs_move[:, :4, :, :]
                batch_next_obs_move = batch_all_obs_move[:, 1:, :, :]
                # Track whether this batch contained clearly positive /
                # clearly negative rewards (statistics for the log line only).
                better = any(r > 0 for r in batch_reward_act)
                worse = (any(r < -2 for r in batch_reward_act)
                         or any(r < -2 for r in batch_reward_move))
                if better:
                    train_better += 1
                if worse:
                    train_worse += 1
                train_total += 1
                loss_act = agent_act.learn(batch_obs_act, batch_action_act, batch_reward_act, batch_next_obs_act, batch_done_act)
                self.loss.append(loss_act)
                loss_move = agent_move.learn(batch_obs_move, batch_action_move, batch_reward_move, batch_next_obs_move, batch_done_move)
                self.loss.append(loss_move)
                print(f'\r########已学习队列长度: {train_total}, 已学习正奖励个数: {train_better}, 已学习负奖励: {train_worse}########', end='')
            total_episode += 1
            print()
            print(
                f'Ep: {total_episode} Re: {episode_reward} Loss: {np.mean(self.loss)},lr: {agent_act.alg.lr}, e_g: {agent_act.e_greed}')
            if save_model:
                agent_act.save(act_path)
                agent_move.save(move_path)
                # Keep a backup copy alongside the live checkpoint.
                agent_act.save(act_path + '.bak')
                agent_move.save(move_path + '.bak')

    def learn(self, agent, batch_obs, batch_action, batch_reward, batch_next_obs, batch_done):
        """Run one learning step on *agent* and record the returned loss.

        Requires ``self.loss`` to have been initialised (``train`` resets it
        at the start of every episode).
        """
        loss = agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done)
        self.loss.append(loss)

    def test(self, env, agent_move, agent_act, rpm_act, rpm_move,
             max_steps=200,
             load_model=True,
             act_path='./model/hk_act.ckpt',
             move_path='./model/hk_move.ckpt'):
        """Play evaluation episodes with the current (or loaded) policies.

        NOTE(review): this method unpacks ``env.step`` into 4 values while
        ``train`` unpacks 5, and calls ``agent_act.sample`` without the
        ``knight_mp`` / ``is_sprint`` extras used in ``train`` — confirm the
        evaluation environment/agent really expose these signatures.
        """
        if load_model:
            if os.path.exists(act_path):
                agent_act.restore(act_path)
                print('模型加载成功')
            if os.path.exists(move_path):
                agent_move.restore(move_path)
                print('模型加载成功')
        obs = env.reset()
        print(obs.shape)
        total_step = 0
        episode_reward = 0
        for _ in range(max_steps):
            # Same stacked-context construction as in train().
            context_act = rpm_act.recent_obs()
            context_act.append(obs)
            context_act = np.stack(context_act, axis=0)
            action_act = agent_act.sample(context_act)

            context_move = rpm_move.recent_obs()
            context_move.append(obs)
            context_move = np.stack(context_move, axis=0)
            action_move = agent_move.sample(context_move)

            next_obs, reward, done, _ = env.step(action_move, action_act)

            obs = next_obs
            episode_reward += reward
            total_step += 1

            if done:
                break
        # NOTE(review): this second loop feeds the raw observation (no stacked
        # context) to sample(); it looks like leftover code — confirm whether
        # it should be removed.
        for _ in range(max_steps):
            total_step += 1
            act = agent_act.sample(obs)
            move = agent_move.sample(obs)
            next_obs, reward, done, _ = env.step(move, act)
            episode_reward += reward
            obs = next_obs
            if done:
                break
        print(f'test_reward {episode_reward}')
