#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import shutil
import time
import traceback

import numpy as np
import paddle
from matplotlib import pyplot as plt

from Examples.PPOExsamples.HollowKnight2D.HKEnv2D import HKEnv2D
from Examples.PPOExsamples.HollowKnight2D.Model import PPOModel
from Examples.PPOExsamples.HollowKnight2D.PPOConfig import ppo_config
from Examples.PPOExsamples.HollowKnight2D.agent import PPOAgent
from Examples.PPOExsamples.HollowKnight2D.env_utils import ParallelEnv, LocalEnv
from Examples.PPOExsamples.HollowKnight2D.ppo import PPO
from Examples.PPOExsamples.HollowKnight2D.storage import RolloutStorage


def run_evaluate_episodes(agent, eval_env, eval_episodes):
    """Run ``eval_episodes`` complete evaluation episodes and return the total reward.

    Uses the agent's deterministic ``predict`` path (no exploration noise).
    After evaluation the model is checkpointed twice: the working checkpoint
    and a backup copy used for crash recovery by the ``__main__`` retry loop.

    Args:
        agent: object exposing ``predict(obs, knight_mp)`` and ``save(path)``.
        eval_env: env wrapper exposing ``reset()`` / ``step(action)`` and
            ``env.knight_mp`` (extra observation fed to the policy).
        eval_episodes (int): number of full episodes to run.

    Returns:
        Sum of all step rewards accumulated across the episodes.
    """
    eval_episode_rewards = []
    # BUGFIX: the original appended one entry per *step*, so the loop
    # condition `len(rewards) < eval_episodes` terminated after
    # eval_episodes steps rather than episodes.  Now one entry (the
    # episode's cumulative reward) is appended per finished episode.
    for _ in range(eval_episodes):
        obs = eval_env.reset()
        done = False
        episode_reward = 0.0
        while not done:
            action = agent.predict(obs, eval_env.env.knight_mp)
            obs, reward, done, info = eval_env.step(action)
            episode_reward += reward
        eval_episode_rewards.append(episode_reward)
    agent.save('./大蚊母.ckpt')
    agent.save('./大蚊母备份.ckpt')
    print("save model")
    return np.sum(eval_episode_rewards)


def main():
    """Train a PPO agent on the 2D Hollow Knight environment.

    Reads the shared ``ppo_config`` plus CLI overrides from the module-level
    ``args``, builds a parallel training env and a local evaluation env, then
    runs the standard PPO collect-then-learn loop.  Periodically evaluates
    the policy, saves the evaluation-reward curve to ``reward.png`` and
    checkpoints the model to ``./大蚊母.ckpt`` after every update.
    """
    config = ppo_config
    config['seed'] = args.seed
    config['xparl_addr'] = args.xparl_addr
    config['test_every_steps'] = args.test_every_steps
    config['train_total_steps'] = args.train_total_steps

    # One learn() call consumes env_num * step_nums transitions.
    config['batch_size'] = int(config['env_num'] * config['step_nums'])
    config['num_updates'] = int(
        config['train_total_steps'] // config['batch_size'])

    print("------------------- PPO ---------------------")
    print('Env: {}, seed: {}'.format(config['env'], config['seed']))
    eg_env = HKEnv2D()
    envs = ParallelEnv(eg_env)
    eval_env = LocalEnv(eg_env, test=True)

    obs_space = eval_env.obs_space
    act_space = eval_env.act_space

    model = PPOModel(obs_space, act_space)

    ppo = PPO(
        model,
        clip_param=config['clip_param'],
        entropy_coef=config['entropy_coef'],
        value_loss_coef=config['value_loss_coef'],
        max_grad_norm=config['max_grad_norm'],
        eps=config['eps'],
        initial_lr=config['initial_lr'])

    agent = PPOAgent(ppo, config)
    # Resume from the latest checkpoint when one exists (the __main__ crash
    # recovery loop relies on this to continue after a restart).
    if os.path.exists('./大蚊母.ckpt'):
        agent.restore('./大蚊母.ckpt')
        print('load model from ./大蚊母.ckpt')
    # BUGFIX: the rollout capacity was hard-coded to 128.  It must match
    # config['step_nums'], the number of transitions appended per update,
    # or any other step_nums setting over/under-fills the buffer.
    rollout = RolloutStorage(config['step_nums'], config['env_num'],
                             obs_space, act_space)

    obs = envs.reset()
    done = np.zeros(config['env_num'], dtype='float32')
    point = np.zeros(config['env_num'], dtype='float32')

    test_flag = 0
    total_steps = 0
    total_reward = 0
    turn_step = 0
    total_reward_list = []
    for update in range(1, config['num_updates'] + 1):
        # The first 4 steps of each collection phase are warm-up and are not
        # stored, so exactly step_nums transitions reach the rollout buffer.
        for step in range(config['step_nums'] + 4):

            total_steps += 1 * config['env_num']
            value, action, logprob, _ = agent.sample(obs, eval_env.env.knight_mp)
            next_obs, reward, next_done, next_point = envs.step(action)
            if step < 4:
                continue
            total_reward += reward[0]
            # Debug logging: dump the policy distribution on rewarding steps.
            if reward[0] > 0 or step == 100:
                print(f'actin:{action[0]},  reward:{reward[0]}')
                print(list(model.policy(paddle.to_tensor(obs, dtype='float32'))[0].numpy()))
            rollout.append(obs, action, logprob, reward, point, value.flatten())
            obs, done, point = next_obs, next_done, next_point
            turn_step += 1
            if done:
                print(f'Episode finished after {turn_step} steps, total reward: {total_reward}')
                total_reward = 0
                turn_step = 0
                # Evaluate roughly every test_every_steps environment steps,
                # but only after an initial 2000-step warm-up period.
                if (total_steps + 1) // config['test_every_steps'] >= test_flag and total_steps > 2000:
                    while (total_steps + 1) // config['test_every_steps'] >= test_flag:
                        test_flag += 1
                    print('开始测试')
                    sum_reward = run_evaluate_episodes(agent, eval_env,
                                                       config['eval_episode'])
                    total_reward_list.append(sum_reward)
                    # Save the evaluation-reward curve with matplotlib.
                    Y = np.array(total_reward_list)
                    X = np.arange(len(total_reward_list))
                    plt.plot(X, Y)
                    plt.savefig('reward.png')
                    # Show the figure (no-op on non-interactive backends).
                    plt.show()
                    plt.close()

                    print('Evaluation over: {} episodes, Reward: {}'.format(
                        config['eval_episode'], sum_reward))
                    eval_env.reset()
                else:
                    eval_env.reset()
        # Bootstrap the value of the final observation for return computation.
        value = agent.value(obs)
        rollout.compute_returns(value, point, gamma=0.9)

        value_loss, action_loss, entropy_loss, lr = agent.learn(rollout)
        agent.save('./大蚊母.ckpt')
        print(f'update: {update}, value_loss: {value_loss}, action_loss: {action_loss}, lr: {lr}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env",
        type=str,
        default="PongNoFrameskip-v4",
        help="OpenAI gym environment name")
    parser.add_argument(
        "--seed", type=int, default=None, help="seed of the experiment")
    parser.add_argument(
        "--env_num",
        type=int,
        default=None,
        help=
        "number of the environment. Note: if greater than 1, xparl is needed")
    parser.add_argument(
        '--continuous_action',
        action='store_true',
        default=False,
        help='action type of the environment')
    parser.add_argument(
        "--xparl_addr",
        type=str,
        default=None,
        help="xparl address for distributed training ")
    parser.add_argument(
        '--train_total_steps',
        type=int,
        # BUGFIX: argparse applies `type` only to command-line strings, so a
        # default of 10e6 leaked a *float* into integer step arithmetic.
        default=int(10e6),
        help='number of total time steps to train (default: 10e6)')
    parser.add_argument(
        '--test_every_steps',
        type=int,
        default=int(5e3),
        help='the step interval between two consecutive evaluations')

    args = parser.parse_args()
    # Crash-recovery loop: the game environment is flaky, so on any exception
    # roll the (possibly corrupted) working checkpoint back to the backup
    # copy and restart training from it.
    while True:
        try:
            main()
        except Exception:
            print(traceback.format_exc())
            # BUGFIX: guard both file operations — on a crash before the
            # first checkpoint was written, the unguarded os.remove /
            # shutil.copyfile raised FileNotFoundError and killed the
            # retry loop itself.
            if os.path.exists('./大蚊母.ckpt'):
                # Delete the working checkpoint 大蚊母.ckpt.
                os.remove('./大蚊母.ckpt')
            if os.path.exists('./大蚊母备份.ckpt'):
                # Restore 大蚊母备份.ckpt as the working checkpoint.
                shutil.copyfile('./大蚊母备份.ckpt', './大蚊母.ckpt')
            time.sleep(10)
            print('run again')