#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import time

import numpy as np
from matplotlib import pyplot as plt

from Examples.PPOExsamples.PingPong.PingPongConfig import pingpong_config
from Examples.PPOExsamples.PingPong.PingPongEnv import PingPong2DEnv
from Examples.PPOExsamples.EasyGame.agent import PPOAgent
from Examples.PPOExsamples.EasyGame.env_utils import ParallelEnv, LocalEnv
from Examples.PPOExsamples.EasyGame.ppo import PPO
from Examples.PPOExsamples.EasyGame.storage import RolloutStorage

# Runs policy until 'real done' and returns episode reward
# A fixed seed is used for the eval environment
from Examples.PPOExsamples.PingPong.GameModel import PingPongModel

# PaddlePaddle flag: pre-allocate 98% of GPU memory for this process.
os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'


def run_evaluate_episodes(agent, eval_env, eval_episodes):
    """Run the greedy policy for ``eval_episodes`` full episodes and return
    the mean episode reward.

    Also checkpoints the agent to ``./model.ckpt`` after evaluation (the
    training loop restores from this path on restart).

    Args:
        agent: object with ``predict(obs)`` and ``save(path)``.
        eval_env: object with ``reset()``, ``render()`` and ``step(action)``
            returning ``(obs, reward, done, info)``.
        eval_episodes (int): number of complete episodes to run.

    Returns:
        float: mean total reward over the evaluated episodes.
    """
    eval_episode_rewards = []
    while len(eval_episode_rewards) < eval_episodes:
        obs = eval_env.reset()
        done = False
        episode_reward = 0.0
        while not done:
            action = agent.predict(obs)
            eval_env.render()
            obs, reward, done, info = eval_env.step(action)
            episode_reward += reward
        # One entry per finished episode. The original code appended the
        # per-step reward here, so the outer loop terminated after
        # `eval_episodes` STEPS (usually mid-first-episode), not episodes.
        eval_episode_rewards.append(episode_reward)
    # Persist the current policy so training can resume from it.
    agent.save('./model.ckpt')
    return np.mean(eval_episode_rewards)


def main():
    """Train PPO on the PingPong2D environment.

    Collects rollouts from parallel environments, updates the policy every
    ``step_nums`` transitions, plots per-episode rewards, and periodically
    evaluates (and checkpoints) the agent.

    Relies on the module-level ``args`` parsed in the ``__main__`` guard.
    """
    config = pingpong_config
    config['seed'] = args.seed
    config['xparl_addr'] = args.xparl_addr
    config['test_every_steps'] = args.test_every_steps
    config['train_total_steps'] = args.train_total_steps
    # NOTE(review): args.env, args.env_num and args.continuous_action are
    # parsed but never applied to config -- confirm whether they are meant
    # to override pingpong_config.

    # One PPO update consumes env_num * step_nums transitions.
    config['batch_size'] = int(config['env_num'] * config['step_nums'])
    config['num_updates'] = int(
        config['train_total_steps'] // config['batch_size'])

    print("------------------- PPO ---------------------")
    print('Env: {}, seed: {}'.format(config['env'], config['seed']))
    eg_env = PingPong2DEnv()
    envs = ParallelEnv(eg_env)
    eval_env = LocalEnv(eg_env, test=True)

    obs_space = eval_env.obs_space
    act_space = eval_env.act_space

    model = PingPongModel(obs_space, act_space)

    ppo = PPO(
        model,
        clip_param=config['clip_param'],
        entropy_coef=config['entropy_coef'],
        value_loss_coef=config['value_loss_coef'],
        max_grad_norm=config['max_grad_norm'],
        eps=config['eps'],
        initial_lr=config['initial_lr'])

    agent = PPOAgent(ppo, config)
    # Resume from the checkpoint written by run_evaluate_episodes, if any.
    if os.path.exists('./model.ckpt'):
        agent.restore('./model.ckpt')
    # The storage must hold exactly step_nums transitions per environment:
    # the loop below appends step_nums times per update, and batch_size /
    # num_updates above are derived from step_nums. The original hard-coded
    # 128 here, which breaks whenever config['step_nums'] != 128.
    rollout = RolloutStorage(config['step_nums'], config['env_num'],
                             obs_space, act_space)

    obs = envs.reset()
    done = np.zeros(config['env_num'], dtype='float32')

    test_flag = 0
    total_steps = 0
    total_reward = 0
    total_reward_list = []
    for update in range(1, config['num_updates'] + 1):
        # The first 4 steps of each update are warm-up and are not stored
        # (see the `continue` below); total_steps still counts them.
        for step in range(config['step_nums'] + 4):
            total_steps += 1 * config['env_num']
            value, action, logprob, _ = agent.sample(obs)
            eg_env.render()
            next_obs, reward, next_done, info = envs.step(action)
            if step < 4:
                continue
            total_reward += int(reward[0])
            rollout.append(obs, action, logprob, reward, done, value.flatten())
            obs, done = next_obs, next_done
            # NOTE(review): `done` is a length-env_num array; this truth
            # test is only well-defined for env_num == 1 -- confirm.
            if done:
                print(f'Episode finished after {step} steps, total reward: {total_reward}')
                total_reward_list.append(total_reward)
                # Plot the per-episode reward curve with matplotlib.
                Y = np.array(total_reward_list)
                X = np.arange(len(total_reward_list))
                plt.plot(X, Y)
                plt.savefig('reward.png')
                # Display the figure.
                plt.show()
                plt.close()
                total_reward = 0
                # NOTE(review): resetting eval_env (not envs) here looks
                # unintentional -- confirm against LocalEnv semantics.
                eval_env.reset()
        # Bootstrap value if the rollout ended mid-episode.
        value = agent.value(obs)
        rollout.compute_returns(value, done)

        value_loss, action_loss, entropy_loss, lr = agent.learn(rollout)
        print(f'update: {update}, value_loss: {value_loss}, action_loss: {action_loss}, lr: {lr}')

        # Evaluate once every test_every_steps environment steps.
        if (total_steps + 1) // config['test_every_steps'] >= test_flag:
            while (total_steps + 1) // config['test_every_steps'] >= test_flag:
                test_flag += 1
            print('开始测试')

            avg_reward = run_evaluate_episodes(agent, eval_env,
                                               config['eval_episode'])

            print('Evaluation over: {} episodes, Reward: {}'.format(
                config['eval_episode'], avg_reward))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env",
        type=str,
        default="PongNoFrameskip-v4",
        help="OpenAI gym environment name")
    parser.add_argument(
        "--seed", type=int, default=None, help="seed of the experiment")
    parser.add_argument(
        "--env_num",
        type=int,
        default=None,
        help=
        "number of the environment. Note: if greater than 1, xparl is needed")
    parser.add_argument(
        '--continuous_action',
        action='store_true',
        default=False,
        help='action type of the environment')
    parser.add_argument(
        "--xparl_addr",
        type=str,
        default=None,
        help="xparl address for distributed training ")
    parser.add_argument(
        '--train_total_steps',
        type=int,
        default=10e6,
        help='number of total time steps to train (default: 10e6)')
    parser.add_argument(
        '--test_every_steps',
        type=int,
        default=int(5e3),
        help='the step interval between two consecutive evaluations')

    args = parser.parse_args()
    main()
