#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

import numpy as np

from Examples.PPOExsamples.EasyGame.EasyGameEnv import EasyGameEnv
from Examples.PPOExsamples.EasyGame.EasyGameModel import EasyGameModel
from Examples.PPOExsamples.EasyGame.OOXXGAME import OOXXGame
from Examples.PPOExsamples.EasyGame.env_utils import ParallelEnv, LocalEnv
from Examples.PPOExsamples.EasyGame.storage import RolloutStorage
from PPOConfig import easy_game_config
from agent import PPOAgent
from ppo import PPO


# Plays black vs. white in the eval environment and returns the total
# reward the black agent collected over `eval_episodes` complete episodes.
def run_evaluate_episodes(agent_black, agent_white, eval_env, eval_episodes,
                          max_steps=5):
    """Evaluate the two agents against each other by self-play.

    Args:
        agent_black: agent that moves first on each turn; its per-step
            reward is what gets scored.
        agent_white: opponent agent; its reward is ignored for scoring.
        eval_env: environment exposing reset()/step()/render().
        eval_episodes (int): number of complete episodes to play.
        max_steps (int): safety cap on turns per episode; the episode is cut
            off once step > max_steps (default 5 preserves the original
            hard-coded limit).

    Returns:
        Total black-agent reward summed across all episodes.
    """
    episode_rewards = []
    # BUG FIX: the original appended reward_b on every *step*, so the outer
    # loop counted steps rather than episodes and evaluation stopped after
    # ~eval_episodes steps. We now accumulate one entry per episode.
    while len(episode_rewards) < eval_episodes:
        obs = eval_env.reset()
        done = False
        step = 0
        episode_reward = 0.0
        while not done:
            step += 1
            action = agent_black.predict(obs)
            obs, reward_b, done, info = eval_env.step(action)
            eval_env.render()
            # NOTE(review): white still moves even if black's move already
            # ended the game — preserved from the original; confirm the env
            # tolerates a step after done.
            action = agent_white.predict(obs)
            obs, _reward_w, done, info = eval_env.step(action)
            eval_env.render()
            episode_reward += reward_b
            if step > max_steps:
                break
        episode_rewards.append(episode_reward)
        print(f'test done at {step} steps')
    return np.sum(episode_rewards)


def main():
    """Train two PPO agents (black and white) against each other by self-play.

    Reads hyperparameters from `easy_game_config`, overridden by the
    module-level `args` (set under __main__), alternates moves between the
    two agents inside a parallel environment, and periodically evaluates
    and checkpoints both models.
    """
    config = easy_game_config
    # Fold command-line overrides into the shared config dict.
    config['seed'] = args.seed
    config['xparl_addr'] = args.xparl_addr
    config['test_every_steps'] = args.test_every_steps
    config['train_total_steps'] = args.train_total_steps

    # One learning batch = one rollout of `step_nums` steps from each of the
    # `env_num` parallel environments.
    config['batch_size'] = int(config['env_num'] * config['step_nums'])
    config['num_updates'] = int(
        config['train_total_steps'] // config['batch_size'])

    print("------------------- PPO ---------------------")
    print('Env: {}, seed: {}'.format(config['env'], config['seed']))
    eg_env = OOXXGame()
    envs = ParallelEnv(eg_env)  # vectorized envs used for training rollouts
    eval_env = LocalEnv(eg_env, test=True)  # single env used for evaluation

    obs_space = eval_env.obs_space
    act_space = eval_env.act_space

    # Two independent models/optimizers, one per player colour.
    model_black = EasyGameModel(obs_space, act_space)
    model_white = EasyGameModel(obs_space, act_space)

    ppo_black = PPO(
        model_black,
        clip_param=config['clip_param'],
        entropy_coef=config['entropy_coef'],
        value_loss_coef=config['value_loss_coef'],
        max_grad_norm=config['max_grad_norm'],
        eps=config['eps'],
        initial_lr=config['initial_lr'])
    ppo_white = PPO(
        model_white,
        clip_param=config['clip_param'],
        entropy_coef=config['entropy_coef'],
        value_loss_coef=config['value_loss_coef'],
        max_grad_norm=config['max_grad_norm'],
        eps=config['eps'],
        initial_lr=config['initial_lr'])

    agent_black = PPOAgent(
        ppo_black, config
    )

    agent_white = PPOAgent(ppo_white, config)
    # Resume from checkpoints in the working directory if present.
    if os.path.exists('./model_black.ckpt'):
        agent_black.restore('./model_black.ckpt')
    if os.path.exists('./model_white.ckpt'):
        agent_white.restore('./model_white.ckpt')
    # Separate rollout buffers: each agent learns only from its own
    # (obs, action, logprob, reward, done, value) transitions.
    rollout_black = RolloutStorage(config['step_nums'], config['env_num'], obs_space,
                                   act_space)
    rollout_white = RolloutStorage(config['step_nums'], config['env_num'], obs_space, act_space)

    obs = envs.reset()
    done = np.zeros(config['env_num'], dtype='float32')
    next_obs_b = obs

    test_flag = 0
    total_steps = 0

    for update in range(1, config['num_updates'] + 1):
        for step in range(config['step_nums']):
            total_steps += 1 * config['env_num']
            black_win = False
            white_win = False

            # Black moves first; white then acts on the board black left.
            value_b, action_b, logprob_b, _ = agent_black.sample(obs)
            next_obs_b, reward_b, next_done_b, info_b = envs.step(action_b)
            # NOTE(review): only env index 0 is inspected for a win — this
            # assumes env_num == 1 (or that env 0 is representative); confirm.
            if reward_b[0] == 1:
                black_win = True
            value_w, action_w, logprob_w, _ = agent_white.sample(next_obs_b)
            next_obs_w, reward_w, next_done_w, info_w = envs.step(action_w)
            if reward_w[0] == 1:
                white_win = True
            # Zero-sum reward shaping: winner gets +1, loser -1, and the
            # episode is forced to terminate by setting done for env 0.
            if black_win:
                reward_b[0] = 1
                reward_w[0] = -1
                next_done_w[0] = 1
            elif white_win:
                reward_b[0] = -1
                reward_w[0] = 1
                next_done_w[0] = 1

            # Black's transition starts from `obs`; white's starts from the
            # board after black's move (`next_obs_b`).
            rollout_black.append(obs, action_b, logprob_b, reward_b, done, value_b.flatten())
            rollout_white.append(next_obs_b, action_w, logprob_w, reward_w, done, value_w.flatten())
            obs, done = next_obs_w, next_done_w
            if black_win or white_win or next_done_w[0]:
                eval_env.render()
                obs = envs.reset()
                print(f'Episode finished after {step} steps, reward_b: {reward_b}, reward_w: {reward_w}')

        # Bootstrap value if not done.
        # NOTE(review): white's value is bootstrapped from `next_obs_b` (the
        # state after black's last move) while black uses the current `obs`;
        # confirm this asymmetry is intended.
        value_b = agent_black.value(obs)
        rollout_black.compute_returns(value_b, done)
        value_w = agent_white.value(next_obs_b)
        rollout_white.compute_returns(value_w, done)

        # Optimizing the policy and value network, then checkpoint both
        # agents every update.
        agent_black.learn(rollout_black)
        agent_white.learn(rollout_white)
        # print(f'update {update} done')
        eval_env.reset()
        agent_black.save('./model_black.ckpt')
        agent_white.save('./model_white.ckpt')

        # Evaluate whenever total_steps crosses another multiple of
        # test_every_steps; test_flag tracks how many thresholds were passed.
        if (total_steps + 1) // config['test_every_steps'] >= test_flag:
            while (total_steps + 1) // config['test_every_steps'] >= test_flag:
                test_flag += 1

            print('开始测试')  # ("start testing")
            avg_reward = run_evaluate_episodes(agent_black, agent_white, eval_env,
                                               config['eval_episode'])

            print('Evaluation over: {} episodes, Reward: {}'.format(
                config['eval_episode'], avg_reward))


if __name__ == "__main__":
    # Command-line interface; the parsed `args` is read as a module-level
    # global by main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env",
        type=str,
        default="PongNoFrameskip-v4",
        help="OpenAI gym environment name")
    parser.add_argument(
        "--seed", type=int, default=None, help="seed of the experiment")
    parser.add_argument(
        "--env_num",
        type=int,
        default=None,
        help=
        "number of the environment. Note: if greater than 1, xparl is needed")
    parser.add_argument(
        '--continuous_action',
        action='store_true',
        default=False,
        help='action type of the environment')
    parser.add_argument(
        "--xparl_addr",
        type=str,
        default=None,
        help="xparl address for distributed training ")
    parser.add_argument(
        '--train_total_steps',
        type=int,
        # BUG FIX: `10e6` is a float literal, and argparse applies `type`
        # only to strings from the command line — the default leaked through
        # as a float. Wrap in int() so the default matches the declared type.
        default=int(10e6),
        help='number of total time steps to train (default: 10e6)')
    parser.add_argument(
        '--test_every_steps',
        type=int,
        default=int(5e3),
        help='the step interval between two consecutive evaluations')

    args = parser.parse_args()
    main()
