'''
Author: Morphlng
Date: 2022-05-05 12:48:28
LastEditTime: 2022-05-06 15:57:31
LastEditors: Morphlng
Description: Train and evaluate a DQN agent on FlappyBird-v0 using PARL.
FilePath: \flappy_bird\train.py
'''

import os
from typing import Any, Union
import flappy_bird_gym
import numpy as np
import parl
import time
from parl.utils import logger

from flappyBird_model import FlappyBirdModel
from flappyBird_agent import FlappyBirdAgent
from replay_memory import ReplayMemory

LEARN_FREQ = 5  # update parameters every 5 steps
MEMORY_SIZE = 20000  # replay memory size
MEMORY_WARMUP_SIZE = 200  # store some experiences in the replay memory in advance
BATCH_SIZE = 32  # number of transitions sampled per learning step
LEARNING_RATE = 1e-3  # initial learning rate (decayed during training in main())
GAMMA = 0.99  # discount factor of reward


# train an episode
def train(agent, env, rpm):
    """Run a single training episode and return its total reward.

    The agent acts with epsilon-greedy sampling; every transition is stored
    in the replay memory, and once the memory holds more than
    MEMORY_WARMUP_SIZE transitions the agent learns from a random batch
    every LEARN_FREQ steps.

    Args:
        agent: object exposing sample(obs) and learn(...) (FlappyBirdAgent).
        env: gym-style environment with reset() and step(action).
        rpm: replay buffer with append(exp), __len__ and sample(batch_size)
            (ReplayMemory).

    Returns:
        Accumulated reward of the episode.
    """
    total_reward = 0
    step = 0
    obs = env.reset()

    while True:
        step += 1
        action = agent.sample(obs)  # epsilon-greedy exploration
        next_obs, reward, done, _ = env.step(action)

        rpm.append((obs, action, reward, next_obs, done))

        # Learn only after the warm-up phase and only every LEARN_FREQ steps.
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            # The returned loss is deliberately ignored here.
            agent.learn(batch_obs, batch_action, batch_reward,
                        batch_next_obs, batch_done)

        total_reward += reward
        obs = next_obs
        if done:
            break

    return total_reward


# evaluate 5 episodes
def evaluate(agent, env, eval_episodes=5, render=False):
    """Run the greedy policy for several episodes and return the mean reward.

    Args:
        agent: object exposing predict(obs) (FlappyBirdAgent).
        env: gym-style environment with reset(), step(action) and render().
        eval_episodes: number of evaluation episodes to average over.
        render: when True, render each frame and sleep to cap at ~30 FPS.

    Returns:
        Mean episode reward over ``eval_episodes`` episodes (numpy float).
    """
    eval_reward = []

    for _ in range(eval_episodes):
        obs = env.reset()
        episode_reward = 0
        while True:
            action = agent.predict(obs)  # greedy action, no exploration
            obs, reward, is_over, _ = env.step(action)
            episode_reward += reward
            if render:
                env.render()
                time.sleep(1 / 30)  # throttle rendering to ~30 FPS
            if is_over:
                break

        eval_reward.append(episode_reward)

    return np.mean(eval_reward)


def main():
    """Entry point: build the DQN pipeline, then train or evaluate.

    If ``./model.ckpt`` exists, the saved agent is restored and only
    evaluated (with rendering). Otherwise the agent is trained for 10000
    episodes, evaluated every 100 episodes, and checkpointed whenever it
    beats the best test reward so far.
    """
    env = flappy_bird_gym.make('FlappyBird-v0')
    # env = env.unwrapped # Cancel the minimum score limit
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.n
    logger.info('obs_dim {}, act_dim {}'.format(obs_dim, act_dim))

    # build up model
    rpm = ReplayMemory(MEMORY_SIZE)
    model = FlappyBirdModel(obs_dim=obs_dim, act_dim=act_dim)
    alg = parl.algorithms.DQN(model, gamma=GAMMA, lr=LEARNING_RATE)
    agent = FlappyBirdAgent(alg, obs_dim, act_dim,
                            e_greed=0.2, e_greed_decrement=1e-6)

    # If a trained model already exists, just demo it and stop.
    if os.path.exists('./model.ckpt'):
        agent.restore('./model.ckpt')
        evaluate(agent, env, render=True)
        return  # was exit(); return keeps main() safely callable

    # Warm up the replay memory before learning starts.
    while len(rpm) < MEMORY_WARMUP_SIZE:
        train(agent, env, rpm)

    best_test_reward = 100  # only save checkpoints that beat this score

    for i in range(10000):
        train_reward = train(agent, env, rpm)
        if i % 10 == 0:
            logger.info("Episode {}, Reward Sum {}.".format(
                i, train_reward))

        if (i + 1) % 100 == 0:
            test_reward = evaluate(agent, env, render=False)
            logger.info('Test reward: {}'.format(test_reward))
            if test_reward > best_test_reward:
                best_test_reward = test_reward
                agent.save('./model_{}_rwd.ckpt'.format(best_test_reward))

            if (i + 1) % 2000 == 0:
                agent.save('./model_{}_turn.ckpt'.format(i + 1))

            # Learning rate schedule: decay faster above 5e-4, slower
            # between 1e-4 and 5e-4. `elif` prevents a double decay when
            # the first multiply pushes lr just below the 5e-4 boundary.
            if alg.lr >= 5e-4:
                alg.lr *= 0.995
            elif alg.lr >= 1e-4:
                alg.lr *= 0.99
            logger.info('Current learning rate: {}'.format(alg.lr))

    # save the final parameters to ./model_final.ckpt
    agent.save('./model_final.ckpt')


# Run training/evaluation only when executed as a script (not on import).
if __name__ == '__main__':
    main()
