#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding: utf-8 -*-

import os
import gym
import numpy as np
import matplotlib.pyplot as plt
import parl
from parl.utils import logger  # 日志打印工具
import gym_game2048
from scipy import io
from datetime import datetime

from agent import Agent
from model import Model
from algorithm import PolicyGradient  # from parl.algorithms import PolicyGradient

from parl.utils import logger

LEARNING_RATE = 1e-4


# 训练一个episode
# Run one training episode
def run_episode(env, agent, greedy):
    """Run a single training episode and collect the trajectory.

    Args:
        env: gym 2048 environment (observations are HWC; the model wants CHW).
        agent: PARL agent exposing `sample(obs, greedy)`.
        greedy: epsilon-style exploration parameter forwarded to `agent.sample`.

    Returns:
        (obs_list, action_list, reward_list): per-step trajectory lists;
        observations are stored transposed to CHW.
    """
    obs_list, action_list, reward_list = [], [], []
    obs = env.reset()
    while True:
        # Transpose HWC -> CHW once and reuse it for both storage and sampling.
        obs_chw = np.transpose(obs, (2, 0, 1))
        obs_list.append(obs_chw)
        action = agent.sample(obs_chw, greedy)
        action_list.append(action)

        obs, reward, done, info = env.step(action)
        reward_list.append(reward)

        # BUG FIX: `last_board.max` without parentheses is a bound method and
        # never equals 2048 — call it so the win early-exit actually fires.
        last_board = env.get_board()
        if last_board.max() == 2048:
            break
        if done:
            break
    return obs_list, action_list, reward_list


# 评估 agent, 跑 5 个episode，总reward求平均
# Evaluate the agent: run 5 episodes and average the total reward
def evaluate(env, agent, render=False):
    """Run 5 greedy evaluation episodes.

    Args:
        env: gym 2048 environment.
        agent: PARL agent exposing `predict(obs)`.
        render: if True, render each environment step.

    Returns:
        (mean_reward, max_reward, step_list): mean and max episode reward over
        the 5 runs, plus the list of per-episode step counts.
    """
    eval_reward = []
    eval_step = []
    for _ in range(5):
        obs = env.reset()
        episode_reward = 0
        total_step = 0
        while True:
            action = agent.predict(np.transpose(obs, (2, 0, 1)))
            obs, reward, isOver, _ = env.step(action)
            episode_reward += reward
            total_step += 1
            if render:
                env.render()
            if isOver:
                break
            # BUG FIX: `last_board.max` without parentheses is a bound method
            # and never equals 2048 — call it so the win early-exit works.
            last_board = env.get_board()
            if last_board.max() == 2048:
                break
        eval_step.append(total_step)
        eval_reward.append(episode_reward)
    return np.mean(eval_reward), np.max(eval_reward), eval_step


def calc_reward_to_go(reward_list, gamma=1.0):
    """Compute discounted reward-to-go G_t for each step.

    G_i = r_i + gamma * G_{i+1}, evaluated backwards from the final step.

    Args:
        reward_list: sequence of per-step rewards. Unlike the previous
            version, the input is NOT mutated (the old code rewrote the
            caller's list in place as a hidden side effect).
        gamma: discount factor in [0, 1].

    Returns:
        np.ndarray of the same length holding the returns G_t.
    """
    returns = np.array(reward_list, dtype=np.float64)
    for i in range(len(returns) - 2, -1, -1):
        returns[i] += gamma * returns[i + 1]
    return returns


def main():
    """Train a policy-gradient agent on the 2048 environment.

    Trains for `max_episode` episodes, periodically evaluating the agent,
    checkpointing when the evaluation best-reward improves, and finally
    saving the model, the reward curves (.mat), and a plot (.png).
    """
    env = gym.make("game2048-v0", board_size=4, seed=None, binary=True, extractor="cnn", penalty=0)
    act_dim = env.action_space.n
    obs_dim = list(env.observation_space.shape)
    # obs_dim = [16, 4, 4]
    logger.info('obs_dim {}, act_dim {}'.format(obs_dim, act_dim))

    # Build the agent with the PARL framework
    model = Model(act_dim=act_dim)
    alg = PolicyGradient(model, lr=LEARNING_RATE)
    agent = Agent(alg, obs_dim=obs_dim, act_dim=act_dim)

    best_score = 0
    trainReward = []
    testReward = []
    dt = datetime.now()
    saveDir = os.path.join(os.path.abspath('.'), 'model_dir_{}'.format(dt.strftime('%Y%m%d%H%M%S')))
    # BUG FIX: the old os.rmdir/os.mkdir pair raised OSError if a directory
    # with the same timestamp already existed and was non-empty.
    os.makedirs(saveDir, exist_ok=True)

    max_episode = 10000
    greedy = 0.0
    # Hoist the loop-invariant logging/eval intervals out of the loop.
    log_interval = max_episode // 1000
    eval_interval = max_episode // 100
    for i in range(max_episode):
        obs_list, action_list, reward_list = run_episode(env, agent, greedy)
        if i % log_interval == 0:
            trainReward.append([i, sum(reward_list)])

        batch_obs = np.array(obs_list)
        batch_action = np.array(action_list)
        batch_reward = calc_reward_to_go(reward_list, 0.99)

        agent.learn(batch_obs, batch_action, batch_reward)

        if i % eval_interval == 0:
            total_reward, best_reward, step = evaluate(env, agent, render=True)
            logger.info('Episode {}, Test {}, Best {}, Step {}, Gr {}'.format(i, total_reward, best_reward, np.mean(step), greedy))
            testReward.append([i, total_reward, best_reward, np.mean(step)])
            if best_reward > best_score:
                # BUG FIX: best_score was previously updated with total_reward
                # while being compared against best_reward, so the threshold
                # never tracked the quantity it was tested against.
                best_score = best_reward
                save_path = '{}/steps_{}_{}_{}.ckpt'.format(saveDir, best_reward, total_reward, i)
                agent.save(save_path)

    # Save the final parameters and the reward histories
    save_path = '{}/0model.ckpt'.format(saveDir)
    agent.save(save_path)
    save_path = '{}/0results.mat'.format(saveDir)
    io.savemat(save_path, {'test': testReward, 'train': trainReward})  # c = io.loadmat('a.mat')

    plt.plot([i[0] for i in trainReward], [i[1] for i in trainReward], label='train')
    plt.plot([i[0] for i in testReward], [i[1] for i in testReward], label='test')
    plt.plot([i[0] for i in testReward], [i[2] for i in testReward], label='best')
    plt.legend(loc='best')
    plt.savefig('{}/0results.png'.format(saveDir))
    plt.show()


# Entry point: only run training when executed as a script, not on import.
if __name__ == '__main__':
    main()
