#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- coding: utf-8 -*-

import os
import gym
import numpy as np
import matplotlib.pyplot as plt
import parl
from parl.utils import logger  # 日志打印工具
import gym_game2048
from scipy import io
from datetime import datetime

from model import Model_2048
from algorithm import DQN_2048
from agent import Agent_2048
from replay_memory import ReplayMemory

LEARN_FREQ = 5  # learn every N env steps: accumulate some fresh experience before each update instead of learning on every step
MEMORY_SIZE = 20000  # capacity of the replay memory; larger means more RAM
MEMORY_WARMUP_SIZE = 200  # pre-fill the replay memory with this many transitions before sampling batches for learning
BATCH_SIZE = 32  # number of transitions randomly sampled from the replay memory per learn() call
LEARNING_RATE = 0.001  # optimizer learning rate
GAMMA = 0.995  # reward discount factor, typically between 0.9 and 0.999


# Run one training episode.
def run_episode(env, agent, rpm):
    """Run a single training episode.

    Collects (obs, action, reward, next_obs, done) transitions into the
    replay memory and, once the memory is warmed up, performs a learning
    update every LEARN_FREQ steps.

    Args:
        env: gym-style environment exposing reset() and step().
        agent: agent exposing sample() (exploratory action) and learn().
        rpm: ReplayMemory supporting append(), __len__() and sample().

    Returns:
        Total (undiscounted) reward accumulated over the episode.
    """
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        step += 1
        # Exploratory action; the observation is transposed from HWC to
        # CHW before being fed to the network.
        action = agent.sample(np.transpose(obs, (2, 0, 1)))
        next_obs, reward, done, _ = env.step(action)
        # NOTE(review): the stored obs keeps the raw HWC layout while
        # sample()/predict() receive CHW — confirm agent.learn() expects
        # the HWC layout, otherwise transpose before appending.
        rpm.append((obs, action, reward, next_obs, done))

        # Train only after the warm-up phase, and only every LEARN_FREQ
        # steps so updates are amortized over fresh experience.
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            # learn() is called for its side effect; the returned loss was
            # previously bound to an unused local (train_loss).
            agent.learn(batch_obs, batch_action, batch_reward,
                        batch_next_obs, batch_done)  # s,a,r,s',done

        total_reward += reward
        obs = next_obs
        if done:
            break
    return total_reward


# Evaluate the agent: run 10 episodes and average the total reward.
def evaluate(env, agent, render=False):
    """Evaluate the agent greedily over 10 episodes.

    Returns a tuple of (mean episode reward, best episode reward,
    list of per-episode step counts).
    """
    rewards = []
    steps = []
    for _ in range(10):
        obs = env.reset()
        ep_reward = 0
        ep_steps = 0
        done = False
        while not done:
            # Always take the greedy action; obs goes in as CHW.
            act = agent.predict(np.transpose(obs, (2, 0, 1)))
            obs, reward, done, _ = env.step(act)
            ep_reward += reward
            ep_steps += 1
            if render:
                env.render()
        rewards.append(ep_reward)
        steps.append(ep_steps)

    return np.mean(rewards), np.max(rewards), steps


def main():
    """Train a DQN agent on the 2048 environment, saving checkpoints and curves."""
    print('hello 2048!')
    env = gym.make("game2048-v0", board_size=4, seed=None, binary=True,
                   extractor="cnn", penalty=0)
    act_dim = env.action_space.n
    obs_shape = list(env.observation_space.shape)
    rpm = ReplayMemory(MEMORY_SIZE)  # DQN experience replay buffer

    # Build the PARL model/algorithm/agent stack.
    model = Model_2048(act_dim=act_dim)
    algorithm = DQN_2048(model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = Agent_2048(
        algorithm,
        obs_dim=obs_shape,
        act_dim=act_dim,
        e_greed=0.01,  # probability of a random (exploratory) action
        e_greed_decrement=1e-6)  # anneal exploration as training converges

    # Resume from a previous checkpoint only if it exists; the original
    # unconditional restore crashed whenever the hard-coded file was absent.
    ckpt = 'model_dir_20200628234812/steps_11800_7308.4_20000.ckpt'
    if os.path.exists(ckpt):
        agent.restore(ckpt)

    # Pre-fill the replay memory so the first sampled batches are diverse.
    while len(rpm) < MEMORY_WARMUP_SIZE:
        run_episode(env, agent, rpm)

    max_episode = 30000
    episode = 0
    best_score = 0
    trainReward = []  # [episode, total_reward] samples from training
    testReward = []   # [episode, mean_reward, max_reward, mean_steps]
    dt = datetime.now()
    saveDir = os.path.join(os.path.abspath('.'),
                           'model_dir_{}'.format(dt.strftime('%Y%m%d%H%M%S')))
    # makedirs(exist_ok=True) replaces the old rmdir+mkdir pair, which
    # raised OSError whenever an existing directory was non-empty.
    os.makedirs(saveDir, exist_ok=True)

    while episode < max_episode:
        # Train for 200 episodes between evaluations.
        for i in range(0, 200):
            total_reward = run_episode(env, agent, rpm)
            episode += 1
            if episode == 10:
                print('Running~')
            if i % 20 == 0:
                trainReward.append([episode, total_reward])
                # Fixed label: this is a training reward, not a test reward.
                logger.info('episode:{}    Train reward:{}'.format(
                    episode, total_reward))

        # Evaluate the greedy policy.
        eval_reward, max_score, eval_step = evaluate(env, agent, render=True)
        # Fixed format string: it had 4 placeholders for 5 arguments, so
        # max_score was printed under the 'Step' label and eval_step dropped.
        logger.info(
            'episode:{}    e_greed:{}   Test reward:{}   Max score:{}   Step:{}'
            .format(episode, agent.e_greed, eval_reward, max_score, eval_step))
        testReward.append([episode, eval_reward, max_score, np.mean(eval_step)])
        if max_score > best_score:
            # Fixed: best_score previously stored eval_reward while the
            # comparison used max_score, so the threshold drifted.
            best_score = max_score
            save_path = '{}/steps_{}_{}_{}.ckpt'.format(
                saveDir, max_score, eval_reward, episode)
            agent.save(save_path)

    # Training finished: persist the final model and the reward curves.
    save_path = '{}/0dqn_model.ckpt'.format(saveDir)
    agent.save(save_path)
    save_path = '{}/results.mat'.format(saveDir)
    # Reload later with: io.loadmat(save_path)
    io.savemat(save_path, {'test': testReward, 'train': trainReward})

    plt.plot([p[0] for p in trainReward], [p[1] for p in trainReward], label='train')
    plt.plot([p[0] for p in testReward], [p[1] for p in testReward], label='test')
    plt.plot([p[0] for p in testReward], [p[2] for p in testReward], label='best')
    plt.legend(loc='best')
    plt.savefig('{}/0results.png'.format(saveDir))
    plt.show()


if __name__ == '__main__':
    main()
