#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gym
import numpy as np
import parl
from parl.utils import logger

from cartpole_model import CartpoleModel
from cartpole_agent import CartpoleAgent
import os
import platform

from replay_memory import ReplayMemory, Exp
import cv2
import sys

# NOTE(review): this working-directory / sys.path setup executes AFTER the
# project imports above (cartpole_model, replay_memory, ...), so it cannot
# influence how those modules were resolved — confirm whether these lines are
# still needed or should be moved before the imports.
if (platform.system() == 'Linux'):
    os.chdir("ParlDqn/")
sys.path.append('/home/aistudio/external-libraries')

LEARN_FREQ = 5  # run a learning step every LEARN_FREQ environment steps
MEMORY_SIZE = 100000  # replay memory capacity
MEMORY_WARMUP_SIZE = 20000  # store some experiences in the replay memory in advance
BATCH_SIZE = 2048  # transitions sampled per learning step
LEARNING_RATE = 2e-5
GAMMA = 0.99  # discount factor of reward
obs_dim = [1, 100, 100]  # observation shape fed to the agent: (channels, height, width)
load_model = True  # whether to load previously saved model parameters at startup

def convert_image(state, size=(100, 100)):
    """Convert a raw environment frame into a normalized grayscale observation.

    Args:
        state: raw frame from the gym environment (an H x W x 3 color image).
        size: (width, height) to resize the frame to before grayscale
            conversion; defaults to (100, 100) to match ``obs_dim``.

    Returns:
        float32 array of shape (1, size[1], size[0]) with values in [0, 1].
    """
    # cv2.resize takes (width, height); convert to a single gray channel after.
    state = cv2.cvtColor(cv2.resize(state, size), cv2.COLOR_BGR2GRAY)
    # Scale pixel intensities from [0, 255] down to [0, 1].
    state = state / 255.0
    # Add a leading channel axis so the result matches the (C, H, W) layout.
    return np.reshape(state, (1, size[1], size[0])).astype('float32')

def run_episode(agent, env, replay):
    """Run one training episode, storing transitions and learning periodically.

    Args:
        agent: agent exposing sample() for exploratory actions and learn()
            for parameter updates.
        env: gym environment.
        replay: replay memory supporting append(), sample() and len().

    Returns:
        (total_reward, mean_loss) for the episode. mean_loss is 0.0 when no
        learning step ran (e.g. during warm-up while the memory is still
        below MEMORY_WARMUP_SIZE).
    """
    total_reward = 0
    losses = []
    state = env.reset()
    state = convert_image(state)

    step = 0
    while True:
        step += 1
        action = agent.sample(state)
        next_state, reward, isOver, _ = env.step(action)
        next_state = convert_image(next_state)
        replay.append(Exp(state, action, reward, next_state, isOver))

        # Learn only after warm-up and only every LEARN_FREQ steps.
        if (len(replay) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_isOver) = replay.sample(BATCH_SIZE)
            loss = agent.learn(batch_obs, batch_action, batch_reward,
                               batch_next_obs, batch_isOver)
            losses.append(loss)

        total_reward += reward
        state = next_state
        if isOver:
            break
    # np.mean([]) would emit a RuntimeWarning and return nan during warm-up;
    # report 0.0 when no learning step happened instead.
    mean_loss = float(np.mean(losses)) if losses else 0.0
    return total_reward, mean_loss


def evaluate(agent, env, render=False):
    """Average the greedy policy's episode reward over 5 evaluation runs.

    Args:
        agent: agent exposing predict() for greedy action selection.
        env: gym environment.
        render: when True, render each step to the screen.

    Returns:
        Mean total reward across the 5 episodes.
    """
    episode_rewards = []
    for _ in range(5):
        obs = convert_image(env.reset())
        accumulated = 0
        done = False
        while not done:
            chosen = agent.predict(obs)
            if render:
                env.render()
            obs, step_reward, done, _ = env.step(chosen)
            obs = convert_image(obs)
            accumulated += step_reward
        episode_rewards.append(accumulated)
    return np.mean(episode_rewards)


# Persist the agent's model parameters.
def save(agent):
    """Write the learn/predict network parameters under the working directory."""
    print('save model')
    cwd = os.getcwd()
    agent.save_params(os.path.join(cwd, 'learn'), os.path.join(cwd, 'predict'))

def load(agent):
    """Restore previously saved learn/predict network parameters."""
    cwd = os.getcwd()
    learn_dir = os.path.join(cwd, 'learn')
    predict_dir = os.path.join(cwd, 'predict')
    print('load model', learn_dir, predict_dir)
    agent.load_params(learn_dir, predict_dir)

def main():
    """Train a DQN agent on SpaceInvaders-v4: warm up the replay memory,
    then alternate 50 training episodes with one evaluation round, saving
    model parameters every 5 rounds."""
    env = gym.make('SpaceInvaders-v4')
    action_dim = env.action_space.n
    logger.auto_set_dir('d')

    #obs_shape = env.observation_space.shape

    rpm = ReplayMemory(MEMORY_SIZE)

    # NOTE(review): CartpoleModel/CartpoleAgent are used here despite the
    # SpaceInvaders environment — presumably the classes were reused and only
    # the names are stale; confirm against cartpole_model/cartpole_agent.
    model = CartpoleModel(act_dim=action_dim)
    algorithm = parl.algorithms.DQN(
        model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = CartpoleAgent(
        algorithm,
        obs_dim=obs_dim,
        act_dim=action_dim,
        e_greed=0.2,  # explore
        e_greed_decrement=1e-6
    )  # probability of exploring is decreasing during training

    # Either resume from saved parameters or write an initial checkpoint.
    if load_model:
        load(agent)
    else:
        save(agent)

    logger.info('warmup start')

    while len(rpm) < MEMORY_WARMUP_SIZE:  # warm up replay memory
        run_episode(agent, env, rpm)

    max_episode = 200000
    logger.info('warmup done {}'.format(MEMORY_WARMUP_SIZE))
    # start train
    episode = 0
    save_period = 0
    while episode < max_episode:
        train_r = []
        # train part: 50 episodes per round
        for i in range(0, 50):
            total_reward, total_loss = run_episode(agent, env, rpm)
            episode += 1
            train_r.append(total_reward)
            print("episode")
            logger.info('episode:{}\ttotal_reward:{:.2f}\ttotal_loss:{:.2f}'.format(episode, total_reward, total_loss))

        # evaluation round after each 50-episode training batch
        eval_reward = evaluate(agent, env)
        logger.info('one round')
        logger.info('episode:{}\ttrain_reward:{:.2f}\ttest_reward:{:.2f}'.format(
            episode, np.mean(train_r), eval_reward))

        save_period += 1
        if save_period % 5 == 0: # save a checkpoint every 5 rounds
            save(agent)


# Entry point: start training only when run as a script, not on import.
if __name__ == '__main__':
    main()
