import datetime
import os

import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, layers, activations

from dqn.dqn import Agent


class QNetwork(Model):
    """MLP mapping an observation vector to one Q-value per discrete action."""

    def __init__(self, num_classes):
        """Build the network.

        Args:
            num_classes: number of discrete actions, i.e. output units.
        """
        super().__init__()
        # Fully connected ReLU trunk; he_uniform pairs well with ReLU.
        self.dense1 = layers.Dense(units=256, activation=activations.relu, kernel_initializer='he_uniform')
        self.dense2 = layers.Dense(units=128, activation=activations.relu, kernel_initializer='he_uniform')
        self.dense3 = layers.Dense(units=64, activation=activations.relu, kernel_initializer='he_uniform')
        self.dense4 = layers.Dense(units=16, activation=activations.relu, kernel_initializer='he_uniform')
        # Linear head: raw Q-values, one per action.
        self.dense5 = layers.Dense(units=num_classes, activation=activations.linear, kernel_initializer='he_uniform')

    def call(self, inputs, **kwargs):
        """Forward pass; returns Q-values with shape (batch, num_classes)."""
        x = self.dense1(inputs)
        x = self.dense2(x)
        x = self.dense3(x)
        x = self.dense4(x)
        return self.dense5(x)

    def predict(self, inputs, **kwargs):
        """Return the greedy (argmax) action index for each input observation.

        NOTE(review): this shadows keras.Model.predict with different
        semantics — it returns action indices, not the raw model output.
        Callers expecting the standard Keras contract should use call().
        """
        q_values = self(inputs)
        return tf.argmax(q_values, axis=-1)


def main():
    """Train a DQN agent on CartPole-v1 and log metrics to TensorBoard."""
    num_episodes = 5000  # total number of training episodes
    num_exploration_episodes = 100  # episodes over which epsilon is annealed
    max_len_episode = 1000  # maximum number of steps per episode
    early_stop_count = 0

    batch_size = 1000  # replay-batch size for each training step
    learning_rate = 0.001  # Adam learning rate

    gamma = 0.95  # discount factor
    # epsilon = 1.  # initial exploration rate
    epsilon_decay = 0.01
    # epsilon_min = 0.01  # final exploration rate

    env = gym.make('CartPole-v1')  # instantiate the environment by name
    env._max_episode_steps = max_len_episode

    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    agent = Agent(env, optimizer, q_network=QNetwork, input_shape=env.observation_space.shape, gamma=gamma,
                  epsilon_decay=epsilon_decay)

    loss_object = tf.keras.losses.MeanSquaredError()
    # Sum (not Mean): the logged value is the loss accumulated over the episode.
    train_loss = tf.keras.metrics.Sum(name='train_loss', dtype=tf.float32)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs/gradient_tape/' + current_time + '-openai_gym/train'
    summary_writer = tf.summary.create_file_writer(train_log_dir)

    for i_episodes in range(1, num_episodes + 1):
        obs = env.reset()

        episode_reward = 0

        for t in range(1, max_len_episode + 1):
            env.render()
            # epsilon-greedy strategy: with probability epsilon pick a random action
            action = agent.epsilon_greedy_policy(obs)
            # step the environment: get next state, reward, done flag and extra info
            next_obs, reward, done, _ = env.step(action)
            # large negative reward on a premature game over (t < 500)
            reward = -10. if done and t < 500 else reward
            # push the (state, action, reward, next_state) tuple (plus done flag) into the replay buffer
            agent.store(obs, action, reward, next_obs, done)
            # advance the current state
            obs = next_obs

            episode_reward += reward

            if done:  # episode over: anneal epsilon, log, and move to the next episode
                agent.update_epsilon(num_exploration_episodes, i_episodes)
                early_stop_count = max(early_stop_count - 1 if t < 500 else early_stop_count + 1, 0)
                template = "episode: {}, epsilon: {}, score: {}, early stop count: {}."
                print(template.format(i_episodes, agent.epsilon, t, early_stop_count))
                break

            # only train once the buffer holds at least one full batch
            if len(agent.replay_buffer) >= batch_size:
                agent.train(batch_size, loss_object, train_loss)

        # sync the target network with the online network once per episode
        agent.align_target_model()

        with summary_writer.as_default():
            # print('Loss', train_loss.result(), "Episode Reward", episode_reward)
            tf.summary.scalar('Epsilon', agent.epsilon, step=i_episodes)
            tf.summary.scalar("Episode Reward", episode_reward, step=i_episodes)
            # if i_episodes >= 200:
            tf.summary.scalar('Loss', train_loss.result(), step=i_episodes)
            # tf.summary.scalar("Max Q Value", episode_ave_max_q)
        train_loss.reset_states()

        # periodic checkpoint every 500 episodes
        if i_episodes % 500 == 0:
            agent.save_model(os.path.join('saved', current_time + "_" + str(i_episodes)))


def evaluate():
    """Roll out one greedy episode with a saved model and print its length."""
    env = gym.make('CartPole-v1')
    env._max_episode_steps = 10000
    # Bug fix: the original discarded load_model's return value and ran a
    # freshly-initialized (untrained) QNetwork instead of the saved weights.
    loaded_model = tf.keras.models.load_model(os.path.join('saved', '20200630-203032_725'))

    done = False
    t = 0
    state = env.reset()
    while not done:
        env.render()
        state_tensor = tf.convert_to_tensor(np.expand_dims(state, axis=0))
        q_values = loaded_model(state_tensor)
        # greedy action: index of the largest Q-value
        action = tf.argmax(q_values, axis=-1).numpy()[0]
        next_state, reward, done, info = env.step(action)
        state = next_state
        # Bug fix: the step counter was never incremented, so the summary
        # always reported "finished after 1 steps".
        t += 1
        if done:
            print("Episode finished after {} steps".format(t))
    env.close()


if __name__ == '__main__':
    # Cap GPU memory at 1 GiB so TensorFlow does not grab the whole device.
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if physical_gpus:
        virtual_config = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)]
        try:
            tf.config.experimental.set_virtual_device_configuration(physical_gpus[0], virtual_config)
        except RuntimeError as e:
            # Virtual devices must be configured before GPUs are initialized.
            print(e)
    main()
    # evaluate()
