import datetime
import os

import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model, optimizers, activations

from notebook2.dqn import Agent


class QNetwork(Model):
    """Convolutional Q-value estimator: maps an image observation to one
    raw (linear) Q-value per discrete action."""

    def __init__(self, num_classes):
        """Build the network layers.

        Args:
            num_classes: number of discrete actions, i.e. output units.
        """
        super(QNetwork, self).__init__()
        # Three conv stages: spatial dims shrink (stride 2, 2, 1) while
        # channel width grows 32 -> 64 -> 128.
        self.conv1 = layers.Conv2D(32, 7, 2, "same", activation=activations.relu)
        self.conv2 = layers.Conv2D(64, 3, 2, "same", activation=activations.relu)
        self.conv3 = layers.Conv2D(128, 3, 1, "same", activation=activations.relu)

        self.flatten = layers.Flatten()
        self.fc = layers.Dense(128)
        self.out = layers.Dense(units=num_classes)

    def call(self, inputs, training=None, **kwargs):
        """Forward pass; returns Q-values of shape (batch, num_classes)."""
        features = inputs
        # Thread the tensor through each stage in order; same sequence as
        # the layers are declared above.
        for layer in (self.conv1, self.conv2, self.conv3,
                      self.flatten, self.fc, self.out):
            features = layer(features)
        return features

    def predict(self, inputs, **kwargs):
        """Return the greedy action index (argmax over Q-values) per sample.

        NOTE(review): this shadows keras.Model.predict with different
        semantics (action indices instead of raw model outputs) — confirm
        no caller expects the stock Keras behavior.
        """
        return tf.argmax(self(inputs), axis=-1)


# Mean grey level of the RGB triple [210, 164, 74] — presumably a dominant
# MsPacman sprite/background colour; matching pixels are zeroed during
# preprocessing to improve contrast. TODO(review): confirm colour origin.
color = np.array([210, 164, 74]).mean()


def preprocess_observation(obs):
    """Convert a raw Atari RGB frame into an (88, 80, 1) normalized image.

    Args:
        obs: RGB frame array; assumed shape (210, 160, 3) as emitted by
            standard Atari environments — confirm against the env.

    Returns:
        float array of shape (88, 80, 1) with values in [-1, 1).
    """
    # Crop the playfield (rows 1..175) and downsample by 2 in both axes.
    img = obs[1:176:2, ::2]
    # Convert the image to greyscale via the per-pixel channel mean.
    img = img.mean(axis=2)
    # Improve image contrast: zero out pixels matching the sentinel colour.
    img[img == color] = 0
    # Normalize from [0, 255] to [-1, 1). BUGFIX: the original expression
    # `(img - 128) / 128 - 1` had a stray `- 1`, producing a [-2, 0) range
    # despite its comment claiming "-1 to +1".
    img = (img - 128) / 128
    return img.reshape(88, 80, 1)


def main():
    """Train a DQN agent on MsPacman, logging epsilon/reward/loss to
    TensorBoard and periodically checkpointing the model."""
    num_episodes = 1000  # total number of training episodes
    num_exploration_episodes = 100  # episodes over which epsilon is annealed
    max_len_episode = 1000  # maximum number of steps per episode

    batch_size = 32
    learning_rate = 0.005  # Adam learning rate

    gamma = 0.95  # discount factor
    # epsilon = 1.  # initial exploration rate
    epsilon_decay = 0.01
    # epsilon_min = 0.01  # final exploration rate

    # .env unwraps the TimeLimit wrapper so episodes are not truncated early.
    env = gym.make("MsPacman-v0").env
    optimizer = optimizers.Adam(learning_rate=learning_rate)
    agent = Agent(env, optimizer, q_network=QNetwork, input_shape=(88, 80, 1), gamma=gamma,
                  epsilon_decay=epsilon_decay)
    # print(agent.model.summary())

    loss_object = tf.keras.losses.MeanSquaredError()
    # Sum of per-batch losses within one episode; reset after each episode below.
    train_loss = tf.keras.metrics.Sum(name='train_loss', dtype=tf.float32)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs/gradient_tape/' + current_time + '-openai_gym/train'
    summary_writer = tf.summary.create_file_writer(train_log_dir)

    for i_episodes in range(num_episodes):
        obs = env.reset()
        episode_reward = 0
        for t in range(max_len_episode):
            # env.render()
            obs = preprocess_observation(obs)
            # Epsilon-greedy policy: with probability epsilon pick a random action.
            action = agent.epsilon_greedy_policy(obs)
            # Step the environment: next state, reward, done flag, extra info
            # (old gym 4-tuple API).
            next_obs, reward, done, _ = env.step(action)
            # Push the (state, action, reward, next_state, done) tuple into
            # the experience replay buffer.
            agent.store(obs, action, reward, preprocess_observation(next_obs), done)
            # Advance to the next *raw* state; it gets preprocessed at the top
            # of the loop before the next action is chosen.
            obs = next_obs

            episode_reward += reward

            if done:  # episode over: anneal epsilon, then start the next episode
                agent.update_epsilon(num_exploration_episodes, i_episodes)
                print("episode: {}, epsilon: {}, score: {}".format(i_episodes, agent.epsilon, t))
                break

            # Train on a minibatch once the replay buffer holds enough samples.
            if len(agent.replay_buffer) >= batch_size:
                agent.train(batch_size, loss_object, train_loss)

        # Sync the target network with the online network once per episode.
        agent.align_target_model()

        with summary_writer.as_default():
            # print('Loss', train_loss.result(), "Episode Reward", episode_reward)
            tf.summary.scalar('Epsilon', agent.epsilon, step=i_episodes)
            tf.summary.scalar("Episode Reward", episode_reward, step=i_episodes)
            # if i_episodes >= 200:
            tf.summary.scalar('Loss', train_loss.result(), step=i_episodes)
            # tf.summary.scalar("Max Q Value", episode_ave_max_q)
        train_loss.reset_states()

        # Checkpoint after episode 200 and every 500 episodes thereafter.
        if i_episodes + 1 == 200 or (i_episodes + 1) % 500 == 0:
            agent.save_model(os.path.join('saved', current_time + "_" + str(i_episodes + 1)))


if __name__ == '__main__':
    # Cap GPU memory at 1024 MB so training does not grab the whole device.
    physical_gpus = tf.config.experimental.list_physical_devices('GPU')
    if physical_gpus:
        try:
            tf.config.experimental.set_virtual_device_configuration(
                physical_gpus[0],
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
        except RuntimeError as err:
            # Virtual devices must be configured before GPUs are initialized;
            # if that already happened, report the error and proceed anyway.
            print(err)
    main()
