import tensorflow as tf
import gym
import numpy as np
from keras import layers, models
import random
import collections

# Hyper Parameters for DQN
GAMMA = 0.9  # discount factor for target Q
INITIAL_EPSILON = 0.5  # starting value of epsilon
FINAL_EPSILON = 0.01  # final value of epsilon
MEMORY_SIZE = 20000  # capacity of the replay memory; larger values use more RAM
MEMORY_WARMUP_SIZE = 200  # minimum experiences stored before batches are sampled for learning
BATCH_SIZE = 32  # size of minibatch
REPLACE_TARGET_FREQ = 10  # frequency (in episodes) to update target Q network
LEARNING_RATE = 0.1  # learning rate for the optimizer

# ---------------------------------------------------------
# Hyper Parameters
ENV_NAME = "CartPole-v1"
MAXEPISODE = 3000  # Episode limitation
STEP = 300  # Step limitation in an episode
LEARN_FREQ = 5  # learn once every LEARN_FREQ environment steps (original comment was misleading)
MAXT_TIME = 300  # maximum number of steps per episode in run_episode

class ReplayMemory:
    """Fixed-capacity experience replay buffer for off-policy learning."""

    def __init__(self, max_size):
        # A bounded deque silently drops the oldest experience once full.
        self.buffer = collections.deque(maxlen=max_size)

    def append(self, exp):
        """Store one (obs, action, reward, next_obs, done) transition."""
        self.buffer.append(exp)

    def sample(self, batch_size):
        """Draw a uniform random minibatch and return five typed numpy arrays.

        Returns:
            (obs, actions, rewards, next_obs, dones) with dtypes
            float32 / int32 / float32 / float32 / float32 respectively.
        """
        batch = random.sample(self.buffer, batch_size)
        # Transpose the list of transitions into per-field tuples.
        obs, actions, rewards, next_obs, dones = zip(*batch)
        return (
            np.array(obs).astype("float32"),
            np.array(actions).astype("int32"),
            np.array(rewards).astype("float32"),
            np.array(next_obs).astype("float32"),
            np.array(dones).astype("float32"),
        )

    def __len__(self):
        return len(self.buffer)


class Model:
    """Holds the evaluation and target networks for (Double) DQN.

    Both networks share the same architecture:
    Input(obs_n) -> Dense(32, relu, "l1") -> Dense(act_dim, "l3").
    The layer names are relied upon by DDQN.replace_target().
    """

    def __init__(self, obs_n, act_dim):
        # obs_n: flat observation size; act_dim: number of discrete actions.
        self.act_dim = act_dim
        self.obs_n = obs_n
        self._build_model()

    def _make_net(self, hidden_size):
        """Build one MLP Q-head; factored out of the duplicated original code."""
        net = models.Sequential()
        # BUGFIX: shape must be a tuple; the original passed a bare int
        # `shape=(self.obs_n)` (missing trailing comma).
        net.add(layers.Input(shape=(self.obs_n,)))
        net.add(layers.Dense(hidden_size, activation="relu", name="l1"))
        net.add(layers.Dense(self.act_dim, name="l3"))
        net.summary()
        return net

    def _build_model(self):
        hid1_size = 32
        hid2_size = 32
        # ------------------ build evaluate_net ------------------
        self.eval_model = self._make_net(hid1_size)
        # ------------------ build target_model ------------------
        self.target_model = self._make_net(hid2_size)

    def predict(self, s):
        """Return Q-values [Q(s,a1), ..., Q(s,an)] from the evaluation net."""
        return self.eval_model.predict(s)

    def save(self, path):
        """Persist only the evaluation network (the target net is derived from it)."""
        self.eval_model.save(path)

    def loss(self, labels, pred_action_value):
        # BUGFIX: keras models have no `loss_func` attribute, so the original
        # raised AttributeError when called. DDQN assigns a loss object to
        # `eval_model.loss`; delegate to that. NOTE(review): this method looks
        # unused — DDQN.loss goes through eval_model.loss directly.
        return self.eval_model.loss(labels, pred_action_value)


class DDQN:
    """Double DQN learner: the eval net selects the next action, the target
    net evaluates it, decoupling selection from evaluation."""

    def __init__(self, model: Model, gamma=0.9, learnging_rate=0.01):
        # NOTE(review): `learnging_rate` is a typo, but callers pass it by
        # keyword, so the parameter name is kept for backward compatibility.
        self.model = model.eval_model
        self.target_model = model.target_model
        self.gamma = gamma
        self.lr = learnging_rate
        # -------------------- training setup -------------------- #
        self.model.optimizer = tf.keras.optimizers.Adam(learning_rate=learnging_rate)
        self.model.loss = tf.losses.MeanSquaredError()

        self.global_step = 0
        # Copy eval-net weights into the target net every 200 training steps.
        self.update_target_steps = 200

    def predict(self, obs):
        """Return [Q(s,a1), Q(s,a2), ...] from the evaluation network."""
        return self.model.predict(obs)

    def loss(self, labels, pred_action_value):
        """Compute the configured loss (MSE) between targets and predictions."""
        return self.model.loss(labels, pred_action_value)

    def learn(self, batch_obs, batch_action, batch_reward, batch_next_obs, batch_done):
        """Run one gradient step on a minibatch using the Double-DQN target.

        Expects float32 obs/reward/done arrays and int32 actions, as produced
        by ReplayMemory.sample().
        """
        if self.global_step % self.update_target_steps == 0:
            self.replace_target()

        # Double DQN: the eval net picks the best next action ...
        next_q_eval = self.model.predict(batch_next_obs)
        best_next_action = np.argmax(next_q_eval, axis=1)
        # ... and the target net supplies that action's value.
        next_q_target = self.target_model.predict(batch_next_obs)

        # Vectorized Bellman target; (1 - done) zeroes the bootstrap term on
        # terminal transitions. Replaces the original per-sample Python loop,
        # works for any batch size (not just BATCH_SIZE), and
        # BUGFIX: uses self.gamma instead of the module constant GAMMA the
        # original hard-coded (the constructor argument was ignored).
        batch_idx = np.arange(len(batch_reward))
        best_next_value = next_q_target[batch_idx, best_next_action]
        q_target_values = (
            batch_reward + self.gamma * (1.0 - batch_done) * best_next_value
        ).astype("float32")

        with tf.GradientTape() as tape:
            # Q-values of the actions actually taken in the batch.
            q_values = self.model(batch_obs, training=True)
            indexed_actions = list(enumerate(batch_action))
            # gather_nd picks element (i, batch_action[i]) from each row.
            q_values = tf.gather_nd(params=q_values, indices=indexed_actions)
            # BUGFIX: the original passed (predictions, labels) into a
            # loss(labels, predictions) signature; MSE is symmetric so the
            # value was unchanged, but the order is corrected for clarity.
            loss = self.loss(q_target_values, q_values)
        # Backpropagate and apply the update to the evaluation network only.
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.model.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        self.global_step += 1
        print("梯度更新")

    def update_target_q_network(self, episode):
        """Sync the target net every REPLACE_TARGET_FREQ episodes (when called)."""
        if episode % REPLACE_TARGET_FREQ == 0:
            self.replace_target()

    def replace_target(self):
        """Copy the eval network's layer weights into the target network."""
        for layer_name in ("l1", "l3"):
            self.target_model.get_layer(name=layer_name).set_weights(
                self.model.get_layer(name=layer_name).get_weights()
            )


class Agent:
    """Epsilon-greedy actor wrapped around a DDQN learner."""

    def __init__(self, act_dim, algorithm: DDQN, e_greed=0.1, e_greed_decrement=0):
        self.act_dim = act_dim
        self.algorithm = algorithm
        self.e_greed = e_greed
        self.e_greed_decrement = e_greed_decrement

    def sample(self, obs):
        """Epsilon-greedy action: explore with prob e_greed, else exploit."""
        if np.random.rand() < self.e_greed:
            # Explore: pick a uniformly random action.
            act = np.random.randint(self.act_dim)
        else:
            # Exploit: pick the greedy action from the Q-network.
            act = self.predict(obs)
        # Anneal epsilon toward FINAL_EPSILON as training progresses.
        self.e_greed = max(FINAL_EPSILON, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Return the greedy action (argmax Q) for a single observation."""
        batched_obs = tf.expand_dims(obs, axis=0)
        q_values = self.algorithm.predict(batched_obs)
        return np.argmax(q_values)


def run_episode(env, algorithm: DDQN, agent: Agent, rpm: ReplayMemory, sample=True):
    """Run one training episode, storing transitions and learning periodically.

    Args:
        env: the gym environment.
        algorithm: the DDQN learner.
        agent: epsilon-greedy actor.
        rpm: replay memory that transitions are appended to.
        sample: when True, replace the env reward with a shaped one
            (-1 on termination, 0.1 otherwise); used during warm-up.

    Returns:
        Total (possibly shaped) reward accumulated over the episode.
    """
    total_reward = 0
    obs = env.reset()
    # Newer gym versions return (obs, info) from reset(); unwrap it once here
    # instead of re-checking on every loop iteration as the original did.
    if len(obs) == 2:
        obs = obs[0]
    for step in range(MAXT_TIME):
        action = agent.sample(obs)
        # NOTE(review): the 5-tuple step API is (obs, reward, terminated,
        # truncated, info); truncation is ignored here — confirm intended.
        next_obs, reward, done, _, _ = env.step(action)
        if sample:  # FIX: idiomatic truth test instead of `== True`
            reward = -1 if done else 0.1
        rpm.append((obs, action, reward, next_obs, done))
        # Only learn once the buffer is warmed up, and only every LEARN_FREQ steps.
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (
                batch_obs,
                batch_action,
                batch_reward,
                batch_next_obs,
                batch_done,
            ) = rpm.sample(BATCH_SIZE)
            algorithm.learn(
                batch_obs, batch_action, batch_reward, batch_next_obs, batch_done
            )
        obs = next_obs
        total_reward += reward
        if done:
            break
    return total_reward


# 评估 agent, 跑 5 个episode，总reward求平均
def evaluate(env, agent: Agent, render=False):
    eval_reward = []
    for i in range(5):
        obs = env.reset()
        episode_reward = 0
        while True:
            if len(obs) == 2:
                obs = obs[0]
            action = agent.predict(obs)  # 预测动作，只选最优动作
            obs, reward, done, _, _ = env.step(action)
            episode_reward += reward
            if render:
                env.render()
            if done:
                break
        eval_reward.append(episode_reward)
    return np.mean(eval_reward)


def main():
    """Train a Double-DQN agent on CartPole and save the final model."""
    env = gym.make(ENV_NAME)
    n_actions = env.action_space.n  # CartPole: 2 discrete actions
    obs_shape = env.observation_space.shape  # CartPole: (4,)

    memory = ReplayMemory(MEMORY_SIZE)
    model = Model(obs_shape[0], n_actions)
    algorithm = DDQN(model, gamma=GAMMA, learnging_rate=LEARNING_RATE)
    agent = Agent(
        n_actions, algorithm, e_greed=INITIAL_EPSILON, e_greed_decrement=1e-6
    )

    # Warm up: fill the replay buffer before any learning happens.
    while len(memory) < MEMORY_WARMUP_SIZE:
        run_episode(env, algorithm, agent, memory)

    # Train for max_episode episodes; evaluation runs are not counted.
    episode = 0
    max_episode = 2000
    while episode < max_episode:
        # Train for 50 episodes ...
        for _ in range(50):
            run_episode(env, algorithm, agent, memory, False)
            episode += 1
        # ... then evaluate with the greedy policy.
        eval_reward = evaluate(env, agent, render=True)
        print(
            "episode:{}   e_greed:{}   Test reward:{}".format(
                episode, agent.e_greed, eval_reward
            )
        )
    # Training finished; persist the evaluation network.
    model.save("./dqn_model.h5")
    env.close()


# Script entry point: only start training when run directly, not on import.
if __name__ == "__main__":
    main()
