import numpy as np

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

import gym


class MemoryBuffer(object):
    """Fixed-size cyclic replay buffer of (state, action, reward,
    next_state, not-done) transitions for DQN training.

    Once full, the oldest transitions are overwritten in ring-buffer order.
    """

    def __init__(self, mem_size=1000000, input_dims=8, n_actions=4,
                 batch_size=64):
        """Allocate storage.

        Args:
            mem_size: maximum number of transitions kept.
            input_dims: dimensionality of the observation vector.
            n_actions: number of discrete actions.
            batch_size: default number of samples per training batch.
        """
        # Capacity of the ring buffer.
        self.mem_size = mem_size
        # Total transitions stored so far (may exceed mem_size).
        self.mem_cntr = 0
        # Dimensionality of the observation vector.
        self.input_dims = input_dims
        # Number of discrete actions (attribute name kept for compatibility).
        self.n_action = n_actions
        # Default number of samples drawn per training batch.
        self.batch_size = batch_size
        # Parallel arrays holding the transition components.
        self.state_memory = np.zeros((self.mem_size, self.input_dims))
        # Widened from int8 so large generalized action spaces fit safely.
        self.action_memory = np.zeros(self.mem_size, dtype=np.int32)
        self.reward_memory = np.zeros(self.mem_size)
        self.next_state_memory = np.zeros((self.mem_size, self.input_dims))
        # BUG FIX: np.bool was removed in NumPy 1.24; use the builtin bool.
        self.done_memory = np.zeros(self.mem_size, dtype=bool)

    def store_memory(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest slot when full."""
        index = self.mem_cntr % self.mem_size
        self.state_memory[index] = state
        self.action_memory[index] = action
        self.reward_memory[index] = reward
        self.next_state_memory[index] = next_state
        # NOTE: deliberately stores NOT-done (1 for non-terminal, 0 for
        # terminal) so the Bellman bootstrap term in Agent.learn() can be
        # multiplied by this flag directly.
        self.done_memory[index] = 1 - int(done)
        self.mem_cntr += 1

    def sample(self, batch_size):
        """Return a random batch of stored transitions.

        Returns:
            Tuple of (states, actions, rewards, next_states, not_dones)
            arrays, each with leading dimension batch_size.
        """
        # Only sample from slots that have actually been written.
        max_mem = min(self.mem_cntr, self.mem_size)
        # BUG FIX: sample without replacement so a single batch never
        # contains duplicate transitions (the original sampled with
        # replacement). Caller guarantees max_mem >= batch_size.
        batch = np.random.choice(max_mem, batch_size, replace=False)
        states = self.state_memory[batch]
        actions = self.action_memory[batch]
        rewards = self.reward_memory[batch]
        next_states = self.next_state_memory[batch]
        dones = self.done_memory[batch]
        return states, actions, rewards, next_states, dones


class Agent(object):
    """Epsilon-greedy DQN agent with an experience-replay buffer.

    Holds a single Q-network trained on minibatches sampled from
    MemoryBuffer; exploration decays multiplicatively per learn() call.
    """

    def __init__(self):
        # Dimensionality of the observation vector.
        self.input_dims = 8
        # Discrete action space.
        self.action_space = [0, 1, 2, 3]
        self.n_actions = 4
        # Discount factor for future rewards.
        self.gamma = 0.99
        # Exploration rate and its annealing schedule.
        self.epsilon = 1.0
        self.epsilon_decay = 0.996
        self.epsilon_min = 0.01
        # Minibatch size for training.
        self.batch_size = 64
        self.memory = MemoryBuffer()
        self.dqn = self.build_dqn()

    def build_dqn(self):
        """Build and compile the Q-network: two 256-unit ReLU hidden
        layers and a linear head with one output per action."""
        model = Sequential()
        model.add(Dense(units=256,
                        input_shape=(self.input_dims,),
                        activation='relu'))
        model.add(Dense(units=256,
                        activation='relu'))
        # Linear output: raw Q-value estimates per action.
        model.add(Dense(units=self.n_actions,
                        activation=None))
        # BUG FIX: the `lr` argument was removed from Keras optimizers;
        # `learning_rate` is the supported name.
        model.compile(optimizer=Adam(learning_rate=0.0005),
                      loss='mse',
                      metrics=None)
        return model

    def remember(self, state, action, reward, next_state, done):
        """Store one environment transition in the replay buffer."""
        self.memory.store_memory(state, action, reward, next_state, done)

    def choose_action(self, state):
        """Return an action for `state` using the epsilon-greedy policy."""
        # Add a batch dimension for the network.
        state = state[np.newaxis, :]
        if np.random.random() < self.epsilon:
            # Explore: uniform random action.
            action = np.random.choice(self.action_space)
        else:
            # Exploit: pick the action with the highest predicted Q-value.
            # verbose=0 suppresses Keras per-call progress output.
            actions = self.dqn.predict(state, verbose=0)
            action = np.argmax(actions)
        return action

    def learn(self):
        """Run one gradient step on a sampled minibatch and anneal epsilon.

        No-op until the buffer holds at least one full batch.
        """
        if self.memory.mem_cntr < self.batch_size:
            return
        state, action, reward, next_state, done = self.memory.sample(self.batch_size)
        # Current Q-value estimates for the sampled states / next states.
        q_eval = self.dqn.predict(state, verbose=0)
        q_next = self.dqn.predict(next_state, verbose=0)
        # Start from the network's own predictions so only the taken
        # actions receive a corrected target.
        q_target = q_eval.copy()
        # BUG FIX: int8 overflows for batch sizes > 127; use a wide int.
        batch_index = np.arange(self.batch_size, dtype=np.int64)
        # `done` holds 1 for NON-terminal transitions (see MemoryBuffer),
        # so the bootstrap term vanishes at episode ends.
        q_target[batch_index, action] = reward + self.gamma * np.max(q_next, axis=1) * done
        self.dqn.fit(state, q_target, verbose=0)
        # Multiplicative epsilon annealing down to the floor.
        if self.epsilon > self.epsilon_min:
            self.epsilon = self.epsilon * self.epsilon_decay
        else:
            self.epsilon = self.epsilon_min


def main():
    """Train a DQN agent on LunarLander-v2 for a fixed number of episodes,
    printing the cumulative reward of each episode."""
    # Build the simulator environment for the Lunar Lander game.
    env = gym.make('LunarLander-v2')
    # Create the learning agent.
    agent = Agent()
    # Number of interaction/training episodes.
    n_episodes = 500
    for episode in range(n_episodes):
        # Reset the environment and the per-episode bookkeeping.
        state = env.reset()
        episode_reward = 0
        done = False
        while not done:
            # Pick an action for the current state and apply it.
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            # Accumulate the episode's reward.
            episode_reward += reward
            # Record the transition, advance the state, and train.
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            agent.learn()
        print(f'Episode {episode}/{n_episodes} ---> Total Reward: {episode_reward}')


# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
