import numpy as np  # 导入numpy库，用于数组和数学运算
import tensorflow as tf  # 导入tensorflow库，用于构建和训练神经网络
import os  # 导入os库，用于设置环境变量

from keras.models import Model  # 从keras导入Model类，用于定义模型
from keras.layers import Dense, Input, Add  # 从keras导入构建模型所需的层类型
from keras.optimizers import RMSprop  # 从keras导入RMSprop优化器
from keras.initializers import glorot_normal  # 从keras导入Glorot初始化器

class DQN:
    """Deep Q-Network agent.

    Uses a dense network with additive skip connections as the Q-function
    approximator, a periodically-synced target network for stable TD
    targets, an epsilon-greedy behavior policy, and a circular replay
    buffer sampled in contiguous windows of length
    ``state_action_memory_size``.
    """

    def __init__(self, state_size, n_actions, n_nodes,
                 state_action_memory_size, memory_size=500, replace_target_iter=200, batch_size=32, learning_rate=0.01,
                 gamma=0.9, epsilon=1, epsilon_min=0.01, epsilon_decay=0.995):
        # --- hyperparameters ---
        self.state_size = state_size                    # dimension of a state vector
        self.n_actions = n_actions                      # number of discrete actions
        self.n_nodes = n_nodes                          # number of agents/nodes (stored only; unused in this class body)
        self.state_action_memory_size = state_action_memory_size  # length of a sampled transition window
        self.memory_size = memory_size                  # capacity of the replay buffer
        self.replace_target_iter = replace_target_iter  # learn() calls between target-network syncs
        self.batch_size = batch_size                    # minibatch size for fitting
        self.learning_rate = learning_rate              # RMSprop learning rate
        self.gamma = gamma                              # reward discount factor
        self.epsilon = epsilon                          # current exploration rate
        self.epsilon_min = epsilon_min                  # floor for epsilon
        self.epsilon_decay = epsilon_decay              # multiplicative decay applied per action choice

        # --- buffers ---
        # Rows hold [state | action] pairs (written elsewhere, if at all).
        self.state_action_memory = np.zeros((self.state_action_memory_size, self.state_size + 1))
        # Rows hold [s | a | r | s_]: state_size + 1 + 1 + state_size columns.
        self.memory = np.zeros((self.memory_size, self.state_size * 2 + 2))

        # --- counters ---
        self.learn_state_action_counter = 0
        self.learn_step_counter = 0  # number of learn() calls; drives target syncs
        self.memory_couter = 0       # total transitions stored (name kept for compatibility)

        # Online network (trained every learn()) and target network
        # (updated only at sync points, to stabilize the TD targets).
        self.model = self.build_DenseNet_model()
        self.target_model = self.build_DenseNet_model()

    def build_DenseNet_model(self):
        """Build and compile the Q-network.

        Six 64-unit ReLU layers with two additive skip connections and a
        linear output head of size ``n_actions``; compiled with MSE loss
        and RMSprop. Fixed initializer seeds make the weights reproducible.
        """
        inputs = Input(shape=(self.state_size,))
        h1 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=247))(inputs)
        h2 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=2407))(h1)
        h3 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=2403))(h2)
        h4 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=24457))(h3)
        # Residual connection: skip from h2 around h3/h4.
        add1 = Add()([h4, h2])
        h5 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=24657))(add1)
        h6 = Dense(64, activation="relu", kernel_initializer=glorot_normal(seed=27567))(h5)
        # Second residual connection: skip from add1 around h5/h6.
        add2 = Add()([h6, add1])
        # Linear Q-value head: one output per action.
        outputs = Dense(self.n_actions, kernel_initializer=glorot_normal(seed=242147))(add2)
        model = Model(inputs=inputs, outputs=outputs)
        # `learning_rate` replaces the deprecated `lr` keyword (Keras >= 2.3).
        model.compile(loss="mse", optimizer=RMSprop(learning_rate=self.learning_rate))
        return model

    def choose_action(self, state):
        """Select an action epsilon-greedily; decays epsilon on every call.

        Returns an int in [0, n_actions).
        """
        # Add a batch dimension for predict().
        state = state[np.newaxis, :]
        # Decay epsilon, clamped at epsilon_min.
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
        if np.random.random() < self.epsilon:
            # Explore: uniform over the FULL action space.
            # (Fix: was hard-coded `np.random.randint(0, 2)`, which ignored
            # n_actions and never explored actions >= 2.)
            return np.random.randint(0, self.n_actions)
        # Exploit: greedy w.r.t. the online network's Q-values.
        action_values = self.model.predict(state)
        return np.argmax(action_values)

    def store_transition(self, s, a, r, s_):
        """Append one transition (s, a, r, s_) to the circular replay buffer."""
        # (Removed a dead `hasattr(self, 'memory_couter')` guard:
        # __init__ always initializes the counter.)
        transition = np.concatenate((s, [a, r], s_))
        # Circular overwrite: oldest row is replaced once the buffer is full.
        index = self.memory_couter % self.memory_size
        self.memory[index, :] = transition
        self.memory_couter += 1

    def replace_target_parameters(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def learn(self):
        """Sample a window-based minibatch from replay memory and fit the
        online network on one-step TD targets."""
        # Periodically sync the target network with the online network.
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.replace_target_parameters()
        self.learn_step_counter += 1

        # Draw start indices only from the filled portion of the buffer.
        # (Sampling is with replacement — np.random.choice's default.)
        if self.memory_couter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_couter, size=self.batch_size)

        # For each sampled index derive a window [start, start + W - 1],
        # W = state_action_memory_size; starts too close to the buffer end
        # are re-drawn so the window stays in bounds.
        # NOTE(review): if memory_couter <= state_action_memory_size the
        # re-draw below calls randint on an empty range and raises —
        # callers appear expected to wait until enough transitions exist.
        # NOTE(review): before the buffer wraps, a window ending past
        # memory_couter reads still-zero rows — confirm intended.
        sample_index1 = []
        sample_index2 = []
        for i in sample_index:
            if i >= self.memory_size - self.state_action_memory_size:
                if self.memory_couter > self.memory_size:
                    a11 = np.random.randint(0, self.memory_size - self.state_action_memory_size)
                    sample_index1.append(a11)
                    sample_index2.append(a11 + self.state_action_memory_size - 1)
                else:
                    a12 = np.random.randint(0, self.memory_couter - self.state_action_memory_size)
                    sample_index1.append(a12)
                    sample_index2.append(a12 + self.state_action_memory_size - 1)
            else:
                sample_index1.append(i)
                sample_index2.append(i + self.state_action_memory_size - 1)

        # Window START rows supply (state, action); window END rows supply
        # (reward, next_state).
        batch_memory1 = self.memory[sample_index1, :]
        batch_memory2 = self.memory[sample_index2, :]

        state = batch_memory1[:, :self.state_size]
        action = batch_memory1[:, self.state_size].astype(int)
        reward = batch_memory2[:, self.state_size + 1]
        next_state = batch_memory2[:, -self.state_size:]

        # One-step TD target r + gamma * max_a' Q_target(s', a'), written
        # only into the taken action's slot so the other outputs keep their
        # current predictions (zero error, hence zero gradient, there).
        q_eval = self.model.predict(state)
        q_next = self.target_model.predict(next_state)
        q_target = q_eval.copy()
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        q_target[batch_index, action] = reward + self.gamma * np.max(q_next, axis=1)
        self.model.fit(state, q_target, self.batch_size, epochs=1, verbose=0)