import numpy as np  # 导入NumPy库，用于处理数组和数值计算
import torch  # 导入PyTorch库，用于构建和训练深度学习模型
import torch.nn as nn  # 导入PyTorch的神经网络模块，用于构建网络结构
import torch.optim as optim  # 导入PyTorch的优化器模块，用于优化神经网络参数
import random  # 导入Python的随机模块，用于实现随机采样
from collections import deque  # 导入deque数据结构，用于存储经验回放池

# Hyperparameters
GAMMA = 0.99  # Discount factor applied to future rewards
LR = 0.001  # Adam learning rate for the online network
BATCH_SIZE = 64  # Minibatch size per gradient update
MEMORY_CAPACITY = 10000  # Maximum number of transitions kept in the replay buffer
TARGET_UPDATE = 10  # Sync the target network every 10 episodes

# Define the neural network
class QNetwork(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_dim, action_dim):
        super(QNetwork, self).__init__()
        # Two 128-unit hidden layers followed by a linear output head.
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, action_dim)

    def forward(self, x):
        """Return Q-values of shape (batch, action_dim) for input states x."""
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        # No activation on the head: Q-values are unbounded real numbers.
        return self.fc3(hidden)

# Replay buffer
class ReplayBuffer:
    """Fixed-capacity FIFO store of (state, action, reward, next_state, done) transitions."""

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest entry once full.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append a single transition tuple to the buffer."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random minibatch and return it as five stacked arrays."""
        transitions = random.sample(self.buffer, batch_size)
        # Transpose the list of tuples into per-field sequences, then stack.
        states, actions, rewards, next_states, dones = map(np.array, zip(*transitions))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

# Double DQN Agent
class DoubleDQNAgent:
    """Double DQN agent: the online net selects actions, the target net scores them."""

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Two identical networks; the target net is a frozen, periodically
        # synced copy that stabilizes the bootstrap targets.
        self.online_net = QNetwork(state_dim, action_dim)
        self.target_net = QNetwork(state_dim, action_dim)
        self.target_net.load_state_dict(self.online_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.online_net.parameters(), lr=LR)
        self.memory = ReplayBuffer(MEMORY_CAPACITY)
        self.steps_done = 0  # total environment steps taken so far

    def select_action(self, state, epsilon):
        """Epsilon-greedy policy: random action w.p. epsilon, else argmax of Q."""
        if random.random() < epsilon:
            return random.randint(0, self.action_dim - 1)
        state_t = torch.FloatTensor(state).unsqueeze(0)  # add batch dimension
        with torch.no_grad():  # inference only, no gradients needed
            q_values = self.online_net(state_t)
        return q_values.argmax().item()

    def store_transition(self, state, action, reward, next_state, done):
        """Add one transition to the replay buffer."""
        self.memory.push(state, action, reward, next_state, done)

    def update(self):
        """Run one gradient step on a sampled minibatch; no-op while the buffer is small."""
        if len(self.memory) < BATCH_SIZE:
            return

        states, actions, rewards, next_states, dones = self.memory.sample(BATCH_SIZE)
        states = torch.FloatTensor(states)
        actions = torch.LongTensor(actions).unsqueeze(1)  # column vector for gather
        rewards = torch.FloatTensor(rewards).unsqueeze(1)
        next_states = torch.FloatTensor(next_states)
        dones = torch.FloatTensor(dones).unsqueeze(1)

        # Q(s, a) for the actions actually taken.
        q_values = self.online_net(states).gather(1, actions)

        # Double DQN target: the online net picks the best next action,
        # the target net evaluates it — decoupling selection from evaluation.
        with torch.no_grad():
            best_next = self.online_net(next_states).argmax(dim=1, keepdim=True)
            next_q = self.target_net(next_states).gather(1, best_next)
            # Terminal transitions (done=1) get no bootstrapped future value.
            targets = rewards + (1 - dones) * GAMMA * next_q

        loss = nn.MSELoss()(q_values, targets)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def update_target_network(self):
        """Hard-sync the target network weights from the online network."""
        self.target_net.load_state_dict(self.online_net.state_dict())

# Environment simulation (example: CartPole-v1)
import gym  # Gym/Gymnasium provides the CartPole benchmark environment

env = gym.make('CartPole-v1')
state_dim = env.observation_space.shape[0]  # size of the observation vector
action_dim = env.action_space.n  # number of discrete actions

agent = DoubleDQNAgent(state_dim, action_dim)

# Training Loop
num_episodes = 500  # total training episodes
epsilon_start = 1.0  # fully exploratory at the start
epsilon_end = 0.01  # exploration floor
epsilon_decay = 500  # decay time constant, in environment steps

for episode in range(num_episodes):
    state, _ = env.reset()
    done = False
    total_reward = 0
    while not done:
        # Exponentially decay epsilon with the global step counter.
        epsilon = epsilon_end + (epsilon_start - epsilon_end) * np.exp(-1. * agent.steps_done / epsilon_decay)
        action = agent.select_action(state, epsilon)

        # Gymnasium-style 5-tuple API: `terminated` marks a true terminal
        # state; `truncated` marks a time-limit cutoff (500 steps here).
        next_state, reward, terminated, truncated, _ = env.step(action)
        # BUG FIX: the episode must also end on truncation; the original
        # ignored `truncated`, so the loop kept stepping a finished env.
        done = terminated or truncated
        total_reward += reward

        # Store `terminated` (not `done`): truncation is not a real terminal
        # state, so the bootstrap term should only be zeroed on termination.
        agent.store_transition(state, action, reward, next_state, terminated)
        agent.update()

        state = next_state
        agent.steps_done += 1

    # Periodically hard-sync the target network for stable targets.
    if episode % TARGET_UPDATE == 0:
        agent.update_target_network()

    print(f"Episode {episode}, Total Reward: {total_reward}")

env.close()
