import torch  # 引入 PyTorch 库，用于构建和训练深度学习模型
import torch.nn as nn  # PyTorch 的神经网络模块
import torch.optim as optim  # PyTorch 的优化模块，用于更新模型参数
import numpy as np  # NumPy 库，用于高效的数值计算
import gym  # OpenAI Gym 库，用于创建和交互强化学习环境
import random  # Python 的随机模块，用于随机抽样
from collections import deque  # Python 的双端队列模块，用于构建经验回放缓冲区

# Hyperparameter settings
GAMMA = 0.99  # Discount factor applied to future rewards
TAU = 0.005  # Soft-update coefficient for target networks  # NOTE(review): defined but never used below — target-network soft updates appear to be missing
ALPHA = 0.2  # Entropy regularization coefficient; encourages exploration
LR = 0.001  # Learning rate shared by all optimizers
BATCH_SIZE = 256  # Mini-batch size for each training step
MEMORY_CAPACITY = 100000  # Maximum number of transitions kept in the replay buffer

# 策略网络（用于生成随机的策略动作）
class PolicyNetwork(nn.Module):
    """Squashed-Gaussian policy network.

    Maps a state to the mean and standard deviation of a diagonal Gaussian,
    and can sample a tanh-squashed action in [-max_action, max_action]
    together with its log-probability (including the tanh change-of-variables
    correction).
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(PolicyNetwork, self).__init__()
        # Two 256-unit hidden layers, then separate heads for the Gaussian
        # mean and log standard deviation.
        self.fc1 = nn.Linear(state_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.mean = nn.Linear(256, action_dim)
        self.log_std = nn.Linear(256, action_dim)
        self.max_action = max_action  # scale applied to the squashed action

    def forward(self, state):
        """Return (mean, std) of the pre-squash Gaussian for `state`."""
        hidden = torch.relu(self.fc2(torch.relu(self.fc1(state))))
        mu = self.mean(hidden)
        # Clamp log-std so exp() stays numerically well-behaved.
        sigma = torch.exp(self.log_std(hidden).clamp(-20, 2))
        return mu, sigma

    def sample(self, state):
        """Sample an action and its log-probability via reparameterization."""
        mu, sigma = self.forward(state)
        dist = torch.distributions.Normal(mu, sigma)
        raw = dist.rsample()  # reparameterized draw keeps gradients flowing
        squashed = torch.tanh(raw)  # bound the action to [-1, 1]
        # tanh change-of-variables term; the epsilon guards against log(0).
        log_prob = dist.log_prob(raw) - torch.log(1 - squashed.pow(2) + 1e-6)
        log_prob = log_prob.sum(dim=-1, keepdim=True)  # sum over action dims
        return squashed * self.max_action, log_prob

# Q 网络（价值函数，用于评估状态-动作对的价值）
class QNetwork(nn.Module):
    """State-action value function: maps (state, action) to a scalar Q value."""

    def __init__(self, state_dim, action_dim):
        super(QNetwork, self).__init__()
        # The state and action are concatenated, so the first layer takes
        # their combined width.
        self.fc1 = nn.Linear(state_dim + action_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)  # single scalar Q value

    def forward(self, state, action):
        """Return Q(state, action)."""
        x = torch.cat([state, action], dim=-1)
        for layer in (self.fc1, self.fc2):
            x = torch.relu(layer(x))
        return self.fc3(x)

# 经验回放缓冲区
class ReplayBuffer:
    """Fixed-capacity FIFO store of (state, action, reward, next_state, done)
    transitions with uniform random mini-batch sampling."""

    def __init__(self, capacity):
        # deque silently evicts the oldest transition once capacity is hit.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one transition to the buffer."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly and return stacked arrays."""
        batch = random.sample(self.buffer, batch_size)
        # Transpose the list of tuples into per-field arrays.
        states, actions, rewards, next_states, dones = map(np.array, zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Current number of stored transitions."""
        return len(self.buffer)

# SAC 算法智能体
class SACAgent:
    """Soft Actor-Critic agent with twin critics and target critics.

    Fix over the previous revision: the Bellman target was bootstrapped from
    the *online* Q networks and the TAU constant was never used.  SAC needs
    slowly-moving target critics for stable learning; this version maintains
    `q1_target`/`q2_target` and Polyak-averages them with coefficient TAU
    after every gradient step.  The public interface is unchanged.
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Prefer the GPU when one is available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Online networks.
        self.actor = PolicyNetwork(state_dim, action_dim, max_action).to(self.device)
        self.q1 = QNetwork(state_dim, action_dim).to(self.device)
        self.q2 = QNetwork(state_dim, action_dim).to(self.device)

        # Target critics start as exact copies of the online critics and are
        # only ever modified by the soft update below (no optimizer).
        self.q1_target = QNetwork(state_dim, action_dim).to(self.device)
        self.q2_target = QNetwork(state_dim, action_dim).to(self.device)
        self.q1_target.load_state_dict(self.q1.state_dict())
        self.q2_target.load_state_dict(self.q2.state_dict())

        # Optimizers for the online networks.
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=LR)
        self.q1_optimizer = optim.Adam(self.q1.parameters(), lr=LR)
        self.q2_optimizer = optim.Adam(self.q2.parameters(), lr=LR)

        # Experience replay.
        self.replay_buffer = ReplayBuffer(MEMORY_CAPACITY)
        self.max_action = max_action  # action bound used by the policy

    def select_action(self, state):
        """Sample an action from the current (stochastic) policy."""
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
        action, _ = self.actor.sample(state)
        return action.cpu().detach().numpy()[0]

    def train(self):
        """Run one SAC update: critics, then actor, then target soft update."""
        if len(self.replay_buffer) < BATCH_SIZE:
            return  # not enough experience collected yet

        # Sample a mini-batch and move it to the training device.
        states, actions, rewards, next_states, dones = self.replay_buffer.sample(BATCH_SIZE)
        states = torch.FloatTensor(states).to(self.device)
        actions = torch.FloatTensor(actions).to(self.device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
        next_states = torch.FloatTensor(next_states).to(self.device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(self.device)

        # --- Critic update -------------------------------------------------
        with torch.no_grad():
            next_actions, log_probs = self.actor.sample(next_states)
            # Clipped double-Q using the *target* critics, plus entropy bonus.
            target_q = torch.min(
                self.q1_target(next_states, next_actions),
                self.q2_target(next_states, next_actions),
            ) - ALPHA * log_probs
            # Terminal transitions (done=1) contribute no bootstrapped value.
            q_target = rewards + GAMMA * (1 - dones) * target_q

        q1_loss = ((self.q1(states, actions) - q_target) ** 2).mean()
        q2_loss = ((self.q2(states, actions) - q_target) ** 2).mean()

        self.q1_optimizer.zero_grad()
        q1_loss.backward()
        self.q1_optimizer.step()

        self.q2_optimizer.zero_grad()
        q2_loss.backward()
        self.q2_optimizer.step()

        # --- Actor update --------------------------------------------------
        new_actions, log_probs = self.actor.sample(states)
        q_new = torch.min(self.q1(states, new_actions), self.q2(states, new_actions))
        # Maximize Q while keeping entropy high (minimize alpha*logp - Q).
        actor_loss = (ALPHA * log_probs - q_new).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # --- Soft (Polyak) target update -----------------------------------
        self._soft_update(self.q1, self.q1_target)
        self._soft_update(self.q2, self.q2_target)

    def _soft_update(self, online, target):
        """target <- TAU * online + (1 - TAU) * target, parameter-wise."""
        with torch.no_grad():
            for p, tp in zip(online.parameters(), target.parameters()):
                tp.mul_(1 - TAU).add_(TAU * p)

    def update_replay_buffer(self, state, action, reward, next_state, done):
        """Store one environment transition in the replay buffer."""
        self.replay_buffer.push(state, action, reward, next_state, done)

# ---------------------------------------------------------------------------
# Training loop.
# Fix over the previous revision: env.reset()/env.step() were unpacked with
# the classic gym API only, which crashes on gym >= 0.26 (reset returns
# (obs, info); step returns a 5-tuple with terminated/truncated).  Both API
# versions are now handled.
# ---------------------------------------------------------------------------
env = gym.make("Pendulum-v1")  # create the environment
state_dim = env.observation_space.shape[0]  # size of the observation vector
action_dim = env.action_space.shape[0]  # size of the action vector
max_action = float(env.action_space.high[0])  # action bound used for scaling

agent = SACAgent(state_dim, action_dim, max_action)  # initialize the agent

num_episodes = 500  # total number of training episodes
for episode in range(num_episodes):
    # gym >= 0.26 returns (obs, info) from reset(); older versions return obs.
    reset_result = env.reset()
    state = reset_result[0] if isinstance(reset_result, tuple) else reset_result
    episode_reward = 0  # accumulated reward for this episode
    done = False

    while not done:
        action = agent.select_action(state)  # pick an action from the policy
        step_result = env.step(action)
        if len(step_result) == 5:
            # gym >= 0.26: (obs, reward, terminated, truncated, info)
            next_state, reward, terminated, truncated, _ = step_result
            done = terminated or truncated
        else:
            # classic gym: (obs, reward, done, info)
            next_state, reward, done, _ = step_result
        agent.update_replay_buffer(state, action, reward, next_state, done)
        agent.train()  # one gradient step per environment step
        state = next_state
        episode_reward += reward

    print(f"Episode {episode}, Reward: {episode_reward}")  # progress report