import gym  # 引入OpenAI Gym提供的环境
import numpy as np  # 引入NumPy库用于数值计算
import random  # 引入random模块用于随机数生成
from collections import deque, namedtuple  # 引入deque和namedtuple用于定义经验回放缓冲区

import torch  # 引入PyTorch深度学习库
import torch.nn as nn  # 引入PyTorch神经网络模块
import torch.optim as optim  # 引入PyTorch优化器模块
import torch.nn.functional as F  # 引入PyTorch函数模块

# A single transition record: (state, action, next_state, reward, done).
Experience = namedtuple('Experience', ['state', 'action', 'next_state', 'reward', 'done'])

# Experience replay buffer (uniform sampling).
class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for off-policy training."""

    def __init__(self, capacity):
        # A bounded deque silently evicts the oldest entry once full.
        self.buffer = deque(maxlen=capacity)

    def add(self, experience):
        """Append one transition tuple to the buffer."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly at random.

        Returns five NumPy arrays: states, actions, next_states,
        rewards (float32) and done flags (uint8), batched along axis 0.
        """
        picked = random.sample(self.buffer, batch_size)
        # Transpose the list of transitions into per-field columns.
        states, actions, next_states, rewards, dones = zip(*picked)
        return (
            np.array(states),
            np.array(actions),
            np.array(next_states),
            np.array(rewards, dtype=np.float32),
            np.array(dones, dtype=np.uint8),
        )

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

# Actor (policy) network.
class Actor(nn.Module):
    """Deterministic policy: maps a state to a bounded continuous action.

    A 3-layer MLP with ReLU hidden activations; the tanh output is scaled
    by `action_bound`, so actions lie in [-action_bound, action_bound].
    """

    def __init__(self, state_dim, action_dim, action_bound):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, 64)   # state -> 64 hidden units
        self.fc2 = nn.Linear(64, 64)          # 64 -> 64 hidden units
        self.fc3 = nn.Linear(64, action_dim)  # 64 -> action dimension
        # Scale factor applied after tanh to reach the action range.
        self.action_bound = torch.tensor(action_bound, dtype=torch.float32)

    def forward(self, state):
        hidden = self.fc1(state).relu()
        hidden = self.fc2(hidden).relu()
        # tanh squashes to (-1, 1); multiply to hit the action bound.
        return torch.tanh(self.fc3(hidden)) * self.action_bound

# Critic (action-value) network.
class Critic(nn.Module):
    """Q-function approximator: estimates Q(s, a) for a state-action pair.

    Concatenates state and action, then passes them through a 3-layer MLP
    with ReLU hidden activations to a single scalar output.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 64)  # (s, a) -> 64
        self.fc2 = nn.Linear(64, 64)                      # 64 -> 64
        self.fc3 = nn.Linear(64, 1)                       # 64 -> scalar Q

    def forward(self, state, action):
        joint = torch.cat((state, action), dim=1)  # stack along features
        joint = F.relu(self.fc1(joint))
        joint = F.relu(self.fc2(joint))
        return self.fc3(joint)  # raw (unbounded) Q-value

# DDPG agent.
class DDPGAgent:
    """Deep Deterministic Policy Gradient agent.

    Holds an actor (policy) and a critic (Q-function) plus slowly-tracking
    target copies of each, trains them from a uniform replay buffer, and
    soft-updates the targets with rate `tau` after every gradient step.
    """

    def __init__(self, env, buffer_capacity=10000, batch_size=64, gamma=0.99, tau=0.001):
        """Build networks and optimizers for `env`'s continuous action space.

        Args:
            env: environment with Box observation and action spaces.
            buffer_capacity: maximum transitions kept in the replay buffer.
            batch_size: mini-batch size sampled per training step.
            gamma: discount factor for future rewards.
            tau: soft-update coefficient for the target networks.
        """
        self.env = env
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
        # NOTE(review): only the first dimension's bound is used — assumes a
        # symmetric action box with identical bounds per dimension; confirm
        # for environments other than Pendulum.
        self.action_bound = env.action_space.high[0]

        self.actor = Actor(self.state_dim, self.action_dim, self.action_bound)
        self.target_actor = Actor(self.state_dim, self.action_dim, self.action_bound)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-3)

        self.critic = Critic(self.state_dim, self.action_dim)
        self.target_critic = Critic(self.state_dim, self.action_dim)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)

        # Start the target networks as exact copies of the online networks.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

        self.buffer = ReplayBuffer(buffer_capacity)
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau

    def select_action(self, state):
        """Return the policy's action for `state` as a flat NumPy array.

        NOTE: purely greedy — no exploration noise is added.
        """
        state = torch.FloatTensor(state.reshape(1, -1))
        # Inference only: no_grad avoids building an autograd graph.
        with torch.no_grad():
            action = self.actor(state)
        return action.numpy().flatten()

    def store_experience(self, state, action, next_state, reward, done):
        """Pack one transition into an Experience tuple and buffer it."""
        self.buffer.add(Experience(state, action, next_state, reward, done))

    def train(self):
        """Perform one DDPG gradient step: critic, then actor, then targets.

        Silently skips until the buffer holds at least one full batch.
        """
        if len(self.buffer) < self.batch_size:
            return

        states, actions, next_states, rewards, dones = self.buffer.sample(self.batch_size)

        states = torch.FloatTensor(states)
        actions = torch.FloatTensor(actions)
        next_states = torch.FloatTensor(next_states)
        rewards = torch.FloatTensor(rewards).unsqueeze(1)  # -> (batch, 1)
        dones = torch.FloatTensor(dones).unsqueeze(1)      # -> (batch, 1)

        # ---- Critic update: regress Q(s, a) toward the TD target. ----
        # Targets come from the target networks; no_grad keeps them constant
        # (replaces the original's .detach() on the loss side).
        with torch.no_grad():
            next_actions = self.target_actor(next_states)
            # (1 - done) zeroes the bootstrap term at episode ends.
            target_q = rewards + self.gamma * (1 - dones) * self.target_critic(next_states, next_actions)
        expected_q = self.critic(states, actions)
        critic_loss = F.mse_loss(expected_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # ---- Actor update: ascend the critic's value of actor(s). ----
        actor_loss = -self.critic(states, self.actor(states)).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft-update the target networks exactly ONCE per training step.
        # (Bug fix: the original called soft_update twice back to back,
        # which effectively doubled tau.)
        self.soft_update(self.target_actor, self.actor)
        self.soft_update(self.target_critic, self.critic)

    def soft_update(self, target_net, net):
        """Polyak-average `net` into `target_net`: t <- tau*p + (1-tau)*t."""
        for target_param, param in zip(target_net.parameters(), net.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def run(self, num_episodes=1000):
        """Interact with the environment for `num_episodes`, training online.

        Accepts both the classic Gym API (reset -> obs, step -> 4-tuple) and
        the Gym >= 0.26 / Gymnasium API (reset -> (obs, info), step ->
        5-tuple with terminated/truncated) — the original only handled the
        classic API, which is incompatible with gym.make(render_mode=...).
        """
        for episode in range(1, num_episodes + 1):
            reset_result = self.env.reset()
            # New-style reset returns (obs, info); old-style returns obs only.
            state = reset_result[0] if isinstance(reset_result, tuple) else reset_result
            total_reward = 0
            done = False

            while not done:
                action = self.select_action(state)
                step_result = self.env.step(action)
                if len(step_result) == 5:
                    # Gym >= 0.26: episode ends on termination OR truncation.
                    next_state, reward, terminated, truncated, _ = step_result
                    done = terminated or truncated
                else:
                    next_state, reward, done, _ = step_result
                self.store_experience(state, action, next_state, reward, done)
                self.train()

                state = next_state
                total_reward += reward

            print(f"Episode: {episode}, Total Reward: {total_reward}")


# Create the environment (Pendulum-v1 as an example).
# NOTE(review): the render_mode keyword only exists in gym >= 0.26, but
# DDPGAgent.run() above unpacks the pre-0.26 step/reset API — confirm the
# installed gym version; also rendering every step with 'human' mode slows
# training considerably.
env = gym.make('Pendulum-v1',render_mode='human')

# Create the DDPG agent around the environment.
agent = DDPGAgent(env)

# Train for 100 episodes (total reward is printed per episode).
agent.run(num_episodes=100)

# Release the environment's resources (e.g. the render window).
env.close()