import gym
import numpy as np
from collections import namedtuple, deque
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

# Transition record stored in the replay buffer: one environment step
# (state, action taken, resulting state, reward received, terminal flag).
Experience = namedtuple('Experience', ('state', 'action', 'next_state', 'reward', 'done'))


class ReplayBuffer:
    """Fixed-capacity FIFO store of experience tuples for off-policy training."""

    def __init__(self, capacity):
        # deque with maxlen evicts the oldest entry automatically once full.
        self.buffer = deque(maxlen=capacity)

    def add(self, experience):
        """Append one experience tuple to the buffer."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw a uniform random mini-batch and split it field-wise.

        Returns (states, actions, next_states, rewards, dones); states and
        next_states are stacked into NumPy arrays, the remaining fields are
        returned as tuples.
        """
        picked = random.sample(self.buffer, batch_size)
        columns = tuple(zip(*picked))  # transpose: per-sample -> per-field
        states = np.array(columns[0])
        next_states = np.array(columns[2])
        return states, columns[1], next_states, columns[3], columns[4]

    def __len__(self):
        """Number of experiences currently held."""
        return len(self.buffer)


class DQN(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        # Attribute names fc1/fc2/fc3 are kept so state_dict keys stay stable.
        self.fc1 = nn.Linear(input_dim, 64)   # input -> 64 hidden units
        self.fc2 = nn.Linear(64, 64)          # 64 -> 64 hidden units
        self.fc3 = nn.Linear(64, output_dim)  # 64 -> one output per action

    def forward(self, x):
        """Return raw (unbounded) Q-value estimates for each action."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)


class DQNAgent:
    """DQN agent with an epsilon-greedy policy, experience replay and a
    periodically-synchronised target network.

    NOTE(review): assumes the classic Gym API — ``env.reset()`` returns an
    observation and ``env.step()`` returns a 4-tuple (obs, reward, done,
    info), i.e. Gym < 0.26. Verify against the installed Gym version.
    """

    def __init__(self, env, buffer_capacity=10000, batch_size=64, gamma=0.99, epsilon=1.0, epsilon_decay=0.995,
                 epsilon_min=0.01, target_update=10):
        self.env = env
        self.buffer = ReplayBuffer(buffer_capacity)  # experience replay memory
        self.batch_size = batch_size        # mini-batch size per training step
        self.gamma = gamma                  # discount factor for future rewards
        self.epsilon = epsilon              # current exploration probability
        self.epsilon_decay = epsilon_decay  # multiplicative decay per training step
        self.epsilon_min = epsilon_min      # floor for epsilon
        self.target_update = target_update  # sync target net every N training steps

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Online network (trained) and target network (frozen between syncs).
        self.policy_net = DQN(env.observation_space.shape[0], env.action_space.n).to(self.device)
        self.target_net = DQN(env.observation_space.shape[0], env.action_space.n).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())  # start identical
        self.target_net.eval()  # eval mode; the target net is never trained directly

        self.optimizer = optim.Adam(self.policy_net.parameters())  # updates policy net only

        self.steps_done = 0  # number of completed training steps

    def select_action(self, state):
        """Return an action via epsilon-greedy: random with probability
        ``epsilon``, otherwise the argmax of the policy network's Q-values."""
        if np.random.rand() < self.epsilon:
            return self.env.action_space.sample()
        with torch.no_grad():
            state = torch.FloatTensor(state).to(self.device).unsqueeze(0)  # add batch dim
            return self.policy_net(state).argmax().item()

    def store_experience(self, state, action, next_state, reward, done):
        """Record one transition in the replay buffer."""
        self.buffer.add(Experience(state, action, next_state, reward, done))

    def train(self):
        """Run one gradient step on a sampled mini-batch.

        No-op until the buffer holds at least ``batch_size`` transitions.
        Also decays epsilon and periodically hard-syncs the target network.
        """
        if len(self.buffer) < self.batch_size:
            return

        states, actions, next_states, rewards, dones = self.buffer.sample(self.batch_size)

        states = torch.FloatTensor(states).to(self.device)
        actions = torch.LongTensor(actions).unsqueeze(1).to(self.device)   # (B, 1) for gather
        next_states = torch.FloatTensor(next_states).to(self.device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(self.device)

        # Q(s, a) for the actions actually taken.
        state_action_values = self.policy_net(states).gather(1, actions)

        # The TD target must not track gradients through the target network:
        # eval() alone does not disable autograd, so the original version
        # built a useless graph here every step. no_grad() fixes that.
        with torch.no_grad():
            next_state_values = self.target_net(next_states).max(1)[0].unsqueeze(1)
            # Terminal transitions (done == 1) contribute only the reward.
            expected_state_action_values = rewards + (1 - dones) * self.gamma * next_state_values

        # Huber loss between predicted and bootstrapped Q-values.
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Decay exploration after each training step, clamped at epsilon_min.
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

        self.steps_done += 1
        if self.steps_done % self.target_update == 0:
            # Hard sync: copy online weights into the target network.
            self.target_net.load_state_dict(self.policy_net.state_dict())

    def run_episode(self, max_steps=1000):
        """Play one episode (up to ``max_steps``), training after every step.

        Returns the undiscounted sum of rewards collected.
        """
        state = self.env.reset()
        total_reward = 0

        for _ in range(max_steps):
            action = self.select_action(state)
            next_state, reward, done, _ = self.env.step(action)  # classic 4-tuple Gym API
            total_reward += reward

            self.store_experience(state, action, next_state, reward, done)
            self.train()

            state = next_state

            if done:
                break  # episode ended before max_steps

        return total_reward


if __name__ == "__main__":
    # Train a DQN agent on CartPole-v1 and report the reward of each episode.
    environment = gym.make('CartPole-v1')
    agent = DQNAgent(environment)

    num_episodes = 1000  # total training episodes
    for episode in range(num_episodes):
        total_reward = agent.run_episode()
        print(f"Episode {episode + 1}, Reward: {total_reward}, Epsilon: {agent.epsilon}")

    environment.close()  # release the environment's resources
