import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
import random
import torch
import torch.nn as nn
import torch.optim as optim

# Create the CartPole environment (balance a pole on a cart; the episode
# ends when the pole tips over or the cart leaves the track).
env = gym.make('CartPole-v1')

# Select the compute device: CUDA when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Q-network: maps an observation vector to one Q-value per discrete action.
class DQN(nn.Module):
    """MLP with two 128-unit hidden layers and a linear output head."""

    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.out = nn.Linear(128, action_dim)

    def forward(self, x):
        """Return the Q-value estimates for every action, shape (..., action_dim)."""
        hidden1 = torch.relu(self.fc1(x))
        hidden2 = torch.relu(self.fc2(hidden1))
        return self.out(hidden2)

# Experience replay: fixed-capacity FIFO store of transitions; once full,
# the oldest transition is evicted on each push.
class ReplayBuffer:
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) tuple."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random mini-batch; return five stacked numpy arrays."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return (np.array(states), np.array(actions), np.array(rewards),
                np.array(next_states), np.array(dones))

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

# Hyperparameters
state_dim = env.observation_space.shape[0]  # observation vector size, read from the env
action_dim = env.action_space.n  # number of discrete actions, read from the env
lr = 0.001  # Adam learning rate
gamma = 0.99  # discount factor for future rewards
epsilon_start = 1.0  # initial exploration probability
epsilon_end = 0.01  # final exploration probability
epsilon_decay = 500  # exponential-decay time constant, in episodes
buffer_capacity = 50000  # max transitions kept in the replay buffer
batch_size = 64  # mini-batch size per gradient step
target_update_freq = 10  # episodes between target-network syncs
episodes = 500  # total training episodes

# Initialize the policy/target networks, optimizer, and replay buffer
policy_net = DQN(state_dim, action_dim).to(device)
target_net = DQN(state_dim, action_dim).to(device)
target_net.load_state_dict(policy_net.state_dict())  # start target identical to policy
optimizer = optim.Adam(policy_net.parameters(), lr=lr)
replay_buffer = ReplayBuffer(buffer_capacity)

# Epsilon-greedy policy over the policy network's Q-values.
def epsilon_greedy(state, epsilon):
    """Return the greedy action w.p. (1 - epsilon), else a uniform random one.

    state: environment observation (sequence/ndarray of length state_dim).
    epsilon: exploration probability in [0, 1].
    """
    if random.random() > epsilon:
        # Exploit: argmax_a Q(state, a). No gradients are needed for acting.
        with torch.no_grad():
            # as_tensor on a contiguous ndarray avoids the slow (and, on
            # modern torch, warning-emitting) list-of-ndarray construction
            # that torch.tensor([state]) used.
            s = torch.as_tensor(np.asarray(state, dtype=np.float32)).unsqueeze(0).to(device)
            return policy_net(s).argmax().item()
    # Explore: sample an action uniformly at random.
    return random.randrange(action_dim)

# Training loop
rewards_history = []
for episode in range(episodes):
    state = env.reset()
    total_reward = 0
    done = False
    # Exponentially anneal epsilon from epsilon_start toward epsilon_end.
    # It depends only on the episode index, so compute it once per episode
    # instead of on every environment step as before.
    epsilon = epsilon_end + (epsilon_start - epsilon_end) * np.exp(-1. * episode / epsilon_decay)
    while not done:
        action = epsilon_greedy(state, epsilon)
        next_state, reward, done, _ = env.step(action)  # pre-0.26 gym step API
        total_reward += reward
        replay_buffer.push(state, action, reward, next_state, done)
        state = next_state

        # Learn once enough transitions have accumulated.
        if len(replay_buffer) >= batch_size:
            s, a, r, s_, d = replay_buffer.sample(batch_size)
            s = torch.FloatTensor(s).to(device)
            a = torch.LongTensor(a).unsqueeze(1).to(device)
            r = torch.FloatTensor(r).unsqueeze(1).to(device)
            s_ = torch.FloatTensor(s_).to(device)
            d = torch.FloatTensor(d).unsqueeze(1).to(device)

            # Q(s, a) for the actions actually taken.
            q_values = policy_net(s).gather(1, a)
            # Bootstrap target from the frozen target network. no_grad stops
            # autograd from building a graph through target_net (the original
            # computed this with gradients enabled, wasting time and memory).
            with torch.no_grad():
                max_next_q = target_net(s_).max(1)[0].unsqueeze(1)
            # Terminal transitions (d == 1) contribute only the immediate reward.
            expected_q = r + gamma * max_next_q * (1 - d)

            loss = nn.MSELoss()(q_values, expected_q)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    rewards_history.append(total_reward)

    # Periodically sync the target network with the policy network.
    if episode % target_update_freq == 0:
        target_net.load_state_dict(policy_net.state_dict())

    if episode % 50 == 0:
        print(f"Episode {episode}, Reward: {total_reward:.2f}")

# Plot the per-episode total reward curve and save it to disk.
plt.plot(rewards_history)
plt.xlabel("Episode")
plt.ylabel("Total Reward")
plt.title("Training Rewards over Episodes")
plt.grid(True)
plt.savefig("training_rewards.png")
plt.show()

# Release the environment's resources.
env.close()
