import gymnasium as gym
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.distributions import Categorical
import matplotlib.pyplot as plt

# Select the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Environment setup: CartPole-v1 (no rendering during training).
env = gym.make("CartPole-v1", render_mode=None)
state_dim = env.observation_space.shape[0]  # observation dimensionality (4 for CartPole)
action_dim = env.action_space.n  # number of discrete actions (2 for CartPole)


class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network for discrete action spaces.

    One hidden layer feeds two heads: the actor outputs raw action
    scores (logits), the critic outputs a scalar state-value estimate.
    """

    def __init__(self, in_dim=None, n_actions=None):
        """Build the network.

        Args:
            in_dim: observation dimensionality. Defaults to the
                module-level ``state_dim`` inferred from the environment.
            n_actions: number of discrete actions. Defaults to the
                module-level ``action_dim``.

        The defaults keep the original no-argument construction working
        while allowing reuse with other environment sizes.
        """
        super().__init__()
        # Lazy fallback to the module globals so the defaults track the
        # environment created at import time.
        in_dim = state_dim if in_dim is None else in_dim
        n_actions = action_dim if n_actions is None else n_actions
        self.fc = nn.Sequential(
            nn.Linear(in_dim, 128),  # input layer: obs dim -> 128-d hidden
            nn.ReLU(),  # activation
        )
        self.actor = nn.Linear(128, n_actions)  # action logits head
        self.critic = nn.Linear(128, 1)  # state-value head (scalar per state)

    def forward(self, x):
        """Return (logits, value) for a batch of states x of shape (B, in_dim)."""
        h = self.fc(x)
        return self.actor(h), self.critic(h)

    def act(self, state):
        """Sample one action for a single (unbatched) state; returns a Python int."""
        state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        logits, _ = self.forward(state)
        probs = torch.softmax(logits, dim=-1)
        return Categorical(probs).sample().cpu().item()


# Initialize model and optimizer.
model = ActorCritic().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Hyperparameters
GAMMA = 0.99 # discount factor
LAMBDA = 0.95   # GAE smoothing parameter (lambda)
EPSILON = 0.2   # PPO clipping range (NOTE: original comment called this an "exploration rate"; it is the clip epsilon)
EPOCHS = 4  # optimization epochs per collected rollout
BATCH_SIZE = 64  # minibatch size, kept small to suit CartPole
MAX_STEPS = 2000  # environment steps collected per rollout

# Completed-episode returns; appended to by collect_trajectories().
episode_rewards = []


def collect_trajectories(env, model, steps):
    """Roll the current policy out for `steps` environment steps.

    Returns six parallel lists of length `steps`: states, actions, rewards,
    done flags, behavior-policy log-probs, and critic value estimates.
    Completed-episode rewards are appended to the module-level
    `episode_rewards`; the partial reward of an episode still in progress
    when collection stops is discarded.

    NOTE(review): the final next_state is not returned, so a caller cannot
    bootstrap from the true last observation -- confirm this is intended.
    """
    buf_states, buf_actions, buf_rewards = [], [], []
    buf_dones, buf_logps, buf_values = [], [], []

    obs, _ = env.reset()
    running_reward = 0

    for _ in range(steps):
        # Batch the single observation and move it to the compute device.
        obs_t = torch.FloatTensor(obs).unsqueeze(0).to(device)

        # Inference only -- no gradients while acting.
        with torch.no_grad():
            logits, value = model(obs_t)
            prob = torch.softmax(logits, dim=-1)
            act = Categorical(prob).sample().cpu().item()
            # Clamp guards against numerical underflow in the log-prob.
            logp = (
                Categorical(prob)
                .log_prob(torch.tensor(act, device=device))
                .clamp(min=-20)
                .cpu()
                .item()
            )

        nxt, rew, terminated, truncated, _ = env.step(act)
        finished = terminated or truncated  # merge the two end-of-episode signals

        buf_states.append(obs)
        buf_actions.append(act)
        buf_rewards.append(rew)
        buf_dones.append(finished)
        buf_logps.append(logp)
        buf_values.append(value.cpu().item())

        obs = nxt
        running_reward += rew

        if finished:
            episode_rewards.append(running_reward)
            running_reward = 0
            obs, _ = env.reset()

    return buf_states, buf_actions, buf_rewards, buf_dones, buf_logps, buf_values


def compute_advantages(rewards, values, dones, gamma=None, lam=None):
    """Compute GAE advantages and bootstrapped value targets.

    Args:
        rewards: per-step rewards, length T.
        dones: per-step episode-end flags, length T.
        values: per-step value estimates; may have length T+1 when the
            caller appends a bootstrap value for the final state.
        gamma: discount factor; defaults to the module constant GAMMA.
        lam: GAE smoothing factor; defaults to the module constant LAMBDA.

    Returns:
        (advantages, returns) as float32 tensors of length T. Advantages
        are normalized to zero mean / unit std; returns are the raw
        lambda-return targets for the critic.
    """
    # Lazy defaults so the module constants still govern in-file callers.
    gamma = GAMMA if gamma is None else gamma
    lam = LAMBDA if lam is None else lam

    T = len(rewards)
    advantages = [0.0] * T
    returns = [0.0] * T
    gae = 0.0

    # Backward recursion over the rollout.
    for t in reversed(range(T)):
        mask = 1.0 - float(dones[t])  # zero out across episode boundaries
        next_value = values[t + 1] if t + 1 < len(values) else 0.0
        # TD error
        delta = rewards[t] + gamma * mask * next_value - values[t]
        # GAE accumulator
        gae = delta + gamma * lam * mask * gae
        advantages[t] = gae
        # Bug fix: the critic target must be consistent with the GAE
        # estimate and bootstrap from the appended last value. The old
        # code computed a plain discounted reward sum that ignored the
        # bootstrap, biasing the value loss on truncated rollouts.
        # returns = advantages + values is the standard lambda-return.
        returns[t] = gae + values[t]

    advantages = torch.tensor(advantages, dtype=torch.float32)
    returns = torch.tensor(returns, dtype=torch.float32)

    # Normalize advantages (epsilon avoids division by zero).
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
    return advantages, returns


def ppo_update(states, actions, old_log_probs, advantages, returns):
    """Run one PPO optimization phase over a collected rollout.

    Performs EPOCHS passes of clipped-surrogate minibatch SGD on the
    module-level `model` using `optimizer`. Per-minibatch loss is
    policy clip loss + 0.5 * value MSE - 0.01 * entropy bonus.

    Args:
        states, actions, old_log_probs: parallel per-step rollout data.
        advantages: normalized GAE advantages (tensor).
        returns: critic targets (tensor).
    """
    # Convert rollout data to tensors on the compute device.
    states = torch.FloatTensor(np.array(states)).to(device)
    actions = torch.LongTensor(actions).to(device)
    old_log_probs = torch.FloatTensor(old_log_probs).to(device)
    advantages = advantages.to(device)
    returns = returns.to(device)

    indices = np.arange(len(states))  # [0, 1, ..., T-1]
    for _ in range(EPOCHS):  # multiple passes over the same rollout
        np.random.shuffle(indices)
        for start in range(0, len(states), BATCH_SIZE):
            batch_idx = indices[start:start + BATCH_SIZE]

            logits, values = model(states[batch_idx])
            # Build the distribution once and reuse it for both log-probs
            # and entropy (was constructed twice per minibatch). The
            # logits parameterization is also numerically stable.
            dist = Categorical(logits=logits)
            new_log_probs = dist.log_prob(actions[batch_idx])

            # Importance ratio pi_new / pi_old, clipped PPO objective.
            ratio = (new_log_probs - old_log_probs[batch_idx]).exp()
            surr1 = ratio * advantages[batch_idx]
            surr2 = torch.clamp(ratio, 1.0 - EPSILON, 1.0 + EPSILON) * advantages[batch_idx]
            policy_loss = -torch.min(surr1, surr2).mean()

            # squeeze(-1), not squeeze(): a final minibatch of size 1 must
            # keep its batch dimension rather than collapse to 0-d.
            value_loss = 0.5 * (values.squeeze(-1) - returns[batch_idx]).pow(2).mean()
            entropy = dist.entropy().mean()

            loss = policy_loss + 0.5 * value_loss - 0.01 * entropy

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


def train():
    """Main loop: collect rollouts, build GAE targets, run PPO updates.

    Prints a 10-episode moving average every 10 iterations and stops
    early once it exceeds 490 (CartPole-v1 max return is 500).
    """
    for episode in range(1000):
        # Collect an on-policy rollout of MAX_STEPS transitions.
        states, actions, rewards, dones, old_log_probs, values = \
            collect_trajectories(env, model, MAX_STEPS)

        # Append a bootstrap value estimate for the advantage recursion.
        # NOTE(review): states[-1] is the state the last action was taken
        # FROM, not the resulting next state, so this bootstrap is one
        # step stale when the rollout ends mid-episode -- confirm intent.
        with torch.no_grad():
            last_state = torch.FloatTensor(states[-1]).unsqueeze(0).to(device)
            _, last_value = model(last_state)
            values.append(last_value.cpu().item())

        # Advantages and critic targets, then the PPO update.
        advantages, returns = compute_advantages(rewards, values, dones)
        ppo_update(states, actions, old_log_probs, advantages, returns)

        if episode % 10 == 0:
            # Guard: np.mean([]) is nan (plus a RuntimeWarning) when no
            # episode has finished yet. A slice shorter than 10 already
            # yields the whole list, so one expression covers both the
            # "fewer than 10" and "at least 10" cases.
            if episode_rewards:
                avg_reward = float(np.mean(episode_rewards[-10:]))
                print(f"Episode {episode}, Avg Reward (last 10): {avg_reward:.2f}")

                # Early-stopping condition.
                if avg_reward > 490:
                    print("Solved!")
                    break

    env.close()


# Entry point: run training, then plot the per-episode reward curve.
# Guarded so that importing this module does not trigger a full training
# run and a blocking plot window as side effects.
if __name__ == "__main__":
    train()

    plt.figure(figsize=(10, 5))
    plt.plot(episode_rewards, label="Episode Reward")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.title("Training Rewards")
    plt.legend()
    plt.show()