import gymnasium as gym  # 导入 gymnasium 替代旧版 gym
import torch
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
import random
from torch.distributions import Categorical  # 新增导入


# Environment wrapper: caps episodes at 200 steps and penalizes early failure.
class MyWrapper(gym.Wrapper):
    def __init__(self):
        # CartPole-v1 with RGB-array rendering so frames can be shown inline.
        env = gym.make('CartPole-v1', render_mode='rgb_array')
        super().__init__(env)  # gym.Wrapper stores env as self.env; no extra assignment needed
        # Number of steps taken in the current episode.
        self.step_n = 0

    def reset(self, **kwargs):
        # Forward seed/options to the wrapped env (Gymnasium reset contract)
        # while keeping this file's "return state only" calling convention.
        state, _ = self.env.reset(**kwargs)
        self.step_n = 0
        return state

    def step(self, action):
        # Returns a simplified 3-tuple (state, reward, over) instead of the
        # standard 5-tuple; callers in this file rely on this shape.
        state, reward, terminated, truncated, info = self.env.step(action)
        over = terminated or truncated

        # Hard cap the episode length at 200 steps.
        self.step_n += 1
        if self.step_n >= 200:
            over = True

        # Penalize ending the episode before reaching the 200-step cap.
        if over and self.step_n < 200:
            reward = -1000

        return state, reward, over

    # Render the current frame inline with matplotlib.
    def show(self):
        plt.figure(figsize=(3, 3))
        plt.imshow(self.env.render())
        plt.axis('off')
        plt.show()


# Create the wrapped environment instance.
env = MyWrapper()
# Reset the environment and display the initial frame.
env.reset()
env.show()

# Policy network: maps a state to a probability distribution over the 2 actions.
model_action = torch.nn.Sequential(
    torch.nn.Linear(4, 64),  # input layer: 4 state features -> 64 hidden units
    torch.nn.ReLU(),  # activation
    torch.nn.Linear(64, 64),  # hidden layer
    torch.nn.ReLU(),  # activation
    torch.nn.Linear(64, 2),  # output layer: 64 hidden units -> 2 actions
    torch.nn.Softmax(dim=1),  # convert logits to a probability distribution
)

# Value network: estimates the scalar value of a state.
model_value = torch.nn.Sequential(
    torch.nn.Linear(4, 64),  # input layer: 4 state features -> 64 hidden units
    torch.nn.ReLU(),  # activation
    torch.nn.Linear(64, 64),  # hidden layer
    torch.nn.ReLU(),  # activation
    torch.nn.Linear(64, 1),  # output layer: 64 hidden units -> 1 value estimate
)

# Smoke-test both models on random batched input.
print("测试模型输出:", model_action(torch.randn(2, 4)), model_value(torch.randn(2, 4)))

# Buffers accumulating training statistics for plotting.
action_losses = []
value_losses = []
test_rewards = []
entropies = []  # policy entropy recorded at each gradient step


# Run one episode with the current policy and collect all transitions.
def play(show=False):
    """Play a single episode; return the transition tensors and total reward."""
    states, actions, rewards, next_states, overs = [], [], [], [], []

    s = env.reset()
    done = False
    while not done:
        # Sample an action from the policy's output probability distribution.
        probs = model_action(torch.FloatTensor(s).reshape(1, 4))[0].tolist()
        a = random.choices(range(2), weights=probs, k=1)[0]
        ns, r, done = env.step(a)

        states.append(s)
        actions.append(a)
        rewards.append(r)
        next_states.append(ns)
        overs.append(done)

        s = ns

        if show:
            display.clear_output(wait=True)
            env.show()

    # Pack the episode into tensors with explicit column shapes.
    state = torch.FloatTensor(states).reshape(-1, 4)
    action = torch.LongTensor(actions).reshape(-1, 1)
    reward = torch.FloatTensor(rewards).reshape(-1, 1)
    next_state = torch.FloatTensor(next_states).reshape(-1, 4)
    over = torch.LongTensor(overs).reshape(-1, 1)

    return state, action, reward, next_state, over, reward.sum().item()


# Play one test episode and print the total reward.
state, action, reward, next_state, over, reward_sum = play()
print("测试游戏总奖励:", reward_sum)

# Optimizers for the two networks (value net uses a larger learning rate).
optimizer_action = torch.optim.Adam(model_action.parameters(), lr=1e-3)
optimizer_value = torch.optim.Adam(model_value.parameters(), lr=1e-2)


# Toggle gradient tracking for every parameter of a model.
def requires_grad(model, value):
    """Set requires_grad on all parameters of *model* to *value*."""
    for parameter in model.parameters():
        parameter.requires_grad_(value)


# Fit the value network toward one-step TD targets; return the advantage.
def train_value(state, reward, next_state, over):
    requires_grad(model_action, False)  # policy net frozen during this phase
    requires_grad(model_value, True)    # value net is being trained

    # Bootstrapped TD target: r + 0.98 * V(s') * (1 - done).
    with torch.no_grad():
        target = model_value(next_state)
    target = target * 0.98 * (1 - over) + reward

    total_loss = 0.0
    # Re-fit the same batch for 10 gradient steps.
    for _ in range(10):
        value = model_value(state)

        loss = torch.nn.functional.mse_loss(value, target)
        total_loss += loss.item()

        optimizer_value.zero_grad()
        loss.backward()
        optimizer_value.step()

    # Record the average loss over the 10 steps.
    value_losses.append(total_loss / 10)

    # Subtracting the value estimate acts as a baseline, reducing variance.
    return (target - value).detach()


# Train the policy network with the PPO clipped-surrogate objective.
def train_action(state, action, value):
    """One PPO policy update on a single episode batch.

    Args:
        state:  (N, 4) float tensor of observed states.
        action: (N, 1) long tensor of actions taken.
        value:  (N, 1) tensor of per-step TD advantages from train_value.

    Returns:
        float: the surrogate loss from the final gradient step.
    """
    requires_grad(model_action, True)   # policy net is being trained
    requires_grad(model_value, False)   # value net frozen during this phase

    # Discounted advantage estimate:
    #   delta[i] = sum_{j >= i} value[j] * (0.98 * 0.95)^(j - i)
    # Computed with a single reverse scan (O(N)) instead of the naive
    # O(N^2) double loop; accumulates plain floats rather than tensors.
    discount = 0.98 * 0.95
    advantages = []
    running = 0.0
    for v in reversed(value.flatten().tolist()):
        running = v + discount * running
        advantages.append(running)
    advantages.reverse()
    delta = torch.FloatTensor(advantages).reshape(-1, 1)

    # Action probabilities under the pre-update policy (PPO ratio denominator).
    with torch.no_grad():
        prob_old = model_action(state).gather(dim=1, index=action)

    action_loss = 0
    # Re-use the same batch for 10 gradient steps.
    for _ in range(10):
        prob_new = model_action(state)
        dist = Categorical(prob_new)     # categorical distribution for entropy
        entropy = dist.entropy().mean()  # mean policy entropy (diagnostic only)

        # Record entropy once per gradient step.
        entropies.append(entropy.item())

        # Probability ratio pi_new(a|s) / pi_old(a|s).
        ratio = prob_new.gather(dim=1, index=action) / prob_old

        # Clipped surrogate objective: take the pessimistic (minimum) estimate.
        surr1 = ratio * delta
        surr2 = ratio.clamp(0.8, 1.2) * delta  # clipping stabilizes training

        loss = -torch.min(surr1, surr2).mean()  # negated: we maximize the objective
        action_loss += loss.item()

        loss.backward()
        optimizer_action.step()
        optimizer_action.zero_grad()

    # Record the average loss over the 10 steps.
    action_losses.append(action_loss / 10)

    return loss.item()


# Plot losses, test reward, and policy entropy gathered during training.
def plot_training():
    """Render the four training-diagnostic curves as a 2x2 subplot grid."""
    panels = [
        (1, 'Policy Network Loss', action_losses, 'Training Batch', 'Loss'),
        (2, 'Value Network Loss', value_losses, 'Training Batch', 'Loss'),
        (3, 'Average Test Reward', test_rewards, 'Training Epoch (×50)', 'Reward'),
        (4, 'Policy Entropy', entropies, 'Training Step', 'Entropy'),
    ]

    plt.figure(figsize=(16, 6))  # wide figure to fit all four panels
    for index, title, series, xlabel, ylabel in panels:
        plt.subplot(2, 2, index)
        plt.title(title)
        plt.plot(series)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)

    plt.tight_layout()
    display.clear_output(wait=True)
    plt.show()


# Main training loop: alternate data collection with value/policy updates.
def train():
    model_action.train()
    model_value.train()

    # Train for 300 epochs.
    for epoch in range(300):
        # Collect at least 200 environment steps per epoch.
        collected = 0
        while collected < 200:
            state, action, reward, next_state, over, _ = play()
            collected += len(state)

            # Update the value net first; its advantage feeds the policy update.
            advantage = train_value(state, reward, next_state, over)
            train_action(state, action, advantage)

        # Every 50 epochs: evaluate over 20 episodes and report progress.
        if epoch % 50 == 0:
            test_result = sum(play()[-1] for _ in range(20)) / 20
            test_rewards.append(test_result)
            print(
                f"Epoch {epoch}, Policy Loss: {action_losses[-1]:.4f}, "
                f"Value Loss: {value_losses[-1]:.4f}, "
                f"Test Reward: {test_result:.2f}, "
                f"Last Entropy: {entropies[-1]:.4f}"
            )

            # Redraw the diagnostic curves.
            plot_training()


# Start training.
train()