# PPO algorithm
# Continuous state space: e.g. Box([-1.2  -0.07], [0.6  0.07], (2,), float32) -> numpy.ndarray
# Continuous action space: e.g. Box(-1.0, 1.0, (4,), float32) -> numpy.ndarray
import os
import torch
import numpy as np
import torch.nn as nn
from tqdm import tqdm
import gymnasium as gym
import matplotlib.pyplot as plt
from config import (GAME_NAME,GAME_UPPER_STEP,HIDDEN_DIM,DEVICE,ACTER_LR,CRITIC_LR,DISCOUNT_FACTOR,NUM_EPISODES,REWARD_SCALE,
                    EPS,LAMDA)

class PolicyNet(nn.Module):
    """Gaussian policy network: maps a state batch to a squashed, rescaled action.

    The network outputs the mean of a diagonal Gaussian; the log standard
    deviation is a state-independent learnable parameter. Samples are
    squashed with tanh and affinely mapped into [action_low, action_high].
    """
    def __init__(self,
        state_dim: int,
        hidden_dim: int,
        action_dim: int,
        action_low: np.ndarray,
        action_high: np.ndarray
    ):
        super().__init__()
        # Buffers carry no gradient but are saved/loaded/moved with the module.
        self.register_buffer('action_low', torch.tensor(action_low, dtype=torch.float32))
        self.register_buffer('action_high', torch.tensor(action_high, dtype=torch.float32))
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.mean_head = nn.Linear(hidden_dim, action_dim)
        self.log_std = nn.Parameter(torch.zeros(action_dim))

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """x: [B, state_dim] -> (mean [B, action_dim], std [action_dim])."""
        hidden = self.net(x)
        # Clamping log_std keeps the std in a numerically safe range.
        return self.mean_head(hidden), self.log_std.clamp(-20, 2).exp()

    def sample_action(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Sample an action and its log-probability for each state in the batch.

        Returns (action [B, action_dim] within the action bounds,
        log_prob [B, 1] including the tanh change-of-variables correction).
        """
        mean, std = self(state)
        dist = torch.distributions.Normal(mean, std)  # std broadcasts over the batch dim
        raw = dist.rsample()  # reparameterized sample: gradients flow into mean/std
        # (.sample() would block gradient propagation.)

        squashed = torch.tanh(raw)  # each dim compressed from (-inf, inf) to (-1, 1)
        span = self.action_high - self.action_low
        scaled = self.action_low + (squashed + 1) * 0.5 * span  # map (-1, 1) -> action bounds

        # Sum per-dimension log-probs, then apply the tanh Jacobian correction.
        log_prob = dist.log_prob(raw).sum(dim=-1, keepdim=True)  # [B, 1]
        log_prob = log_prob - torch.log(1 - squashed.pow(2) + 1e-6).sum(dim=-1, keepdim=True)

        return scaled, log_prob

# TODO: could this share the early layers (a common trunk) with the policy network?
class ValueNet(nn.Module):
    """State-value network: maps a state batch to scalar V(s) estimates."""
    def __init__(self,
        state_dim: int,
        hidden_dim: int,
    ):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.value_head = nn.Linear(hidden_dim, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """x: [B, state_dim] -> value estimates [B, 1]."""
        return self.value_head(self.net(x))

class PPO_Agent:
    """PPO agent for continuous action spaces (clipped surrogate objective + GAE).

    Holds a Gaussian tanh-squashed policy (actor) and a state-value network
    (critic), each with its own Adam optimizer.
    """
    def __init__(self,
        state_dim: int,
        hidden_dim: int,
        action_dim: int,
        device: torch.device,
        actor_lr: float,
        critic_lr: float,
        discount_factor: float,
        action_low: np.ndarray,
        action_high: np.ndarray,
        eps: float,   # PPO ratio-clipping factor
        lamda: float, # GAE factor
    ):
        self.state_dim = state_dim
        self.hidden_dim = hidden_dim
        self.action_dim = action_dim
        self.device = device
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.discount_factor = discount_factor
        self.action_low = action_low
        self.action_high = action_high
        self.eps = eps
        self.lamda = lamda

        self.actor = PolicyNet(
            state_dim, hidden_dim, action_dim,
            action_low, action_high
        ).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)

        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)

        self.count = 0  # number of completed update() calls

    def take_action(self, state: np.ndarray) -> tuple[np.ndarray, torch.Tensor, torch.Tensor]:
        """Sample an action for one state; also return its log-prob and value.

        PPO needs log pi(a|s) *at collection time*: the action is sampled, so
        this quantity must be recorded now and cannot be recovered at update
        time. Returns (action np.ndarray (action_dim,), log_prob [1,1],
        value [1,1]).
        """
        state_tensor = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)  # [1, state_dim]
        # No gradients are needed while collecting data; update() rebuilds the graph.
        with torch.no_grad():
            action, log_prob = self.actor.sample_action(state_tensor)  # ([1, action_dim], [1, 1])
            value = self.critic(state_tensor)  # [1, 1]
        return action.cpu().numpy()[0], log_prob, value

    def update(self, trajectory_record: dict):
        """One PPO update from a single collected trajectory.

        Expected keys: 'states', 'actions', 'rewards' (length-T lists),
        'log_probs', 'values' (lists of [1,1] tensors), and 'last_value'
        (bootstrap value of the state after the final step).
        """
        states = torch.tensor(np.array(trajectory_record['states']), dtype=torch.float32, device=self.device)
        actions = torch.tensor(np.array(trajectory_record['actions']), dtype=torch.float32, device=self.device)
        rewards = torch.tensor(trajectory_record['rewards'], dtype=torch.float32, device=self.device).unsqueeze(1)  # [T, 1]
        old_log_probs = torch.cat(trajectory_record['log_probs'], dim=0).detach()  # [T, 1]
        values = torch.cat(trajectory_record['values'], dim=0).detach()            # [T, 1]
        # Flatten to [1] so it combines with values[t] (shape [1]) without
        # broadcasting: the original [1,1] shape silently made the returns
        # [T,1,1] and let the critic MSE broadcast against [T,1].
        last_value = trajectory_record['last_value'].detach().reshape(1).to(self.device)

        # ===== GAE advantages (backward recursion over the trajectory) =====
        returns = []
        advantages = []
        gae = torch.zeros(1, device=self.device)
        T = len(rewards)
        for t in reversed(range(T)):
            next_value = last_value if t == T - 1 else values[t + 1]
            delta = rewards[t] + self.discount_factor * next_value - values[t]  # TD error
            gae = delta + self.discount_factor * self.lamda * gae
            advantages.insert(0, gae)
            returns.insert(0, gae + values[t])  # return target = advantage + value baseline

        returns = torch.cat(returns).unsqueeze(1)        # [T, 1]
        advantages = torch.cat(advantages).unsqueeze(1)  # [T, 1]

        # Normalize advantages (optional but standard; stabilizes the ratio loss).
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # Map stored env-scale actions back to tanh space and invert tanh.
        # These quantities are loop-invariant, so compute them once.
        action_tanh = (actions - self.actor.action_low) / (self.actor.action_high - self.actor.action_low) * 2 - 1
        action_tanh = torch.clamp(action_tanh, -1 + 1e-6, 1 - 1e-6)  # keep atanh finite
        z = torch.atanh(action_tanh)

        # ===== Several epochs over the same trajectory (full batch) =====
        # Note: PPO usually does multiple mini-batch passes; simplified here
        # to full-batch passes.
        for _ in range(10):  # PPO epochs
            mean, std = self.actor(states)
            dist = torch.distributions.Normal(mean, std)
            log_prob = dist.log_prob(z).sum(dim=-1, keepdim=True)
            # The tanh change-of-variables correction must use action_tanh in
            # (-1, 1); the original used the rescaled actions, whose squares
            # can exceed 1 and make the log argument negative (NaN losses).
            log_prob -= torch.log(1 - action_tanh.pow(2) + 1e-6).sum(dim=-1, keepdim=True)

            # Clipped surrogate objective.
            ratio = torch.exp(log_prob - old_log_probs)
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * advantages
            actor_loss = -torch.min(surr1, surr2).mean()

            # Critic regresses onto the GAE return targets.
            current_values = self.critic(states)
            critic_loss = nn.functional.mse_loss(current_values, returns)

            # Update actor.
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), max_norm=0.5)
            self.actor_optimizer.step()

            # Update critic.
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.critic.parameters(), max_norm=0.5)
            self.critic_optimizer.step()

        self.count += 1

    def save_model(self, path: str):
        """Save actor/critic weights, optimizer states and the update counter."""
        torch.save({
            'policy_net_state_dict': self.actor.state_dict(),
            'value_net_state_dict': self.critic.state_dict(),
            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
            'critic_optimizer_state_dict': self.critic_optimizer.state_dict(),
            'count': self.count
        }, path)
        print(f"Model saved to {path}")

    def load_model(self, path: str) -> bool:
        """Load a checkpoint written by save_model(); return True on success."""
        if not os.path.exists(path):
            print(f"No saved model found at {path}, starting from scratch.")
            return False
        checkpoint = torch.load(path, map_location=self.device, weights_only=True)
        self.actor.load_state_dict(checkpoint['policy_net_state_dict'])
        self.critic.load_state_dict(checkpoint['value_net_state_dict'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])
        self.count = checkpoint.get('count', self.count)
        # The original message mentioned a target network, which PPO does not have.
        print(f"Model loaded from {path}.")
        return True

# TODO: needs revision
def train_PPO(
    env: gym.Env,
    agent: PPO_Agent,
    num_episodes: int
):
    """On-policy PPO training loop: collect one full episode, then update.

    Returns the list of raw (unscaled) per-episode returns.
    """
    return_list = []  # per-episode returns

    for _ in tqdm(range(num_episodes), desc="Training Episodes"):
        episode_return = 0.0
        trajectory_record = {
            'states'   : [],
            'actions'  : [],
            'rewards'  : [],
            'log_probs': [],
            'values'   : [],
        }

        state, _ = env.reset()  # np.ndarray (state_dim,)
        terminated = truncated = False

        while not (terminated or truncated):
            action, log_prob, value = agent.take_action(state)
            # action: np.ndarray (action_dim,); log_prob, value: [1, 1] tensors

            next_state, reward, terminated, truncated, _ = env.step(action)

            # Scaled reward feeds learning; the raw reward is reported below.
            scaled_reward = float(reward) * REWARD_SCALE

            trajectory_record['states'].append(state.copy())
            trajectory_record['actions'].append(action)
            trajectory_record['rewards'].append(scaled_reward)
            trajectory_record['log_probs'].append(log_prob)
            trajectory_record['values'].append(value)

            episode_return += reward
            state = next_state

        # Bootstrap value for GAE: a truncated (time-limit) episode did not
        # really end, so use the critic's estimate of the final state; a
        # terminated episode has zero future value. (The original
        # `if not done:` branch was unreachable — the loop only exits once
        # done is True — so truncated episodes always bootstrapped with 0.)
        if truncated and not terminated:
            with torch.no_grad():
                last_value = agent.critic(
                    torch.tensor(state, dtype=torch.float32, device=agent.device).unsqueeze(0)
                )
        else:
            last_value = torch.zeros(1, 1, device=agent.device)
        trajectory_record['last_value'] = last_value.detach()

        return_list.append(episode_return)
        agent.update(trajectory_record)

        # Periodic progress plot.
        if len(return_list) % 10 == 0:
            plt.figure(figsize=(10, 5))
            plt.plot(return_list)
            plt.title("PPO Training Returns")  # was mislabeled "REINFORCE"
            plt.xlabel("Episode")
            plt.ylabel("Return")
            plt.grid()
            # NOTE(review): plot_path is a module-level global defined only
            # under __main__ — TODO pass it in as a parameter.
            plt.savefig(plot_path)
            # close() instead of show(): show() blocks training under GUI
            # backends and the open figures leak memory.
            plt.close()

    return return_list

if __name__ == "__main__":
    # 定义结果保存目录：results/{GAME_NAME}/
    results_dir = os.path.join("results", GAME_NAME)
    os.makedirs(results_dir, exist_ok=True)

    # 模型和图像保存路径
    model_path = os.path.join(results_dir, "ppo_model.pth")
    plot_path = os.path.join(results_dir, "training_returns.png")

    # 创建环境
    env = gym.make(GAME_NAME, render_mode=None, max_episode_steps=GAME_UPPER_STEP)
    state_dim   = env.observation_space.shape[0]
    action_dim  = env.action_space.shape[0]
    action_low  = env.action_space.low # np.ndarray
    action_high = env.action_space.high # np.ndarray

    # 创建智能体
    agent = PPO_Agent(
        state_dim=state_dim,
        hidden_dim=HIDDEN_DIM,
        action_dim=action_dim,
        device=DEVICE,
        actor_lr=ACTER_LR,
        critic_lr=CRITIC_LR,
        discount_factor=DISCOUNT_FACTOR,
        action_low=action_low,
        action_high=action_high,
        eps=EPS,
        lamda=LAMDA,
    )
    # 加载已有模型
    agent.load_model(model_path)

    # 训练
    return_list = train_PPO(env=env, agent=agent, num_episodes=NUM_EPISODES)
    # 保存模型
    agent.save_model(model_path)

    # 关闭环境
    env.close()

    # 绘制训练曲线
    plt.figure(figsize=(10, 5))
    plt.plot(return_list)
    plt.title("REINFORCE Training Returns")
    plt.xlabel("Episode")
    plt.ylabel("Return")
    plt.grid()
    plt.savefig(plot_path)
    plt.show()