"""
PPO 损失函数

实现 Proximal Policy Optimization 算法的损失计算。
"""

from __future__ import annotations

from typing import Dict, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


class PPOLoss(nn.Module):
    """
    PPO (Proximal Policy Optimization) loss.

    Combines three components:
    1. Policy loss: the clipped surrogate objective.
    2. Value loss: squared error of the value estimates (optionally clipped).
    3. Entropy bonus: encourages exploration.

    total_loss = policy_loss + value_coef * value_loss + entropy_coef * entropy_loss
    """

    def __init__(
        self,
        clip_epsilon: float = 0.2,
        value_coef: float = 0.5,
        entropy_coef: float = 0.01,
        clip_value_loss: bool = False,
    ):
        """
        Initialize the loss function.

        Args:
            clip_epsilon: PPO clipping parameter (epsilon).
            value_coef: Weight of the value-loss term.
            entropy_coef: Weight of the entropy-regularization term.
            clip_value_loss: Whether to clip the value loss. Effective only
                when ``old_values`` is passed to ``forward``; without old
                value estimates the clipping is not well-defined and a plain
                MSE loss is used instead.
        """
        super().__init__()

        self.clip_epsilon = clip_epsilon
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.clip_value_loss = clip_value_loss

    def forward(
        self,
        log_probs: torch.Tensor,
        old_log_probs: torch.Tensor,
        advantages: torch.Tensor,
        values: torch.Tensor,
        returns: torch.Tensor,
        entropy: torch.Tensor,
        old_values: torch.Tensor | None = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Compute the PPO loss.

        Args:
            log_probs: Log-probabilities under the current policy [batch_size].
            old_log_probs: Log-probabilities under the old policy [batch_size].
            advantages: Advantage estimates [batch_size].
            returns: Target returns [batch_size].
            values: Value estimates [batch_size, 1] or [batch_size].
            entropy: Policy entropy [batch_size].
            old_values: Value estimates produced by the old (behavior) policy,
                [batch_size, 1] or [batch_size]. Required for value-loss
                clipping to take effect when ``clip_value_loss`` is enabled.

        Returns:
            total_loss: The combined scalar loss.
            loss_dict: Detached per-component values for logging.
        """
        # Normalize shapes: accept [batch_size, 1] as well as [batch_size].
        if values.dim() == 2 and values.shape[1] == 1:
            values = values.squeeze(-1)
        if old_values is not None and old_values.dim() == 2 and old_values.shape[1] == 1:
            old_values = old_values.squeeze(-1)

        # 1. Policy loss (PPO clipped surrogate objective).
        log_ratio = log_probs - old_log_probs
        ratio = torch.exp(log_ratio)
        surr1 = ratio * advantages
        surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
        policy_loss = -torch.min(surr1, surr2).mean()

        # 2. Value loss.
        if self.clip_value_loss and old_values is not None:
            # PPO2-style clipping: keep the new value prediction within
            # clip_epsilon of the OLD value prediction, then take the
            # pessimistic (larger) of the clipped / unclipped squared errors.
            # NOTE: a previous version clipped relative to `returns`, which
            # does not bound the size of the value-function update; clipping
            # must be done around the old value estimate.
            value_pred_clipped = old_values + torch.clamp(
                values - old_values,
                -self.clip_epsilon,
                self.clip_epsilon,
            )
            value_losses = (values - returns).pow(2)
            value_losses_clipped = (value_pred_clipped - returns).pow(2)
            value_loss = 0.5 * torch.max(value_losses, value_losses_clipped).mean()
        else:
            # Without old value estimates, clipping cannot be computed; use a
            # plain MSE loss.
            value_loss = F.mse_loss(values, returns)

        # 3. Entropy bonus (negated so that minimizing increases entropy).
        entropy_loss = -entropy.mean()

        # Weighted sum of the three components.
        total_loss = (
            policy_loss
            + self.value_coef * value_loss
            + self.entropy_coef * entropy_loss
        )

        # Detached components for logging.
        loss_dict = {
            "loss": total_loss.detach(),
            "policy_loss": policy_loss.detach(),
            "value_loss": value_loss.detach(),
            "entropy_loss": entropy_loss.detach(),
            "ratio_mean": ratio.mean().detach(),
            "ratio_std": ratio.std().detach(),
            # k3 estimator of the KL divergence; computed from log_ratio
            # directly (instead of log(exp(log_ratio))) for numerical
            # stability.
            "approx_kl": ((ratio - 1) - log_ratio).mean().detach(),
        }

        return total_loss, loss_dict

    def extra_repr(self) -> str:
        """Hyperparameter summary shown in the module's repr."""
        return (
            f"clip_epsilon={self.clip_epsilon}, "
            f"value_coef={self.value_coef}, "
            f"entropy_coef={self.entropy_coef}, "
            f"clip_value_loss={self.clip_value_loss}"
        )
