# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SFT损失计算模块 - 实现CHORD框架中的SFT损失逻辑
"""

import torch
import torch.nn as nn
from typing import Tuple
from verl.utils.torch_functional import logprobs_from_logits
import math

def compute_token_wise_weights(log_probs: torch.Tensor, response_mask: torch.Tensor) -> torch.Tensor:
    """Compute the per-token weight function phi(y_t*; pi_theta) = p_t * (1 - p_t).

    The weight peaks at 0.25 when p_t = 0.5 and approaches 0 as p_t nears
    0 or 1, so tokens the model is maximally uncertain about receive the
    largest weight.

    Args:
        log_probs: Log-probabilities of the target tokens,
            shape (batch_size, seq_len - 1) — matches ``response_mask`` exactly.
        response_mask: Mask marking loss-bearing positions,
            shape (batch_size, seq_len - 1); already shift-aligned upstream
            (by ctx_manager, per this module's conventions).

    Returns:
        token_weights: Per-token weights, shape (batch_size, seq_len - 1),
            zeroed at masked-out positions.
    """
    # Convert log-probabilities to probabilities. Valid log-probs are <= 0,
    # so the upper clamp is 0.0 (the previous +10.0 bound permitted p_t up to
    # e^10 if numerical noise pushed a log-prob above zero, producing huge
    # *negative* weights from p*(1-p)). With max=0.0 we guarantee p_t <= 1
    # and token_weights in [0, 0.25]. The lower bound of -10 prevents exp
    # underflow from flushing everything to exactly zero.
    probs = torch.exp(log_probs.clamp(min=-10.0, max=0.0))  # (batch_size, seq_len-1)

    # phi(y_t*; pi_theta) = p_t * (1 - p_t)
    token_weights = probs * (1.0 - probs)  # (batch_size, seq_len-1)

    # Keep weights only at positions that contribute to the loss.
    token_weights = token_weights * response_mask

    return token_weights


def compute_sft_loss(
    logits: torch.Tensor,
    input_ids: torch.Tensor,
    response_mask: torch.Tensor,
    use_token_weights: bool = True,
    temperature: float = 1.0
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compute the SFT loss with optional per-token weighting (multi-turn safe).

    Args:
        logits: Model logits over the full sequence,
            shape (batch_size, seq_len, vocab_size).
        input_ids: Full input-id sequence, shape (batch_size, seq_len).
        response_mask: Mask of loss-bearing positions, shape
            (batch_size, seq_len - 1) — already shift-aligned upstream
            (ctx_manager handles the shift), so it pairs with
            ``logits[:, :-1]`` vs ``input_ids[:, 1:]``.
        use_token_weights: Whether to apply the CHORD weight
            phi(y_t*; pi_theta) = p_t * (1 - p_t) per token.
        temperature: Softmax temperature applied to the logits.

    Returns:
        sft_loss: Scalar SFT loss (masked mean over response tokens).
        token_weights: Per-token weights, or ``None`` when
            ``use_token_weights`` is False.
    """
    # Shift: logits at position t predict the token at t + 1. The incoming
    # response_mask is already aligned to the shifted pair, so it is used as-is.
    shift_logits = (logits / temperature)[..., :-1, :].contiguous()   # (B, S-1, V)
    shift_labels = input_ids[..., 1:].contiguous()                    # (B, S-1)
    target_response_mask = response_mask.contiguous()                 # (B, S-1)

    # Log-probability of each realized label, gathered directly along the
    # vocab dimension — no flatten/reshape round-trip required.
    log_probs = (
        torch.log_softmax(shift_logits, dim=-1)
        .gather(-1, shift_labels.unsqueeze(-1))
        .squeeze(-1)
    )  # (B, S-1)

    # Plain negative log-likelihood per token.
    token_losses = -log_probs

    if use_token_weights:
        # phi(y_t*; pi_theta) = p_t * (1 - p_t); shapes already match.
        token_weights = compute_token_wise_weights(log_probs, target_response_mask)
        weighted_losses = token_losses * token_weights
    else:
        token_weights = None
        weighted_losses = token_losses * target_response_mask

    # Masked mean over response positions; epsilon guards an all-zero mask.
    denom = target_response_mask.sum() + 1e-8
    sft_loss = weighted_losses.sum() / denom

    return sft_loss, token_weights


def compute_hybrid_loss(
    rl_loss: torch.Tensor,
    sft_loss: torch.Tensor,
    mu: float,
    mode: str = "cosine",
    global_step: int = None,
    total_steps: int = None,
    mu_min: float = 0.05,
    mu_max: float = None
) -> Tuple[torch.Tensor, float]:
    """Compute the hybrid loss L_Hybrid(theta) = (1 - mu) L_RL + mu L_SFT.

    Supported mu schedules:
    - "fix": keep mu constant for the whole run.
    - "cosine": cosine annealing from the initial mu down to ``mu_min``.
    - "linear": linear decay from the initial mu down to ``mu_min``.
    - "exponential": exponential decay toward ``mu_min``.

    Decay happens over the first half of training; afterwards mu is pinned
    at ``mu_min``. If a dynamic mode is requested without step information,
    a warning is printed and the fixed value is used instead.

    Args:
        rl_loss: Reinforcement-learning loss term.
        sft_loss: Supervised fine-tuning loss term.
        mu: Initial global coefficient mu (e.g. 0.9).
        mode: Schedule mode ("fix", "cosine", "linear", "exponential").
        global_step: Current training step (required for dynamic modes).
        total_steps: Total training steps (required for dynamic modes).
        mu_min: Target minimum mu (default 0.05).
        mu_max: Initial maximum mu (defaults to ``mu``).

    Returns:
        hybrid_loss: The blended loss.
        effective_mu: The mu value actually applied.
    """
    if mu_max is None:
        mu_max = mu

    has_schedule = global_step is not None and total_steps is not None

    if mode == "fix":
        # Constant schedule: mu never changes.
        effective_mu = mu
    elif mode in ("cosine", "linear", "exponential") and has_schedule:
        decay_steps = total_steps / 2  # decay occupies the first half of training
        if decay_steps > 0 and global_step <= decay_steps:
            progress = global_step / decay_steps  # progress in [0, 1]
            span = mu_max - mu_min
            if mode == "cosine":
                # mu(t) = mu_min + 0.5 * (mu_max - mu_min) * (1 + cos(pi * t/T))
                effective_mu = mu_min + 0.5 * span * (1 + math.cos(math.pi * progress))
            elif mode == "linear":
                # mu(t) = mu_max - (mu_max - mu_min) * t/T
                effective_mu = mu_max - span * progress
            else:
                # exponential: mu(t) = mu_min + (mu_max - mu_min) * exp(-5 t/T)
                effective_mu = mu_min + span * math.exp(-5 * progress)
        else:
            # Second half of training: hold mu at its floor.
            effective_mu = mu_min
    else:
        # Unsupported mode or missing step info: fall back to the fixed value.
        if mode != "fix":
            print(f"[CHORD] 警告：模式 '{mode}' 需要global_step和total_steps参数，回退到固定模式")
        effective_mu = mu

    # Keep mu inside [0, 1].
    effective_mu = min(1.0, max(0.0, effective_mu))

    # L_Hybrid = (1 - mu_eff) * L_RL + mu_eff * L_SFT
    hybrid_loss = (1.0 - effective_mu) * rl_loss + effective_mu * sft_loss

    return hybrid_loss, effective_mu


def aggregate_sft_loss(
    token_losses: torch.Tensor,
    response_mask: torch.Tensor,
    loss_agg_mode: str = "mean"
) -> torch.Tensor:
    """Aggregate per-token SFT losses under a response mask.

    Args:
        token_losses: Loss for each token, shape (batch_size, seq_len).
        response_mask: Mask of loss-bearing positions, same shape.
        loss_agg_mode: Aggregation mode — "mean" (masked average) or
            "sum" (masked total).

    Returns:
        aggregated_loss: The aggregated scalar loss.

    Raises:
        ValueError: If ``loss_agg_mode`` is neither "mean" nor "sum".
    """
    masked = response_mask * token_losses

    if loss_agg_mode == "sum":
        return masked.sum()
    if loss_agg_mode == "mean":
        # Epsilon guards against division by zero for an all-zero mask.
        return masked.sum() / (response_mask.sum() + 1e-8)
    raise ValueError(f"不支持的损失聚合模式: {loss_agg_mode}")

