import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

def channel_reconstruction_loss(model_output, original_channel, lambda_recon=1.0, lambda_sparse=0.1, lambda_dict=0.01):
    """
    Compute the combined loss for probe selection and channel reconstruction.

    Args:
        model_output: dict produced by the model with keys:
            - probe_indices: selected probe indices [B, seq_len, n_probes]
            - probe_weights: matching probe weights [B, seq_len, n_probes]
            - channel_reconstructed: reconstructed channel features [B, seq_len, dict_feature_dim]
            - total_probe_scores: scores over all probes [B, seq_len, total_probes] (optional)
        original_channel: ground-truth channel features [B, seq_len, channel_dim]
        lambda_recon: weight of the reconstruction term
        lambda_sparse: weight of the sparsity + entropy terms
        lambda_dict: weight of the probe-diversity term

    Returns:
        total_loss: scalar total loss
        loss_dict: dict holding every individual loss component
    """
    # Pull the pieces we need out of the model output.
    channel_reconstructed = model_output['channel_reconstructed']
    probe_weights = model_output['probe_weights']
    probe_indices = model_output['probe_indices']
    total_probe_scores = model_output.get('total_probe_scores', None)

    # If the reconstructed and original feature dims disagree, truncate both
    # tensors to the smaller dimension so the losses are well-defined.
    if channel_reconstructed.shape[-1] != original_channel.shape[-1]:
        print(f"警告: 重构通道维度({channel_reconstructed.shape[-1]})与原始通道维度({original_channel.shape[-1]})不匹配")

        min_dim = min(channel_reconstructed.shape[-1], original_channel.shape[-1])

        channel_reconstructed_resized = channel_reconstructed[..., :min_dim]
        original_channel_resized = original_channel[..., :min_dim]

        print(f"已调整特征维度至: {min_dim}")
    else:
        channel_reconstructed_resized = channel_reconstructed
        original_channel_resized = original_channel

    # 1. Reconstruction loss.
    # Option A: plain MSE (reported in loss_dict; not used in the total by default).
    mse_loss = F.mse_loss(channel_reconstructed_resized, original_channel_resized)

    # Option B: cosine-similarity loss.  Computed in one vectorized call over
    # the feature dim instead of the original O(B*T) Python double loop of
    # per-element cosine_similarity calls — identical result, far faster.
    batch_size, seq_len, _ = original_channel_resized.shape
    cos_sim = F.cosine_similarity(
        channel_reconstructed_resized, original_channel_resized, dim=-1
    )  # [B, seq_len]
    cos_loss = torch.mean(1 - cos_sim)

    # 2. Sparsity loss: L1 on the weights encourages concentrated selections.
    sparsity_loss = torch.mean(torch.abs(probe_weights))

    # 3. Entropy loss: pushes each weight distribution toward certainty.
    # The 1e-8 guards log(0) for zero weights.
    entropy_loss = -torch.mean(
        torch.sum(probe_weights * torch.log(probe_weights + 1e-8), dim=-1)
    )

    # 4. Probe-diversity loss (discourages selecting the same probe repeatedly).
    diversity_loss = 0
    if total_probe_scores is not None:
        # Vectorized unique-count: after sorting along the probe axis, the
        # number of distinct values is 1 + (# adjacent sorted pairs that differ).
        # Replaces the original per-(b,t) Python loop over torch.unique.
        n_probes = probe_indices.shape[-1]
        sorted_idx, _ = torch.sort(probe_indices, dim=-1)
        if n_probes > 1:
            unique_counts = 1 + (sorted_idx[..., 1:] != sorted_idx[..., :-1]).sum(dim=-1)
        else:
            # A single selection is trivially unique.
            unique_counts = torch.ones(probe_indices.shape[:-1], device=probe_indices.device)
        diversity_loss = torch.mean(1.0 - unique_counts.float() / n_probes)

    # Total loss: cosine reconstruction term is used by default
    # (swap in mse_loss here if preferred).
    recon_loss = cos_loss
    total_loss = (lambda_recon * recon_loss +
                 lambda_sparse * (sparsity_loss + entropy_loss) +
                 lambda_dict * diversity_loss)

    # Report every component alongside the total for logging/debugging.
    loss_dict = {
        'total_loss': total_loss,
        'recon_loss': recon_loss,
        'mse_loss': mse_loss,
        'cos_loss': cos_loss,
        'sparsity_loss': sparsity_loss,
        'entropy_loss': entropy_loss,
        'diversity_loss': diversity_loss
    }

    return total_loss, loss_dict

def fast_channel_reconstruction_loss(model_output, original_channel, lambda_recon=1.0, lambda_sparse=0.1, lambda_dict=0.01):
    """
    Compute the probe-selection / channel-reconstruction loss (fully vectorized).

    Args:
        model_output: dict produced by the model with keys:
            - probe_indices: selected probe indices [B, seq_len, n_probes]
            - probe_weights: matching probe weights [B, seq_len, n_probes]
            - channel_reconstructed: reconstructed channel features [B, seq_len, dict_feature_dim]
            - total_probe_scores: scores over all probes [B, seq_len, total_probes]
        original_channel: ground-truth channel features [B, seq_len, channel_dim]
        lambda_recon: weight of the reconstruction term
        lambda_sparse: weight of the sparsity + entropy terms
        lambda_dict: weight of the probe-diversity term

    Returns:
        total_loss: scalar total loss
        loss_dict: dict holding every individual loss component
    """
    # Pull the pieces we need out of the model output.
    channel_reconstructed = model_output['channel_reconstructed']
    probe_weights = model_output['probe_weights']
    probe_indices = model_output['probe_indices']

    # If the reconstructed and original feature dims disagree, truncate both
    # tensors to the smaller dimension so the losses are well-defined.
    if channel_reconstructed.shape[-1] != original_channel.shape[-1]:
        min_dim = min(channel_reconstructed.shape[-1], original_channel.shape[-1])

        channel_reconstructed_resized = channel_reconstructed[..., :min_dim]
        original_channel_resized = original_channel[..., :min_dim]
    else:
        channel_reconstructed_resized = channel_reconstructed
        original_channel_resized = original_channel

    # 1. Reconstruction loss.
    # Option A: plain MSE (reported in loss_dict; not used in the total by default).
    mse_loss = F.mse_loss(channel_reconstructed_resized, original_channel_resized)

    # Option B: cosine-similarity loss, computed over flattened [B*seq_len, dim] rows.
    batch_size, seq_len, _ = original_channel_resized.shape
    recon_flat = channel_reconstructed_resized.reshape(-1, channel_reconstructed_resized.shape[-1])
    orig_flat = original_channel_resized.reshape(-1, original_channel_resized.shape[-1])

    cos_sim = F.cosine_similarity(recon_flat, orig_flat)
    cos_loss = torch.mean(1 - cos_sim)

    # 2. Sparsity loss: L1 on the weights encourages concentrated selections.
    sparsity_loss = torch.mean(torch.abs(probe_weights))

    # 3. Entropy loss: pushes each weight distribution toward certainty.
    # The 1e-8 guards log(0) for zero weights.
    entropy_loss = -torch.mean(
        torch.sum(probe_weights * torch.log(probe_weights + 1e-8), dim=-1)
    )

    # 4. Probe-diversity loss (discourages selecting the same probe repeatedly).
    # Vectorized unique-count: after sorting along the probe axis, the number
    # of distinct values is 1 + (# adjacent sorted pairs that differ).  This
    # replaces the original Python loop over all B*seq_len rows calling
    # torch.unique per row.
    n_probes = probe_indices.shape[-1]
    sorted_idx, _ = torch.sort(probe_indices, dim=-1)
    if n_probes > 1:
        unique_counts = 1 + (sorted_idx[..., 1:] != sorted_idx[..., :-1]).sum(dim=-1)
    else:
        # A single selection is trivially unique.
        unique_counts = torch.ones(probe_indices.shape[:-1], device=probe_indices.device)

    diversity_ratio = unique_counts.float() / n_probes  # unique picks / total picks
    diversity_loss = torch.mean(1.0 - diversity_ratio)

    # Total loss: cosine reconstruction term is used by default
    # (swap in mse_loss here if preferred).
    recon_loss = cos_loss
    total_loss = (lambda_recon * recon_loss +
                 lambda_sparse * (sparsity_loss + entropy_loss) +
                 lambda_dict * diversity_loss)

    # Report every component alongside the total for logging/debugging.
    loss_dict = {
        'total_loss': total_loss,
        'recon_loss': recon_loss,
        'mse_loss': mse_loss,
        'cos_loss': cos_loss,
        'sparsity_loss': sparsity_loss,
        'entropy_loss': entropy_loss,
        'diversity_loss': diversity_loss
    }

    return total_loss, loss_dict