import torch
import torch.nn as nn
import torch.nn.functional as F
def entropy(input_):
    """Per-sample Shannon entropy (in bits, log base 2) of probability rows.

    Args:
        input_: tensor of shape [batch_size, num_classes]; each row is
            assumed to be a probability distribution (e.g. softmax output).

    Returns:
        1-D tensor of shape [batch_size] with the entropy of each row.
    """
    epsilon = 1e-10  # guards against log2(0)
    # -p * log2(p), summed over the class dimension.
    per_class = -input_ * torch.log2(input_ + epsilon)
    return torch.sum(per_class, dim=1)


def im(outputs_test, gent=True):
    """Information-maximization (IM) loss on a batch of logits.

    Minimizing the mean per-sample entropy sharpens individual
    predictions; when ``gent`` is True, the entropy of the batch-mean
    prediction is subtracted, which discourages collapsing all samples
    onto a few classes.

    Args:
        outputs_test: logits of shape [batch_size, num_classes].
        gent: if True, subtract the global (marginal) entropy term.

    Returns:
        Scalar IM loss tensor.
    """
    epsilon = 1e-10  # guards against log2(0)
    # Functional softmax instead of instantiating nn.Softmax per call.
    softmax_out = F.softmax(outputs_test, dim=1)
    entropy_loss = torch.mean(entropy(softmax_out))
    if gent:
        msoftmax = softmax_out.mean(dim=0)
        gentropy_loss = torch.sum(-msoftmax * torch.log2(msoftmax + epsilon))
        entropy_loss -= gentropy_loss
    return entropy_loss

def entropy_(probs: torch.Tensor, eps: float = 1e-10) -> torch.Tensor:
    """Shannon entropy (natural log, in nats) of each row of ``probs``.

    Args:
        probs: probability tensor of shape [batch_size, num_classes].
        eps: numerical-stability floor/ceiling that keeps log() finite.

    Returns:
        Tensor of shape [batch_size] holding the per-sample entropy.
    """
    # Clamp into [eps, 1] so log never sees 0 (or anything above 1).
    safe = probs.clamp(min=eps, max=1.0)
    return (-safe * safe.log()).sum(dim=1)

def information_maximization(
    logits: torch.Tensor,
    use_gentropy: bool = True,
    eps: float = 1e-10
) -> torch.Tensor:
    """Information-maximization (IM) loss over target-domain predictions.

    Pushes each sample toward a confident (low-entropy) prediction while,
    optionally, keeping the batch-average prediction spread over classes
    (high marginal entropy) to prevent class collapse.

    Args:
        logits: raw model outputs of shape [batch_size, num_classes].
        use_gentropy: when True, subtract the entropy of the batch-mean
            prediction distribution.
        eps: clamp floor for numerical stability of log().

    Returns:
        Scalar IM loss tensor.
    """
    probs = nn.functional.softmax(logits, dim=1)

    # Mean per-sample entropy (natural log, nats); clamp keeps log finite.
    clamped = torch.clamp(probs, min=eps, max=1.0)
    loss = torch.mean(torch.sum(-clamped * torch.log(clamped), dim=1))

    if use_gentropy:
        # Entropy of the marginal (batch-mean) class distribution.
        marginal = torch.clamp(probs.mean(dim=0), min=eps, max=1.0)
        loss = loss - torch.sum(-marginal * torch.log(marginal))

    return loss

# Data-augmentation layer: additive zero-mean Gaussian noise.
class GaussianNoise(nn.Module):
    """Adds N(0, sigma^2) noise to its input while in training mode.

    Acts as the identity in eval mode or when sigma is not positive.
    """

    def __init__(self, sigma=0.1):
        super().__init__()
        self.sigma = sigma

    def forward(self, x):
        # Guard clause: pass through unchanged outside training,
        # or when the noise scale is zero.
        if not self.training or self.sigma <= 0:
            return x
        return x + self.sigma * torch.randn_like(x)
