"""
Losses
"""
# pylint: disable=C0301,C0103,R0902,R0915,W0221,W0622


##
# LIBRARIES
import torch
import torch.nn.functional as F
##
def l1_loss(input, target):
    """ L1 Loss without reduce flag.

    Computes the mean absolute difference between the two tensors.

    Args:
        input (FloatTensor): Input tensor
        target (FloatTensor): Output tensor

    Returns:
        [FloatTensor]: L1 distance between input and output
    """

    # F.l1_loss with the default 'mean' reduction is exactly
    # mean(|input - target|).
    return F.l1_loss(input, target)

##
def l2_loss(input, target, size_average=True):
    """ L2 Loss without reduce flag.

    Args:
        input (FloatTensor): Input tensor
        target (FloatTensor): Output tensor
        size_average (bool): If True (default), return the mean squared
            error as a scalar; if False, return the elementwise squared
            differences (same shape as the broadcast of input and target).

    Returns:
        [FloatTensor]: L2 distance between input and output
    """
    # Compute the squared differences once instead of in each branch.
    squared_error = torch.pow(input - target, 2)
    if size_average:
        return torch.mean(squared_error)
    return squared_error


##
def info_nce_loss(features, labels=None, temperature=0.07, device='cuda'):
    """
    InfoNCE loss for contrastive learning.

    Args:
        features (torch.Tensor): Feature vectors, shape [2*B, D] (SimCLR
            style) or [B, D] when explicit ``labels`` are supplied.
        labels (torch.Tensor, optional): Integer labels defining positive
            pairs (equal label => positive pair). If None, sample ``i`` is
            paired with sample ``i + B`` where ``B = features.size(0) // 2``,
            i.e. the two HALVES of the batch are matching augmented views.
            (The original comment claimed adjacent pairing (x1,x2),(x3,x4);
            the label construction below actually pairs across halves.)
        temperature (float): Softmax temperature; smaller values sharpen
            the similarity distribution.
        device (str): Device on which the masks are built; should match the
            device of ``features``.

    Returns:
        torch.Tensor: Scalar InfoNCE loss.
    """
    # Normalize so dot products are cosine similarities in [-1, 1].
    features = F.normalize(features, dim=1)

    # Pairwise similarity matrix, shape [N, N].
    similarity_matrix = torch.matmul(features, features.T)

    # Build labels that mark positive pairs.
    if labels is None:
        # SimCLR-style default: the two halves of the batch share labels,
        # so i and i + batch_size form a positive pair.
        batch_size = features.size(0) // 2
        labels = torch.cat([torch.arange(batch_size) for _ in range(2)], dim=0)
    labels = labels.contiguous().view(-1, 1)

    # Positive-pair mask: same label, diagonal (self-similarity) removed.
    mask = torch.eq(labels, labels.T).float().to(device)
    logits_mask = torch.scatter(
        torch.ones_like(mask),
        1,
        torch.arange(features.size(0)).view(-1, 1).to(device),
        0
    )
    mask = mask * logits_mask  # exclude self-pairs

    logits = similarity_matrix / temperature
    # Subtract the per-row max before exponentiating for numerical
    # stability; the constant shift cancels exactly in log_prob below.
    logits_max, _ = logits.max(dim=1, keepdim=True)
    logits = logits - logits_max.detach()
    exp_logits = torch.exp(logits) * logits_mask

    # log-softmax over all non-self candidates of each anchor.
    log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))

    # Average log-probability over each anchor's positives. The clamp
    # guards anchors with no positive (0/0 -> NaN), matching the sibling
    # semi-supervised loss in this file.
    mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1).clamp(min=1e-6)

    # Final loss: negated mean over all anchors.
    loss = -mean_log_prob_pos.mean()

    return loss


def semi_supervised_info_nce_loss(features, labels, normal_class=0, temperature=0.07, device='cuda'):
    """
    Semi-supervised InfoNCE loss.

    Only normal samples attract each other:
    - normal/normal pairs are positives,
    - normal/anomalous pairs are negatives,
    - anomalous/anomalous pairs are also negatives.

    Args:
        features (torch.Tensor): Feature vectors, shape [B, D]
        labels (torch.Tensor): Per-sample labels (0=normal, 1=anomalous),
            shape [B]; entries equal to ``normal_class`` are treated as
            normal.
        normal_class (int): Label value marking the normal class
        temperature (float): Softmax temperature
        device (str): Device on which the masks are built; should match the
            device of ``features``.

    Returns:
        torch.Tensor: Scalar loss; 0.0 when fewer than two normal samples
        are present (no positive pair exists).
    """
    normal_mask = (labels == normal_class)
    batch_size = features.size(0)

    # Positive mask: normal x normal with the diagonal (self) excluded.
    # Vectorized outer product replaces the original O(B^2) Python loop.
    normal_vec = normal_mask.float().view(-1, 1).to(device)
    eye = torch.eye(batch_size, device=device)
    mask = normal_vec * normal_vec.T * (1.0 - eye)

    # No positive pair available -> nothing to optimize.
    if mask.sum() == 0:
        return torch.tensor(0.0).to(device)

    # Normalize so dot products are cosine similarities.
    features = F.normalize(features, dim=1)

    # Pairwise similarity matrix scaled by temperature.
    similarity_matrix = torch.matmul(features, features.T)
    logits = similarity_matrix / temperature

    # Denominator mask: all candidates except self.
    logits_mask = torch.ones_like(mask) - eye

    exp_logits = torch.exp(logits) * logits_mask

    # log-softmax over all non-self candidates of each anchor.
    log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))

    # Average log-probability over positives; clamp avoids 0/0 for rows
    # without positives (anomalous anchors). Loss averages normal anchors only.
    mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1).clamp(min=1e-6)
    loss = -mean_log_prob_pos[normal_mask].mean()

    return loss