import torch
import torch.nn as nn
import torch.nn.functional as F


class SpeakerContrastiveLoss(nn.Module):
    """Bidirectional InfoNCE loss aligning per-speaker audio and visual embeddings.

    Each speaker's frame sequence is mean-pooled into a single embedding per
    modality; matching (audio, visual) pairs on the diagonal of the similarity
    matrix are treated as positives, all other speakers in the batch as negatives.
    """

    def __init__(self, temperature=0.07):
        super(SpeakerContrastiveLoss, self).__init__()
        # Softmax temperature: smaller values sharpen the similarity distribution.
        self.tau = temperature

    def forward(self, audioEmbed, visualEmbed, masks=None):
        """Compute the symmetric audio<->visual contrastive loss.

        Args:
            audioEmbed:  [S, T, 128] per-frame audio embeddings.
            visualEmbed: [S, T, 128] per-frame visual embeddings.
            masks:       optional [S, T] 0/1 frame-validity mask.

        Returns:
            Scalar loss, mean of the audio->visual and visual->audio InfoNCE terms.
        """
        # Pool over time to one speaker embedding per modality.
        if masks is None:
            pooled_a = audioEmbed.mean(dim=1)   # [S, 128]
            pooled_v = visualEmbed.mean(dim=1)  # [S, 128]
        else:
            frame_w = masks.unsqueeze(-1)                # [S, T, 1]
            denom = frame_w.sum(dim=1) + 1e-6            # guard against all-masked rows
            pooled_a = (audioEmbed * frame_w).sum(dim=1) / denom
            pooled_v = (visualEmbed * frame_w).sum(dim=1) / denom

        # Unit-normalize so the dot product is cosine similarity.
        pooled_a = F.normalize(pooled_a, dim=-1)
        pooled_v = F.normalize(pooled_v, dim=-1)

        # Pairwise similarity logits, scaled by temperature.
        sim = pooled_a @ pooled_v.t() / self.tau  # [S, S]
        idx = torch.arange(pooled_a.size(0), device=pooled_a.device)

        # Symmetric InfoNCE: the correct match for row i is column i.
        return 0.5 * (F.cross_entropy(sim, idx) + F.cross_entropy(sim.t(), idx))
    
class TimeContrastiveLoss(nn.Module):
    """Per-speaker temporal InfoNCE loss between audio and visual frames.

    For each speaker, frames where the speaker is actively talking are paired
    across modalities: frame i's visual embedding should be closest to frame
    i's audio embedding, with the speaker's other active frames as negatives.
    """

    def __init__(self, temperature=1.0):
        super(TimeContrastiveLoss, self).__init__()
        # Softmax temperature for the frame-level similarity logits.
        self.tau = temperature

    def forward(self, audioEmbed, visualEmbed, labels, masks):
        """Compute the frame-level visual->audio contrastive loss.

        Args:
            audioEmbed:  [S, T, 128] per-frame audio embeddings.
            visualEmbed: [S, T, 128] per-frame visual embeddings.
            labels:      [S, T] 0/1, whether the speaker talks in that frame.
            masks:       [S, T] 0/1, whether the frame is valid.

        Returns:
            Scalar loss averaged over speakers with >= 2 active frames; a
            graph-connected zero when no speaker qualifies.
        """
        # Frames that are both valid and actively speaking.
        active_mask = (labels == 1) * masks  # [S, T]

        losses = []
        for s in range(audioEmbed.shape[0]):  # iterate over speakers
            act_idx = torch.nonzero(active_mask[s], as_tuple=True)[0]
            if len(act_idx) < 2:
                # Need at least two frames to have a negative; skip this speaker.
                continue

            a_emb = F.normalize(audioEmbed[s, act_idx], dim=-1)   # [Tact, 128]
            v_emb = F.normalize(visualEmbed[s, act_idx], dim=-1)  # [Tact, 128]

            # sim[i, j] = cosine similarity of visual frame i vs audio frame j.
            sim_matrix = torch.matmul(v_emb, a_emb.t()) / self.tau  # [Tact, Tact]

            # The correct match for row i is column i (diagonal targets);
            # cross-entropy over rows is exactly InfoNCE.
            targets = torch.arange(len(act_idx), device=audioEmbed.device)
            losses.append(F.cross_entropy(sim_matrix, targets))

        if not losses:
            # BUGFIX: the old `torch.tensor(0.0)` was detached from the autograd
            # graph, so `.backward()` failed whenever this was the only loss term.
            # Multiply a sum of the inputs by 0 to get a graph-connected zero
            # with identical numeric value.
            return (audioEmbed.sum() + visualEmbed.sum()) * 0.0
        return torch.mean(torch.stack(losses))
