import torch
import torch.nn as nn
import torch.nn.functional as F
import sys, time, numpy, os, subprocess, pandas, tqdm

from loss_multi import lossAV, lossA, lossV
from model.loconet_encoder import locoencoder

from subprocess import PIPE
import re
import swanlab
import numpy as np

class Loconet(nn.Module):
    """LoCoNet active-speaker-detection model with two auxiliary
    contrastive (InfoNCE) losses on top of the standard AV/A/V
    classification losses:

      * TalkNCE -- aligns the target speaker's visual (lip) and audio
        (voice) embeddings on the frames where that speaker is active.
      * SpkNCE  -- aligns time-pooled voice and face embeddings across
        the different speakers in a clip, in both directions.
    """

    def __init__(self):
        super(Loconet, self).__init__()
        print("@@@@ this is Loconet with 2loss")
        self.model = locoencoder()
        self.lossAV = lossAV()
        self.lossA = lossA()
        self.lossV = lossV()
        # Shared cross-entropy criterion used by both InfoNCE heads.
        self.criterion = nn.CrossEntropyLoss()

    def talknce_loss(self, features_vis, features_aud):
        """Frame-level audio-visual InfoNCE loss for the target speaker.

        Aligns the visual and audio embeddings of the same active frame;
        every other frame in the clip serves as a negative.

        Args:
            features_vis: [Tact, D] visual embeddings of the active frames.
            features_aud: [Tact, D] audio embeddings of the active frames.

        Returns:
            Scalar InfoNCE loss (cross-entropy over [Tact, Tact] logits,
            where column 0 of each row is the positive pair).
        """
        # Cosine similarity between every (visual, audio) frame pair.
        features_vis = F.normalize(features_vis, dim=1)  # [T, D]
        features_aud = F.normalize(features_aud, dim=1)  # [T, D]
        similarity_matrix = torch.matmul(features_vis, features_aud.T)  # [T, T]

        # The diagonal holds the positive (same-frame) pairs.
        T = features_vis.shape[0]
        mask = torch.eye(T, dtype=torch.bool, device=features_vis.device)  # [T, T]

        # Rebuild each row as [positive, negatives...] so the target
        # class index is always 0.
        positives = similarity_matrix[mask].view(T, 1)       # [T, 1]
        negatives = similarity_matrix[~mask].view(T, T - 1)  # [T, T-1]
        logits = torch.cat([positives, negatives], dim=1)    # [T, T]

        # Positive sample sits at index 0 of every row.
        labels_nce = torch.zeros(T, dtype=torch.long, device=features_vis.device)  # [T]

        # Temperature scaling (0.07, the common InfoNCE default).
        logits = logits / 0.07

        nlossTalknce = self.criterion(logits, labels_nce)

        return nlossTalknce

    def spknce_loss(self, audioEmbed, visualEmbed, valid_speaker_indices):
        """Cross-speaker voice/face alignment loss (symmetric InfoNCE).

        Args:
            audioEmbed: [S, T, D] per-speaker audio embeddings.
            visualEmbed: [S, T, D] per-speaker visual embeddings.
            valid_speaker_indices: 1-D tensor of indices of the valid
                (real, non-padded) speakers.

        Returns:
            Scalar loss: mean of the audio->visual and visual->audio
            InfoNCE losses over the N valid speakers, or 0.0 when fewer
            than two valid speakers are present (no negatives exist).
        """
        device = audioEmbed.device

        # Keep only the valid speakers; the loss needs at least one negative.
        valid_speakers = valid_speaker_indices.tolist()
        if len(valid_speakers) < 2:
            return torch.tensor(0.0, device=device)
        real_audioEmbed = audioEmbed[valid_speakers]
        real_visualEmbed = visualEmbed[valid_speakers]

        # Pool over the time dimension to one embedding per speaker.
        aud_pool = real_audioEmbed.mean(dim=1)   # [N, D]
        vis_pool = real_visualEmbed.mean(dim=1)  # [N, D]

        # Cosine similarity via normalized dot products.
        aud_pool = F.normalize(aud_pool, dim=1)  # [N, D]
        vis_pool = F.normalize(vis_pool, dim=1)  # [N, D]

        sim_matrix = torch.matmul(aud_pool, vis_pool.T)  # [N, N]

        N = sim_matrix.shape[0]
        # Diagonal = matching (same-speaker) voice/face pairs.
        mask = torch.eye(N, dtype=torch.bool, device=device)
        labels = torch.zeros(N, dtype=torch.long, device=device)

        # audio -> visual direction.
        positives = sim_matrix[mask].view(N, 1)      # [N, 1]
        negatives = sim_matrix[~mask].view(N, N - 1)  # [N, N-1]
        logits_av = torch.cat([positives, negatives], dim=1) / 0.07
        loss_av = self.criterion(logits_av, labels)

        # visual -> audio direction (transpose and repeat).
        sim_matrix_T = sim_matrix.T
        positives = sim_matrix_T[mask].view(N, 1)
        negatives = sim_matrix_T[~mask].view(N, N - 1)
        logits_va = torch.cat([positives, negatives], dim=1) / 0.07
        loss_va = self.criterion(logits_va, labels)

        nlossSpknce = 0.5 * (loss_av + loss_va)

        return nlossSpknce

    def forward(self, audioFeature, visualFeature, labels, masks, valid_speaker_indices):
        """Run the full model and return the combined training losses.

        Expected shapes (b=batch, s=speakers, t=frames), e.g.:
            visualFeature: [b*s, t, 112, 112] after the view below
            audioFeature:  [b, 1, 4*t, 64]
            labels, masks: [b, s, t]
            valid_speaker_indices: list of 1-D index tensors, one per batch item.

        Returns:
            (nloss, nlossA, nlossV, nlossAV, nlossTalknce, nlossSpknce,
             prec, num_frames)
        """
        b, s, t = visualFeature.shape[:3]
        visualFeature = visualFeature.view(b * s, *visualFeature.shape[2:])
        labels = labels.view(b * s, *labels.shape[2:])  # [b*s, t]
        masks = masks.view(b * s, *masks.shape[2:])     # [b*s, t]

        raw_audioEmbed = self.model.forward_audio_frontend(audioFeature)    # [b, t, 128]
        raw_visualEmbed = self.model.forward_visual_frontend(visualFeature)  # [b*s, t, 128]
        # The clip shares one audio track: replicate it for every speaker.
        raw_audioEmbed = raw_audioEmbed.repeat(s, 1, 1)  # [b*s, t, 128]

        # ----------------- TalkNCE loss -------------------
        # Speaker 0 is the target speaker; pick its labels/embeddings.
        new_labels = labels[0].reshape((-1))           # [t], e.g. [0,1,1,1,0,...]
        tri_vis = raw_visualEmbed[0].reshape(-1, 128)  # [t, 128]
        tri_aud = raw_audioEmbed[0].reshape(-1, 128)   # [t, 128]

        # Frames where the target speaker is actively speaking.
        # (boolean mask replaces the previous NumPy round-trip + stack)
        active_mask = new_labels == 1  # [t] bool
        if active_mask.any():
            tri_vis2 = tri_vis[active_mask]  # [Tact, 128]
            tri_aud2 = tri_aud[active_mask]  # [Tact, 128]
            nlossTalknce = self.talknce_loss(tri_vis2, tri_aud2)
        else:
            # No active frames -> no contrastive pairs; contribute zero.
            nlossTalknce = torch.tensor(0.0, device=tri_vis.device)
        # --------------------------------------------------

        # Cross-modal cross-attention between audio and visual streams.
        audioEmbed, visualEmbed = self.model.forward_cross_attention(
            raw_audioEmbed, raw_visualEmbed)  # [b*s, t, 128] each

        # ----------------- SpkNCE loss -------------------
        nlossSpknce = self.spknce_loss(audioEmbed, visualEmbed, valid_speaker_indices[0])
        # --------------------------------------------------

        outsAV = self.model.forward_audio_visual_backend(audioEmbed, visualEmbed, b, s)  # [b*s*t, 256]
        outsA = self.model.forward_audio_backend(audioEmbed)    # [b*s*t, 128]
        outsV = self.model.forward_visual_backend(visualEmbed)  # [b*s*t, 128]

        # Classification losses over all (speaker, frame) pairs.
        labels = labels.reshape((-1))  # [b*s*t]
        masks = masks.reshape((-1))    # [b*s*t]
        nlossAV, _, _, prec = self.lossAV.forward(outsAV, labels, masks)  # scalar
        nlossA = self.lossA.forward(outsA, labels, masks)  # scalar
        nlossV = self.lossV.forward(outsV, labels, masks)  # scalar

        # nloss = nlossAV + 0.4 * nlossA + 0.4 * nlossV + 0.3 * nlossTalknce  # TalkNCE variant
        nloss = nlossAV + 0.4 * nlossA + 0.4 * nlossV + 0.15 * nlossSpknce  # SpkNCE variant

        num_frames = masks.sum()
        return nloss, nlossA, nlossV, nlossAV, nlossTalknce, nlossSpknce, prec, num_frames


def train_network(model, loader, optimizer, scheduler, device, epoch):
    """Train `model` for one epoch over `loader`.

    Logs per-batch and per-epoch metrics to SwanLab and a tqdm bar.

    Args:
        model: Loconet (or wrapper exposing the same forward signature).
        loader: yields (audioFeature, visualFeature, labels, masks,
            valid_speaker_indices) tuples.
        optimizer / scheduler: torch optimizer and an epoch-stepped scheduler.
        device: target device for the batch tensors.
        epoch: 1-based epoch number.

    Returns:
        (mean training loss over the epoch, learning rate used).
    """
    model.train()
    scheduler.step(epoch - 1)

    # Running sums over the epoch; `index` counts valid frames for accuracy.
    index, top1, loss = 0, 0, 0
    lossA, lossV, lossAV = 0, 0, 0
    lossTalknce, lossSpknce = 0, 0
    lr = optimizer.param_groups[0]['lr']

    pbar = tqdm.tqdm(enumerate(loader, start=1), total=len(loader))

    for num, (audioFeature, visualFeature, labels, masks, valid_speaker_indices) in pbar:
        audioFeature = audioFeature.to(device)
        visualFeature = visualFeature.to(device)
        labels = labels.to(device)
        masks = masks.to(device)
        valid_speaker_indices = [vsi.to(device) for vsi in valid_speaker_indices]

        nloss, nlossA, nlossV, nlossAV, nlossTalknce, nlossSpknce, prec, num_frames = model(
            audioFeature, visualFeature, labels, masks, valid_speaker_indices
        )

        optimizer.zero_grad()
        nloss.backward()
        optimizer.step()

        # Accumulate as Python floats. Detaching here matters: summing the
        # raw loss tensors would keep every batch's autograd graph alive
        # for the whole epoch (memory leak).
        top1 += prec.detach().cpu().item()
        lossA += nlossA.detach().cpu().item()
        lossV += nlossV.detach().cpu().item()
        lossAV += nlossAV.detach().cpu().item()
        loss += nloss.detach().cpu().item()
        index += int(num_frames.detach().cpu().item())
        lossTalknce += nlossTalknce.detach().cpu().item()
        lossSpknce += nlossSpknce.detach().cpu().item()

        # Update the tqdm progress bar with running means.
        pbar.set_postfix(dict(epoch=epoch,
                              lr=lr,
                              lossA=lossA / num,
                              lossV=lossV / num,
                              lossAV=lossAV / num,
                              loss=loss / num,
                              acc=(top1 / index)))
        # SwanLab: per-batch metrics.
        swanlab.log(
            {
                "batch_loss": nloss.item(),
                "batch_lossA": nlossA.item(),
                "batch_lossV": nlossV.item(),
                "batch_lossAV": nlossAV.item(),
                "batch_lossTalkNce": nlossTalknce.item(),
                "batch_lossSpknce": nlossSpknce.item(),
                "batch_acc": top1 / index,
                "lr": lr
            },
            step=num + (epoch - 1) * len(loader)
        )

    # SwanLab: per-epoch means (num == number of batches after the loop).
    swanlab.log(
        {
            "epoch_loss": loss / num,
            "epoch_lossA": lossA / num,
            "epoch_lossV": lossV / num,
            "epoch_lossAV": lossAV / num,
            "epoch_lossTalknce": lossTalknce / num,
            "epoch_lossSpknce": lossSpknce / num,
            "epoch_acc": top1 / index,
            "epoch_lr": lr
        },
        step=epoch
    )

    return loss / num, lr


def evaluate_network(model, loader, device, epoch):
    """Evaluate `model` on the AVA validation set and return the mAP.

    Scores every clip, writes a CSV of per-frame scores merged with the
    original validation CSV, then shells out to the official AVA scoring
    script and parses the mAP from its stdout.

    Args:
        model: wrapper exposing `.model` (encoder) and `.lossAV`.
        loader: yields (audioFeature, visualFeature, labels, masks).
        device: target device for the batch tensors.
        epoch: used only to name the results CSV.

    Returns:
        mAP in percent, or 0 if the scoring script failed or its output
        could not be parsed.
    """
    model.eval()
    workspace = './save/workspace1'
    os.makedirs(workspace, exist_ok=True)
    evalCsvSave = os.path.join(workspace, "{}_res.csv".format(epoch))
    evalOrig = "../../data/AVADataPath/csv/val_orig.csv"

    all_scores = []
    for audioFeature, visualFeature, labels, masks in tqdm.tqdm(loader):
        with torch.no_grad():
            audioFeature = audioFeature.to(device)
            visualFeature = visualFeature.to(device)
            labels = labels.to(device)
            masks = masks.to(device)

            b, s, t = visualFeature.shape[0], visualFeature.shape[1], visualFeature.shape[2]
            visualFeature = visualFeature.view(b * s, *visualFeature.shape[2:])
            labels = labels.view(b * s, *labels.shape[2:])
            masks = masks.view(b * s, *masks.shape[2:])

            audioEmbed = model.model.forward_audio_frontend(audioFeature)
            visualEmbed = model.model.forward_visual_frontend(visualFeature)

            # expand() broadcasts the single audio track across speakers
            # without copying (read-only view is fine here).
            audioEmbed = audioEmbed.expand(s, -1, -1)

            audioEmbed, visualEmbed = model.model.forward_cross_attention(audioEmbed, visualEmbed)

            outsAV = model.model.forward_audio_visual_backend(audioEmbed, visualEmbed, b, s)

            labels = labels.reshape((-1))
            masks = masks.reshape((-1))
            # Only speaker 0 (the target speaker) is scored at eval time.
            outsAV = outsAV.view(b, s, t, -1)[:, 0, :, :].view(b * t, -1)
            labels = labels.view(b, s, t)[:, 0, :].view(b * t)
            masks = masks.view(b, s, t)[:, 0, :].view(b * t)

            _, predScore, _, _ = model.lossAV.forward(outsAV, labels, masks)
            all_scores.append(predScore[:, 1].detach().cpu())

    predScores = torch.cat(all_scores).numpy()

    # Merge scores into the original CSV in the format the scorer expects.
    evalRes = pandas.read_csv(evalOrig)
    evalRes['score'] = pandas.Series(predScores)
    evalRes['label'] = pandas.Series(['SPEAKING_AUDIBLE'] * len(evalRes))
    evalRes.drop(['label_id', 'instance_id'], axis=1, inplace=True, errors='ignore')
    evalRes.to_csv(evalCsvSave, index=False)

    # Argument list + shell=False avoids shell-injection/quoting issues
    # with the CSV paths.
    cmd = ["python", "-O", "./utils/get_ava_active_speaker_performance.py",
           "-g", evalOrig, "-p", evalCsvSave]
    process = subprocess.run(cmd, shell=False, stdout=PIPE, stderr=PIPE)
    stdout = process.stdout.decode('utf-8')
    stderr = process.stderr.decode('utf-8')

    print("命令标准输出:")
    print(stdout)
    print("命令错误输出:")
    print(stderr)

    if process.returncode != 0:
        print(f"命令执行失败，返回码: {process.returncode}")
        mAP = 0
    else:
        match = re.search(r'average precision:\s*([\d\.]+)%', stdout)
        if match:
            mAP = float(match.group(1))
        else:
            print("输出中未找到Overall Average Precision")
            mAP = 0
    return mAP

    # NOTE(review): the two defs below are UNREACHABLE (they follow the
    # `return` above) and take `self` — they look like class methods that
    # were pasted at the wrong indentation level. Kept verbatim so the
    # intended implementation is not lost; they should be moved onto the
    # model-wrapper class (or deleted) in a follow-up.
    def saveParameters(self, path):
        torch.save(self.state_dict(), path)

    def loadParameters(self, path):
        selfState = self.state_dict()
        loadedState = torch.load(path, map_location='cpu')
        if self.rank != None:
            info = self.load_state_dict(loadedState)
        else:
            new_state = {}

            for k, v in loadedState.items():
                new_state[k.replace("model.module.", "")] = v
            info = self.load_state_dict(new_state, strict=False)
        print(info)