"""
concat后降维128，其余保持与原版一致
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import sys, time, numpy, os, subprocess, pandas, tqdm

from loss_multi import lossAV, lossA, lossV
from loss_multi import lossTime, lossSub, lossWin
from model.loconet_encoder import locoencoder

from subprocess import PIPE
import re
import swanlab
import numpy as np
from accelerate import Accelerator

accelerator = Accelerator()  #🍍 全局 Accelerator

class Loconet(nn.Module):

    def __init__(self):
        super(Loconet, self).__init__()
        self.model = locoencoder()
        self.lossAV = lossAV()
        self.lossA = lossA()
        self.lossV = lossV()
        # self.lossTime = lossTime()
        # self.lossSub = lossSub()
        # self.lossWin = lossWin()
        self.criterion = nn.CrossEntropyLoss()
    
    def talknce_loss(self, features_vis, features_aud):
        
        """
        把目标说话人的不同的口型和声音特征进行对齐
        输入：
            features_vis: [Tact, 128] 目标说话人活跃帧的 visual embedding
            features_aud: [Tact, 128] 目标说话人活跃帧的 audio embedding
        输出：
            logits: [Tact, Tact] 每行第一个是正样本，后面是负样本
            labels_nce: [Tact] 全 0，表示正样本索引
        """
        # 归一化
        features_vis = F.normalize(features_vis, dim=1)  # [T,128]
        features_aud = F.normalize(features_aud, dim=1)  # [T,128]

        # 相似度矩阵
        similarity_matrix = torch.matmul(features_vis, features_aud.T)  # [T,T]

        # 正样本: 对角线 audio-visual 配对
        positives = torch.diag(similarity_matrix).unsqueeze(1)  # [T,1]

        # 负样本: 去掉对角线
        mask = ~torch.eye(similarity_matrix.size(0), dtype=torch.bool, device=similarity_matrix.device)
        negatives = similarity_matrix[mask].view(similarity_matrix.size(0), -1)  # [T, T-1]
        
        # 拼接 [positives | negatives]
        logits = torch.cat([positives, negatives], dim=1)  # [T, T]
        logits = logits / 0.07

        # 标签：正样本的索引 | 每行的第一个（索引0）是正样本
        labels_nce = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
        
        nlossTalknce = self.criterion(logits, labels_nce)
        
        return nlossTalknce
    
    
    def forward(self, audioFeature, visualFeature, labels, masks, valid_speaker_indices):
        """
        visualFeature: torch.Size([3, 66, 112, 112])
        audioFeature: torch.Size([1, 1, 4*66, 64])
        labels: torch.Size([1, 3, 66]) - 当前帧该人有没有说话
        masks: torch.Size([1, 3, 66]) - 当前帧该人脸有没有出现
        valid_speaker_indices: List[Tensor([...]),]
        """
        device = visualFeature.device
        b, s, t = visualFeature.shape[:3]
        visualFeature = visualFeature.view(b * s, *visualFeature.shape[2:])
        labels = labels.view(b * s, *labels.shape[2:]) # torch.Size([3, 66])
        masks = masks.view(b * s, *masks.shape[2:]) # torch.Size([3, 66])

        raw_audioEmbed = self.model.forward_audio_frontend(audioFeature) # torch.Size([1, 66, 128])
        raw_visualEmbed = self.model.forward_visual_frontend(visualFeature) # torch.Size([3, 66, 128])
        raw_audioEmbed = raw_audioEmbed.repeat(s, 1, 1) # torch.Size([1, 66, 128]) -> torch.Size([3, 66, 128])

        # -----------------talknceloss-------------------
        new_labels = labels[0].reshape((-1)) # [T=66] 取出目标说话人的 label | [0,1,1,1,0,0,0,1,....]
        tri_vis = raw_visualEmbed[0].reshape(-1,128) # [66,128] 取出目标说话人的 visual
        tri_aud = raw_audioEmbed[0].reshape(-1,128) # [66,128] 取出目标说话人的 audio

        active_index = np.where(new_labels.cpu()==1) # 目标说话人的活跃帧 [Tact<66] | eg：[0,1,1,1,0,0,0,1,....] -> Tuple(array([1,2,3,7,...]),)
        if len(active_index[0]) > 0:
            tri_vis2 = torch.stack([tri_vis[i,:] for i in active_index[0]], dim=0) # 筛选出目标说话人活跃帧的visual [Tact, 128]
            tri_aud2 = torch.stack([tri_aud[j,:] for j in active_index[0]], dim=0) # 筛选出目标说话人活跃帧的audio [Tact, 128]
            nlossTalknce = self.talknce_loss(tri_vis2, tri_aud2)
        else:
            nlossTalknce=torch.tensor(0.0, device=device, requires_grad=False)
        # -----------------------------------------------
        
        # 跨模态交叉注意力
        audioEmbed, visualEmbed = self.model.forward_cross_attention(raw_audioEmbed, raw_visualEmbed) # torch.Size([3, T, 128]),torch.Size([3, T, 128])
        
        outsAV, ts_gate = self.model.forward_audio_visual_backend(audioEmbed, visualEmbed, b, s) # swanlab监控门控
        outsA = self.model.forward_audio_backend(audioEmbed) # torch.Size([198, 128])
        outsV = self.model.forward_visual_backend(visualEmbed) # torch.Size([198, 128])

        # 分类损失
        labels = labels.reshape((-1)) # torch.Size([198])
        masks = masks.reshape((-1)) # torch.Size([198])
        sumlossAV, _, _, localCorrectNum = self.lossAV.forward(outsAV, labels, masks) # 标量
        sumlossA = self.lossA.forward(outsA, labels, masks) # 标量
        sumlossV = self.lossV.forward(outsV, labels, masks) # 标量
        num_valid = masks.sum().float() # 有效帧数
        
        return sumlossA, sumlossV, sumlossAV, nlossTalknce, ts_gate, localCorrectNum, num_valid

def train_network(model, loader, optimizer, scheduler, epoch):
    """Train for one epoch with accelerate-based multi-GPU reduction.

    Local loss sums keep their gradients; they are normalized by the
    GLOBAL count of valid frames so every rank optimizes the same
    objective regardless of how frames were sharded.

    Args:
        model: Loconet (possibly accelerator-prepared) returning per-batch
            summed losses and counts.
        loader: yields (audioFeature, visualFeature, labels, masks,
            valid_speaker_indices) tuples.
        optimizer, scheduler: standard training state; the scheduler is
            stepped once per epoch with ``epoch - 1`` (epochs are 1-indexed).
        epoch: 1-indexed epoch number, used for logging steps.

    Returns:
        (mean epoch loss, current lr) on the main process;
        (None, None) on every other process.
    """
    model.train()
    scheduler.step(epoch - 1)  # per-epoch LR schedule

    # Epoch-level accumulators (only meaningful on the main process).
    epoch_loss_sum = 0.0
    epoch_correct = 0
    epoch_frames = 0
    epoch_batches = 0

    pbar = tqdm.tqdm(enumerate(loader, start=1), total=len(loader),
                     disable=not accelerator.is_main_process)
    for batch_idx, (audioFeature, visualFeature, labels, masks, valid_speaker_indices) in pbar:

        sumlossA, sumlossV, sumlossAV, nlossTalknce, ts_gate, localCorrectNum, num_valid = model(
            audioFeature, visualFeature, labels, masks, valid_speaker_indices
        )

        # Keep the grad-carrying local sums; normalize by the global
        # valid-frame count so all ranks share one denominator.
        total_num_valid = accelerator.reduce(num_valid, reduction="sum")
        denom = total_num_valid + 1e-6  # guard against an all-masked batch
        nlossAV = sumlossAV / denom
        nlossA = sumlossA / denom
        nlossV = sumlossV / denom

        # Combined objective (weights follow the original recipe).
        nloss = nlossAV + 0.4 * nlossA + 0.4 * nlossV + 0.3 * nlossTalknce

        optimizer.zero_grad()
        accelerator.backward(nloss)
        optimizer.step()

        # ---------- logging ----------
        # Global losses: reduce the per-rank sums, divide by the global denom.
        global_lossAV = accelerator.reduce(sumlossAV, reduction="sum") / denom
        global_lossA = accelerator.reduce(sumlossA, reduction="sum") / denom
        global_lossV = accelerator.reduce(sumlossV, reduction="sum") / denom
        # NOTE(review): nlossTalknce is NOT reduced across ranks, so this
        # logged total is exact only on single-GPU runs — confirm intent.
        global_loss = global_lossAV + 0.4 * global_lossA + 0.4 * global_lossV + 0.3 * nlossTalknce
        total_correct = accelerator.reduce(localCorrectNum, reduction="sum")
        total_frames = total_num_valid  # already reduced above

        if accelerator.is_main_process:
            epoch_loss_sum += global_loss.item()
            epoch_batches += 1
            # Running accuracy over all frames seen so far this epoch.
            epoch_correct += total_correct.item()
            epoch_frames += int(total_frames.item())
            pbar.set_postfix(dict(epoch=epoch, lr=optimizer.param_groups[0]['lr'],
                                  acc=(epoch_correct / max(1, epoch_frames))))
            # swanlab logging (main process only).
            swanlab.log(
                {
                    "batch_loss": global_loss.item(),
                    "batch_lossA": global_lossA.item(),
                    "batch_lossV": global_lossV.item(),
                    "batch_lossAV": global_lossAV.item(),
                    "batch_lossTalkNce": nlossTalknce.item() if isinstance(nlossTalknce, torch.Tensor) else float(nlossTalknce),
                    "batch_acc": (epoch_correct / max(1, epoch_frames)),
                    "lr": optimizer.param_groups[0]['lr'],
                    "ts_gate": ts_gate,
                },
                step=batch_idx + (epoch - 1) * len(loader)
            )

    # Epoch-level logging (main process only).
    if accelerator.is_main_process:
        mean_epoch_loss = epoch_loss_sum / max(1, epoch_batches)
        swanlab.log(
            {
                "epoch_loss": mean_epoch_loss,
                "epoch_acc": epoch_correct / max(1, epoch_frames),
                "epoch_lr": optimizer.param_groups[0]['lr']
            },
            step=epoch
        )
        return mean_epoch_loss, optimizer.param_groups[0]['lr']
    else:
        return None, None


def evaluate_network(model, loader, epoch):
    """Run validation, write per-frame scores to CSV, and compute AVA mAP.

    Only speaker index 0 of each clip is scored (matching the layout of
    the reference CSV). Per-batch predictions are padded to a common
    length, gathered across ranks, then trimmed back on the main process.

    Args:
        model: Loconet-style model exposing ``model.model`` encoder parts
            and ``model.lossAV``.
        loader: validation loader yielding (audio, visual, labels, masks).
        epoch: used to name the output CSV.

    Returns:
        mAP (float, in percent) on the main process; 0 on all other
        processes (placeholder).
    """
    model.eval()
    predScores = []
    workspace = './save/workspace1'
    os.makedirs(workspace, exist_ok=True)
    evalCsvSave = os.path.join(workspace, "{}_res.csv".format(epoch))
    evalOrig = "../../data/AVADataPath/csv/val_orig.csv"

    pbar = tqdm.tqdm(loader, disable=not accelerator.is_main_process)
    for audioFeature, visualFeature, labels, masks in pbar:
        with torch.no_grad():
            b, s, t = visualFeature.shape[0], visualFeature.shape[1], visualFeature.shape[2]
            visualFeature = visualFeature.view(b * s, *visualFeature.shape[2:])
            labels = labels.view(b * s, *labels.shape[2:])
            masks = masks.view(b * s, *masks.shape[2:])

            audioEmbed = model.model.forward_audio_frontend(audioFeature)
            visualEmbed = model.model.forward_visual_frontend(visualFeature)

            # Share the single audio stream across the s faces.
            audioEmbed = audioEmbed.repeat(s, 1, 1)

            audioEmbed, visualEmbed = model.model.forward_cross_attention(audioEmbed, visualEmbed)

            outsAV, *_ = model.model.forward_audio_visual_backend(audioEmbed, visualEmbed, b, s)

            # Keep only the target speaker (index 0) for scoring.
            labels = labels.reshape((-1))
            masks = masks.reshape((-1))
            outsAV = outsAV.view(b, s, t, -1)[:, 0, :, :].view(b * t, -1)
            labels = labels.view(b, s, t)[:, 0, :].view(b * t)
            masks = masks.view(b, s, t)[:, 0, :].view(b * t)

            _, predScore, _, _ = model.lossAV.forward(outsAV, labels, masks)

            batch_pred = predScore[:, 1].detach()  # probability of "speaking"

            # ---------------- pad + gather across ranks ----------------
            # This rank's true batch length.
            local_len = torch.tensor([batch_pred.shape[0]], device=batch_pred.device)

            # Every rank's true length (used below to trim the padding).
            all_lens = accelerator.gather(local_len)
            max_len = all_lens.max().item()

            # Pad to max_len so gather sees equal-sized tensors.
            pad_len = max_len - batch_pred.shape[0]
            if pad_len > 0:
                batch_pred = torch.cat([batch_pred, torch.zeros(pad_len, device=batch_pred.device)], dim=0)

            gathered = accelerator.gather(batch_pred)

            if accelerator.is_main_process:
                # Each rank occupies a max_len slice; trim to its true length.
                offset = 0
                for L in all_lens.tolist():
                    predScores.append(gathered[offset:offset + L].cpu())
                    offset += max_len
            accelerator.wait_for_everyone()

    if accelerator.is_main_process:
        predScores = torch.cat(predScores).cpu().numpy()
    else:
        predScores = []

    # Final metric computation happens only on the main process,
    # which now holds every rank's shard.
    if accelerator.is_main_process:
        print(f"正在主进程上进行最终的指标计算.....")
        evalRes = pandas.read_csv(evalOrig)

        # When the batch count isn't divisible by the GPU count, accelerate
        # duplicates the last batch for the idle ranks — truncating here
        # drops those duplicates.
        evalRes['score'] = pandas.Series(predScores[:len(evalRes)])
        evalRes['label'] = pandas.Series(['SPEAKING_AUDIBLE'] * len(evalRes))
        evalRes.drop(['label_id', 'instance_id'], axis=1, inplace=True, errors='ignore')
        evalRes.to_csv(evalCsvSave, index=False)

        cmd = "python -O ./utils/get_ava_active_speaker_performance.py -g %s -p %s " % (evalOrig, evalCsvSave)
        process = subprocess.run(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout = process.stdout.decode('utf-8')
        stderr = process.stderr.decode('utf-8')

        print("命令标准输出:")
        print(stdout)
        print("命令错误输出:")
        print(stderr)

        if process.returncode != 0:
            print(f"命令执行失败，返回码: {process.returncode}")
            mAP = 0
        else:
            # Parse "average precision: NN.NN%" from the scorer's stdout.
            match = re.search(r'average precision:\s*([\d\.]+)%', stdout)
            if match:
                mAP = float(match.group(1))
            else:
                print("输出中未找到Overall Average Precision")
                mAP = 0
    else:
        mAP = 0  # placeholder on non-main processes

    return mAP


    # -------------- other utility functions --------------
    # NOTE(review): this def is nested inside evaluate_network AFTER its
    # return statement, so it is unreachable dead code — it was presumably
    # meant to be a method on a model/trainer class. Confirm and relocate.
    def saveParameters(self, path):
        """Serialize this object's state_dict to *path* via torch.save."""
        torch.save(self.state_dict(), path)

    def loadParameters(self, path):
        """Load weights from *path*, stripping DDP "model.module." prefixes.

        If ``self.rank`` is set, the checkpoint is loaded strictly as-is;
        otherwise key names are rewritten to drop the DistributedDataParallel
        wrapper prefix and loading is non-strict.

        NOTE(review): this def is nested inside evaluate_network after its
        return statement and is therefore unreachable dead code — it was
        presumably meant to be a method on a model/trainer class.
        """
        loadedState = torch.load(path, map_location='cpu')
        if self.rank is not None:  # identity comparison with None (was `!= None`)
            info = self.load_state_dict(loadedState)
        else:
            # Strip the DDP prefix so checkpoint keys match this module's.
            new_state = {k.replace("model.module.", ""): v
                         for k, v in loadedState.items()}
            info = self.load_state_dict(new_state, strict=False)
        print(info)