"""
原版复现
"""

import time, os, torch, argparse, warnings, glob, pandas, json

from utils.tools import *
from loconet_withloss import Loconet, train_network, evaluate_network
from dataLoader_multiperson import train_loader, val_loader
import swanlab
from load_data import load_entity_data
from eval import ValDataPrep
# 🍄 使用 accelerate
from accelerate import Accelerator  # 🍄
import shutil

modelSavePath = './save/exps/4G-2Track-concat128-Raw'
MAX_EPOCH = 30

# SEED = 20250901
SEED = 20210617 # seed used by the original implementation

class TrainCollator(object):
    """Collate per-sample tuples into a padded, length-capped batch.

    Each sample is (audio, video, labels, masks, valid_speaker_indices).
    Time dimensions are zero-padded up to the batch maximum, then clipped
    to a hard cap of ``cut_limit`` video frames (and 4x that for audio —
    presumably audio features run at 4x the video frame rate; confirm
    against the dataloader).
    """

    def __call__(self, data):
        audio_list = [sample[0] for sample in data]
        video_list = [sample[1] for sample in data]
        label_list = [sample[2] for sample in data]
        mask_list = [sample[3] for sample in data]
        speaker_indices = [sample[4] for sample in data]  # xjh

        cut_limit = 200

        # ---- audio: pad along the time axis (dim 1) to the batch max ----
        audio_max = max(int(a.shape[1]) for a in audio_list)
        batch_audio = torch.stack(
            [
                torch.cat(
                    [a, a.new_zeros((a.shape[0], audio_max - a.shape[1], a.shape[2]))],
                    dim=1,
                )
                for a in audio_list
            ],
            dim=0,
        )
        if audio_max > cut_limit * 4:
            batch_audio = batch_audio[:, :, :cut_limit * 4, ...]

        # ---- video / labels / masks: pad all three to the video max ----
        video_max = max(int(v.shape[1]) for v in video_list)
        batch_video = torch.stack(
            [
                torch.cat(
                    [v, v.new_zeros((v.shape[0], video_max - v.shape[1],
                                     v.shape[2], v.shape[3]))],
                    dim=1,
                )
                for v in video_list
            ],
            dim=0,
        )
        batch_labels = torch.stack(
            [
                torch.cat([t, t.new_zeros((t.shape[0], video_max - t.shape[1]))], dim=1)
                for t in label_list
            ],
            dim=0,
        )
        batch_masks = torch.stack(
            [
                torch.cat([t, t.new_zeros((t.shape[0], video_max - t.shape[1]))], dim=1)
                for t in mask_list
            ],
            dim=0,
        )

        if video_max > cut_limit:
            batch_video = batch_video[:, :, :cut_limit, ...]
            batch_labels = batch_labels[:, :, :cut_limit, ...]
            batch_masks = batch_masks[:, :, :cut_limit, ...]

        # Keep speaker indices as a list of variable-length LongTensors.
        speaker_indices = [torch.tensor(idx, dtype=torch.long) for idx in speaker_indices]

        return batch_audio, batch_video, batch_labels, batch_masks, speaker_indices


class TrainDataPrep:
    """Builds the training DataLoader over the AVA clip dataset."""

    def __init__(self, entity_data, ts_to_entity):
        # Precomputed entity metadata handed straight to the dataset.
        self.entity_data = entity_data
        self.ts_to_entity = ts_to_entity

    def train_dataloader(self):
        """Return a shuffled DataLoader (batch_size=1 per process/device)."""
        dataset = train_loader(
            trialFileName="../../data/AVADataPath/csv/train_loader.csv",
            audioPath="../../data/AVADataPath/clips_audios/train",
            visualPath="../../data/AVADataPath/clips_videos/train",
            entity_data=self.entity_data,
            ts_to_entity=self.ts_to_entity,
            num_speakers=3,
        )
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=1,  # 🍄 per-device/per-process batch size
            shuffle=True,
            num_workers=6,
            pin_memory=False,
            collate_fn=TrainCollator(),
        )

    
def load_checkpoint(model, optimizer, scheduler, ckpt_path, accelerator):
    """
    Load a training checkpoint, transparently handling the DDP "module." prefix.

    Args:
        model: model already passed through accelerator.prepare
        optimizer: optimizer whose state should be restored
        scheduler: LR scheduler whose state should be restored
        ckpt_path: path to the checkpoint file
        accelerator: HuggingFace Accelerator instance

    Returns:
        start_epoch (int): epoch to resume from (1 when starting fresh)
        best_mAP (float): best mAP recorded so far (0.0 when starting fresh)
        run_id (str | None): stored tracking run id, if one exists
    """
    # Fresh start when no checkpoint is present.
    if not os.path.exists(ckpt_path):
        accelerator.print(">>> ❌ No checkpoint found, training from scratch 🚀")
        return 1, 0.0, None

    checkpoint = torch.load(ckpt_path, map_location=accelerator.device)

    # 🍈 Load into the unwrapped model so we never load onto the DDP wrapper.
    raw_model = accelerator.unwrap_model(model)
    state_dict = checkpoint["model_state_dict"]

    # Strip a "module." prefix left over from DDP-saved weights.
    if any(key.startswith("module.") for key in state_dict):
        state_dict = {key.replace("module.", ""): val for key, val in state_dict.items()}

    missing, unexpected = raw_model.load_state_dict(state_dict, strict=False)
    if accelerator.is_main_process:
        if missing:
            accelerator.print(f"⚠️ Missing keys: {missing}")
        if unexpected:
            accelerator.print(f"⚠️ Unexpected keys: {unexpected}")

    # 🍈 Optimizer & scheduler state are optional in older checkpoints.
    if "optimizer_state_dict" in checkpoint:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    if "scheduler_state_dict" in checkpoint:
        scheduler.load_state_dict(checkpoint["scheduler_state_dict"])

    start_epoch = checkpoint.get("epoch", 0) + 1
    best_mAP = checkpoint.get("best_mAP", 0.0)

    # A sibling text file may carry the experiment-tracking run id (optional).
    run_id = None
    run_id_file = os.path.join(os.path.dirname(ckpt_path), "swanlab_runid.txt")
    if os.path.exists(run_id_file):
        with open(run_id_file, "r") as f:
            run_id = f.read().strip()

    if accelerator.is_main_process:
        accelerator.print(f"✅ Resumed from {ckpt_path} (epoch {start_epoch-1})")

    return start_epoch, best_mAP, run_id


def main(train_entity_data, train_ts_to_entity, val_entity_data, val_ts_to_entity, accelerator):
    """Train and evaluate Loconet under Accelerate, with checkpoint resume.

    Args:
        train_entity_data, train_ts_to_entity: precomputed training metadata
        val_entity_data, val_ts_to_entity: precomputed validation metadata
        accelerator: HuggingFace Accelerator instance (one per process)
    """
    make_deterministic(seed=SEED)
    warnings.filterwarnings("ignore")

    device = accelerator.device  # 🍄
    accelerator.print("Using device:", device)  # 🍄

    # -------------------
    # Training data
    # -------------------
    train_data = TrainDataPrep(train_entity_data, train_ts_to_entity)
    trainLoader = train_data.train_dataloader()

    # -------------------
    # Validation data
    # -------------------
    val_data = ValDataPrep(val_entity_data, val_ts_to_entity)
    valLoader = val_data.val_dataloader()

    # -------------------
    # Model & optimizer
    # -------------------
    os.makedirs(modelSavePath, exist_ok=True)
    model = Loconet()

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    # 🍄 Wrap model, optimizer and dataloaders for multi-GPU execution.
    model, optimizer, trainLoader, valLoader = accelerator.prepare(
        model, optimizer, trainLoader, valLoader
    )  # 🍄

    # -------------------
    # Resume from checkpoint
    # -------------------
    last_ckpt = os.path.join(modelSavePath, "last.ckpt")
    # BUGFIX: run_id_file was used below but never defined in this function
    # (NameError on every fresh run). Keep it next to the checkpoints, which
    # is also where load_checkpoint looks for it.
    run_id_file = os.path.join(modelSavePath, "swanlab_runid.txt")
    start_epoch, best_mAP, run_id = load_checkpoint(model, optimizer, scheduler, last_ckpt, accelerator)

    # Report parameter count ("M" = 1024*1024 elements, matching the original log).
    accelerator.print(time.strftime("%m-%d %H:%M:%S") + " Model para number = %.2fM" %
        (sum(param.numel() for param in model.parameters()) / 1024 / 1024))

    # -------------------
    # Swanlab init (main process only)
    # -------------------
    if accelerator.is_main_process:
        if run_id is not None:
            swanlab.init(project="icassp2026", id=run_id, resume="allow")
            accelerator.print(f"✅ Resume Swanlab run {run_id}")
        else:
            run = swanlab.init(project="icassp2026")
            run_id = run.id
            # Persist the run id so a later resume reattaches to the same run.
            with open(run_id_file, "w") as f:
                f.write(run_id)
            accelerator.print(f"❌ New Swanlab run {run_id} 🚀")
    accelerator.wait_for_everyone()  # 🍄

    # -------------------
    # Train + validate loop
    # -------------------
    for epoch in range(start_epoch, MAX_EPOCH + 1):
        # ----------------------------------------------------------------
        # Train one epoch
        accelerator.print(f"---------开始训练 epoch-{epoch} ---------")
        loss, lr = train_network(model, trainLoader, optimizer, scheduler, epoch)

        # Save checkpoint (main process only)
        if accelerator.is_main_process:
            cur_ckpt = os.path.join(modelSavePath, f"epoch-{epoch}.ckpt")
            torch.save({
                "epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": scheduler.state_dict(),
                # BUGFIX: persist best_mAP so load_checkpoint can restore it
                # (it was read on resume but never written before).
                "best_mAP": best_mAP,
            }, cur_ckpt)

            # last.ckpt is only a copy of the newest epoch checkpoint.
            last_ckpt = os.path.join(modelSavePath, "last.ckpt")
            shutil.copy(cur_ckpt, last_ckpt)

        accelerator.wait_for_everyone()  # 🍄

        # Free cached GPU memory between phases.
        torch.cuda.empty_cache()  # add by xjh | always be killed otherwise

        # ----------------------------------------------------------------
        accelerator.print(f"---------开始验证 epoch-{epoch} ---------")
        # 🍄 Every process runs evaluation; evaluate_network pads+gathers internally.
        with torch.no_grad():
            raw_model = accelerator.unwrap_model(model)
            raw_model.eval()
            mAP = evaluate_network(raw_model, valLoader, epoch)
            raw_model.train()
        accelerator.wait_for_everyone()

        # ✅ Log and track the best score on the main process only.
        if accelerator.is_main_process:
            best_mAP = max(best_mAP, mAP)  # BUGFIX: best_mAP was never updated
            accelerator.print(f">>> mAP = {mAP}")
            swanlab.log({"epoch_eval_mAP": mAP}, step=epoch)
        accelerator.wait_for_everyone()

    # -------- shut down swanlab --------
    if accelerator.is_main_process:
        swanlab.finish()
        
if __name__ == '__main__':
    
    # One Accelerator per process; must be created before any data/model setup.
    accelerator = Accelerator()  #🍄
    
    # Validation metadata (entity tracks, timestamp->entity map, speech data, entity list)
    val_entity_data, val_ts_to_entity, val_speech_data, val_entity_list = load_entity_data(
        ori_file="../../data/AVADataPath/csv/val_orig.csv",
        video_root="../../data/AVADataPath/clips_videos/val"
    )
    
    # Training metadata (same structure as above)
    train_entity_data, train_ts_to_entity, train_speech_data, train_entity_list = load_entity_data(
        ori_file="../../data/AVADataPath/csv/train_orig.csv",
        video_root="../../data/AVADataPath/clips_videos/train"
    )
    
    main(train_entity_data, train_ts_to_entity, val_entity_data, val_ts_to_entity, accelerator)
