import time, os, torch, argparse, warnings, glob, pandas, json

from utils.tools import *
import torch.nn as nn
from loconet_withloss import Loconet, train_network, evaluate_network
from dataLoader_multiperson import train_loader, val_loader
import swanlab
import csv
from load_data import load_entity_data

# Experiment output directory; earlier run kept for reference:
# modelSavePath = './save/exps/exp_train_SpkEmb' # after cross 08/31 0:51 stopped early -- decent on the train set, slightly above baseline; trying the path below first
modelSavePath = './save/exps/SpkEmbbefo_gateADD' # before cross 08/31 19:07 - 19.55M parameters
MAX_EPOCH = 30  # total number of training epochs


class TrainCollator(object):
    """Collate variable-length clips into zero-padded batch tensors.

    Each sample is a 5-tuple:
    (audio_features, visual_features, labels, masks, valid_speaker_indices),
    where every tensor's time axis is dim 1.  Audio runs at 4x the video
    frame rate, so audio is truncated at ``4 * cut_limit`` while video,
    labels and masks are truncated at ``cut_limit``.
    """

    def __call__(self, data):
        """Pad every field along time to the batch maximum and stack."""
        audio_feats = [sample[0] for sample in data]
        video_feats = [sample[1] for sample in data]
        label_list = [sample[2] for sample in data]
        mask_list = [sample[3] for sample in data]
        speaker_idx_list = [sample[4] for sample in data]  # xjh

        cut_limit = 200

        def _stack_padded(tensors, target_len):
            # Right-pad each tensor with zeros along dim 1 up to target_len,
            # then stack into a single batch tensor.
            padded = []
            for t in tensors:
                pad_shape = list(t.shape)
                pad_shape[1] = target_len - t.shape[1]
                padded.append(torch.cat([t, t.new_zeros(pad_shape)], 1))
            return torch.stack(padded, 0)

        # Audio: pad to the longest audio length, then truncate.
        audio_len = max(t.shape[1] for t in audio_feats)
        padded_audio = _stack_padded(audio_feats, audio_len)
        if audio_len > cut_limit * 4:
            padded_audio = padded_audio[:, :, :cut_limit * 4, ...]

        # Video / labels / masks share the longest video length.
        video_len = max(t.shape[1] for t in video_feats)
        padded_video = _stack_padded(video_feats, video_len)
        padded_labels = _stack_padded(label_list, video_len)
        padded_masks = _stack_padded(mask_list, video_len)

        if video_len > cut_limit:
            padded_video = padded_video[:, :, :cut_limit, ...]
            padded_labels = padded_labels[:, :, :cut_limit, ...]
            padded_masks = padded_masks[:, :, :cut_limit, ...]

        speaker_idx_list = [torch.tensor(v, dtype=torch.long) for v in speaker_idx_list]

        return padded_audio, padded_video, padded_labels, padded_masks, speaker_idx_list


class TrainDataPrep():
    """Factory for the training DataLoader over the AVA multi-person data."""

    def __init__(self, entity_data, ts_to_entity):
        # Precomputed per-entity metadata shared with the dataset object.
        self.entity_data = entity_data
        self.ts_to_entity = ts_to_entity

    def train_dataloader(self):
        """Build and return a shuffled DataLoader using TrainCollator."""
        dataset = train_loader(
            trialFileName="../../data/AVADataPath/csv/train_loader.csv",
            audioPath="../../data/AVADataPath/clips_audios/train",
            visualPath="../../data/AVADataPath/clips_videos/train",
            entity_data=self.entity_data,
            ts_to_entity=self.ts_to_entity,
            num_speakers=3,
        )
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=1,
            pin_memory=False,
            num_workers=6,
            collate_fn=TrainCollator(),
            shuffle=True,
        )

    
class ValDataPrep:
    """Factory for the validation DataLoader over the AVA multi-person data."""

    def __init__(self, entity_data, ts_to_entity):
        # Precomputed per-entity metadata shared with the dataset object.
        self.entity_data = entity_data
        self.ts_to_entity = ts_to_entity

    def val_dataloader(self):
        """Build and return a sequential (non-shuffled) validation DataLoader."""
        dataset = val_loader(
            trialFileName="../../data/AVADataPath/csv/val_loader.csv",
            audioPath="../../data/AVADataPath/clips_audios/val",
            visualPath="../../data/AVADataPath/clips_videos/val",
            entity_data=self.entity_data,
            ts_to_entity=self.ts_to_entity,
            num_speakers=3,
        )
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=16,
        )


def main(train_entity_data, train_ts_to_entity, val_entity_data, val_ts_to_entity):
    """Train Loconet on AVA, checkpointing every epoch and logging to Swanlab.

    Resumes from ``last.ckpt`` (and the stored Swanlab run id) when present.
    Per-epoch validation is currently disabled (see the commented block at
    the end of the loop).

    Args:
        train_entity_data / train_ts_to_entity: training-set entity metadata.
        val_entity_data / val_ts_to_entity: validation-set entity metadata.
    """

    def _checkpoint_state(epoch, model, optimizer, scheduler, best_mAP):
        # Single source of truth for checkpoint contents.  Includes best_mAP
        # so that the resume path below (checkpoint.get("best_mAP")) can
        # actually recover it -- previously it was never saved and resumes
        # silently reset the best score to 0.0.
        return {
            "epoch": epoch,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "scheduler_state_dict": scheduler.state_dict(),
            "best_mAP": best_mAP,
        }

    make_deterministic(seed=20210617)
    warnings.filterwarnings("ignore")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)

    # -------------------
    # Training data
    # -------------------
    train_data = TrainDataPrep(train_entity_data, train_ts_to_entity)
    trainLoader = train_data.train_dataloader()

    # -------------------
    # Validation data (unused while evaluation below is commented out)
    # -------------------
    val_data = ValDataPrep(val_entity_data, val_ts_to_entity)
    valLoader = val_data.val_dataloader()

    # -------------------
    # Model & optimizer
    # -------------------
    os.makedirs(modelSavePath, exist_ok=True)
    model = Loconet().to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=5e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

    # -------------------
    # Resume from checkpoint
    # -------------------
    start_epoch = 1
    best_mAP = 0.0
    run_id_file = os.path.join(modelSavePath, "swanlab_runid.txt")
    run_id = None

    # Reload full training state from last.ckpt if one exists.
    last_ckpt = os.path.join(modelSavePath, "last.ckpt")
    if os.path.exists(last_ckpt):
        print(f"Loading checkpoint {last_ckpt}")
        checkpoint = torch.load(last_ckpt, map_location=device)

        # Model weights (strict=False tolerates architecture drift).
        model.load_state_dict(checkpoint["model_state_dict"], strict=False)
        # Optimizer & scheduler
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        scheduler.load_state_dict(checkpoint["scheduler_state_dict"])

        start_epoch = checkpoint["epoch"] + 1
        best_mAP = checkpoint.get("best_mAP", 0.0)

        # Reattach to the previous Swanlab run if its id was stored.
        if os.path.exists(run_id_file):
            with open(run_id_file, "r") as f:
                run_id = f.read().strip()

    # -------------------
    # Swanlab init
    # -------------------
    if run_id is not None:
        swanlab.init(project="icassp2026", id=run_id, resume="allow")
        print(f"✅ Resume Swanlab run {run_id}")
    else:
        run = swanlab.init(project="icassp2026")
        run_id = run.id
        with open(run_id_file, "w") as f:
            f.write(run_id)
        print(f"🆕 New Swanlab run {run_id}")

    # -------------------
    # Train / (validate) loop
    # -------------------
    for epoch in range(start_epoch, MAX_EPOCH + 1):
        # Train one epoch
        loss, lr = train_network(model, trainLoader, optimizer, scheduler, device, epoch)

        # Save the rolling "last" checkpoint plus a per-epoch snapshot.
        state = _checkpoint_state(epoch, model, optimizer, scheduler, best_mAP)
        torch.save(state, last_ckpt)
        torch.save(state, os.path.join(modelSavePath, f"epoch-{epoch}.ckpt"))

        # Free cached GPU memory between epochs.
        torch.cuda.empty_cache()  # add by xjh | always be killed (OOM) otherwise

#         # Evaluate one epoch (disabled)
#         with torch.no_grad():
#             model.eval()
#             mAP = evaluate_network(model, valLoader, device, epoch)
#             print(f">>> mAP = {mAP}")
#         model.train()

#         torch.cuda.empty_cache()  # free GPU memory after validation

#         # Log metrics
#         swanlab.log({"epoch_eval_mAP": mAP}, step=epoch-1)

#         # Save best checkpoint
#         if mAP > best_mAP:
#             best_mAP = mAP
#             torch.save(
#                 _checkpoint_state(epoch, model, optimizer, scheduler, best_mAP),
#                 os.path.join(modelSavePath, "best.ckpt"))
#             print(f"✅ Best model updated at Epoch {epoch}, mAP={best_mAP:.4f}")

    swanlab.finish()

if __name__ == '__main__':

    # Validation-split entity metadata.
    (val_entity_data, val_ts_to_entity,
     val_speech_data, val_entity_list) = load_entity_data(
        ori_file="../../data/AVADataPath/csv/val_orig.csv",
        video_root="../../data/AVADataPath/clips_videos/val",
    )

    # Training-split entity metadata.
    (train_entity_data, train_ts_to_entity,
     train_speech_data, train_entity_list) = load_entity_data(
        ori_file="../../data/AVADataPath/csv/train_orig.csv",
        video_root="../../data/AVADataPath/clips_videos/train",
    )

    main(train_entity_data, train_ts_to_entity, val_entity_data, val_ts_to_entity)
