import time, os, torch, argparse, warnings, glob, pandas, json

from utils.tools import *
from dataLoader_multiperson import val_loader
from loconet_withloss import Loconet, evaluate_network
from load_data import load_entity_data
from accelerate import Accelerator  #🍄

# Directory holding the checkpoints ("model_*.model") evaluated by main().
modelSavePath = './save/exps/4GPU_L2_3track/' # before cross 08/31 00:12 - 18.86M parameters

class ValDataPrep:
    """Builds the validation DataLoader for multi-speaker AVA evaluation."""

    def __init__(self, entity_data, ts_to_entity):
        # Precomputed annotation tables (produced by load_entity_data):
        # per-entity data and the timestamp -> entity mapping.
        self.entity_data = entity_data
        self.ts_to_entity = ts_to_entity

    def val_dataloader(self):
        """Return a fresh single-sample DataLoader over the AVA val split."""
        dataset = val_loader(
            trialFileName="../../data/AVADataPath/csv/val_loader.csv",
            audioPath="../../data/AVADataPath/clips_audios/val",
            visualPath="../../data/AVADataPath/clips_videos/val",
            entity_data=self.entity_data,
            ts_to_entity=self.ts_to_entity,
            num_speakers=3,
        )
        # No manual DistributedSampler here: Accelerate injects its own
        # sharding when the loader goes through accelerator.prepare().
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=16,
            pin_memory=True,
        )


def load_ddp_checkpoint(path, device):
    """Load a checkpoint saved under DDP and strip wrapper prefixes.

    Keys stored as "model.module.<name>" or "module.<name>" are renamed to
    plain "<name>" so the state dict matches an unwrapped model; all other
    keys pass through untouched.
    """
    raw_state = torch.load(path, map_location=device)

    def _strip(key):
        # Longest prefix first — "model.module." also starts with "module."
        # only after its own leading "model.", so order matters for neither,
        # but checking the longer one first mirrors the intended precedence.
        for prefix in ("model.module.", "module."):
            if key.startswith(prefix):
                return key[len(prefix):]
        return key

    return {_strip(k): v for k, v in raw_state.items()}

def main(entity_data, ts_to_entity, accelerator):
    """Evaluate every checkpoint in modelSavePath on the AVA validation set.

    For each "model_<epoch>.model" file: build a fresh Loconet, load its
    (possibly DDP-prefixed) weights, wrap model and dataloader through
    Accelerate, and print the resulting mAP.

    Args:
        entity_data: per-entity annotation table (from load_entity_data).
        ts_to_entity: timestamp -> entity mapping (from load_entity_data).
        accelerator: the process-wide accelerate.Accelerator instance.
    """
    warnings.filterwarnings("ignore")

    device = accelerator.device  # keep device choice consistent with Accelerate
    data = ValDataPrep(entity_data, ts_to_entity)

    model_dir = modelSavePath
    modelfiles = sorted(glob.glob(os.path.join(model_dir, 'model_*.model')))
    accelerator.print("modelfiles:", modelfiles)

    if not modelfiles:
        accelerator.print(f"在目录 {model_dir} 中未找到模型文件。")
        # Bug fix: quit() raised SystemExit, ungracefully killing only this
        # rank in a multi-process launch; returning lets each rank exit cleanly.
        return

    accelerator.print(f"在目录 {model_dir} 中找到 {len(modelfiles)} 个模型文件。开始逐个评估...")
    for model_path in modelfiles:
        filename = os.path.basename(model_path)
        # Checkpoint names look like "model_0012.model" -> epoch 12.
        epoch_str = os.path.splitext(filename)[0].split('_')[-1]
        accelerator.print(epoch_str)
        epoch = int(epoch_str)

        # Fresh model per checkpoint so no state leaks between evaluations.
        model = Loconet().to(device)

        # Load weights, stripping any DDP "module." prefixes.
        state_dict = load_ddp_checkpoint(model_path, accelerator.device)
        model.load_state_dict(state_dict)

        # Bug fix: the original named this local "val_loader", shadowing the
        # val_loader dataset class imported at module level.
        eval_loader = data.val_dataloader()
        model, eval_loader = accelerator.prepare(model, eval_loader)

        # Evaluate on the unwrapped model so evaluate_network sees the raw module.
        mAP = evaluate_network(accelerator.unwrap_model(model), eval_loader, device, epoch)
        accelerator.print(f"模型 (unknown) (Epoch {epoch}) 的评估结果: mAP = {mAP}")

    accelerator.print("\n所有模型评估完成。")


if __name__ == '__main__':

    # One Accelerator per process: handles device placement and
    # rank-aware printing for the whole run.
    accelerator = Accelerator()

    annotations_csv = "../../data/AVADataPath/csv/val_orig.csv"
    clips_dir = "../../data/AVADataPath/clips_videos/val"

    # speech_data is unused downstream; only the entity tables feed evaluation.
    entity_data, ts_to_entity, speech_data, entity_list = load_entity_data(annotations_csv, clips_dir)
    accelerator.print(len(entity_list), "个有效实体")

    main(entity_data, ts_to_entity, accelerator)