import time, os, torch, argparse, warnings, glob, pandas, json

from utils.tools import *
from dataLoader_multiperson import val_loader
# from loconet import loconet
from loconet_withloss import loconet


class DataPrep():
    """Holds the distributed-eval context plus the pre-built entity metadata,
    and builds the AVA validation DataLoader on demand."""

    def __init__(self, world_size, rank, entity_data, ts_to_entity):
        # entity_data / ts_to_entity are the metadata dicts built (or cached)
        # by the __main__ section of this script.
        self.world_size = world_size
        self.rank = rank
        self.entity_data = entity_data
        self.ts_to_entity = ts_to_entity

    def val_dataloader(self):
        """Return a DataLoader over the AVA validation set (batch 1, unshuffled)."""
        dataset = val_loader(
            trialFileName="../../data/AVADataPath/csv/val_loader.csv",
            audioPath="../../data/AVADataPath/clips_audios/val",
            visualPath="../../data/AVADataPath/clips_videos/val",
            entity_data=self.entity_data,
            ts_to_entity=self.ts_to_entity,
            num_speakers=3,
        )
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=16)




def main(gpu, world_size, entity_data, ts_to_entity):
    """Evaluate every saved checkpoint in model_dir on the AVA validation set.

    Args:
        gpu: GPU index, used as the process rank for DataPrep.
        world_size: number of processes (passed through to DataPrep).
        entity_data: {video_id: {entity_id: {timestamp: speech_label}}}.
        ts_to_entity: {video_id: {timestamp: [entity_id, ...]}}.
    """
    rank = gpu
    warnings.filterwarnings("ignore")
    data = DataPrep(world_size, rank, entity_data, ts_to_entity)

    s = loconet()
    model_dir = "./save/exps/exp1"
    # Checkpoints are named model_<epoch>.model; a lexicographic sort gives a
    # deterministic evaluation order.
    modelfiles = sorted(glob.glob(os.path.join(model_dir, 'model_*.model')))
    print("modelfiles:", modelfiles)
    if not modelfiles:
        print(f"在目录 {model_dir} 中未找到模型文件。")
        quit()
    print(f"在目录 {model_dir} 中找到 {len(modelfiles)} 个模型文件。开始逐个评估...")
    for model_path in modelfiles:
        filename = os.path.basename(model_path)
        # Epoch index is the suffix after the last underscore,
        # e.g. "model_0012.model" -> "0012".
        epoch_str = os.path.splitext(filename)[0].split('_')[-1]
        print(epoch_str)
        epoch = int(epoch_str)
        s.loadParameters(model_path)
        mAP = s.evaluate_network(epoch=epoch, loader=data.val_dataloader())
        # Bug fix: report the actual checkpoint name instead of the literal
        # placeholder "(unknown)" that was left in the message.
        print(f"模型 {filename} (Epoch {epoch}) 的评估结果: mAP = {mAP}")
    print("\n所有模型评估完成。")


if __name__ == '__main__':
    # Module-level metadata stores, populated by the helpers below (or loaded
    # back from the JSON cache):
    #   entity_data:  {video_id: {entity_id: {timestamp: speech_label}}}
    #   speech_data:  {video_id: {timestamp: 0/1}} — 1 if anyone speaks then
    #   ts_to_entity: {video_id: {timestamp: [entity_id, ...]}}
    entity_data = {}
    speech_data = {}
    ts_to_entity = {}


    def csv_to_list(csv_path):
        """Read a CSV file and return its rows as a list of lists of strings.

        Bug fix: the `csv` module is used here but was never imported at file
        level, so calling this function raised NameError. Import it locally to
        keep the fix self-contained.
        """
        import csv
        with open(csv_path, 'r') as f:
            return list(csv.reader(f))


    def postprocess_speech_label(speech_label):
        """Binarize an AVA speech label: 2 (SPEAKING_NOT_AUDIBLE) maps to 0."""
        label = int(speech_label)
        return 0 if label == 2 else label


    def cache_entity_data(csv_file_path):
        """Parse the original AVA csv and populate entity_data / speech_data.

        Returns the set of (video_id, entity_id) pairs seen in the file.
        """
        entity_set = set()

        rows = csv_to_list(csv_file_path)
        rows.pop(0)  # drop the csv header row
        for row in rows:
            video_id = row[0]       # video name
            timestamp = row[1]      # frame timestamp, e.g. "900.0"
            entity_id = row[-3]     # face-track / person identifier

            # Map label 2 (speaking but not audible) to 0 (not speaking).
            speech_label = postprocess_speech_label(row[-2])

            # entity_data[video][entity][timestamp] -> per-face speech label.
            video_entities = entity_data.setdefault(video_id, {})
            if entity_id not in video_entities:
                video_entities[entity_id] = {}
                # First sighting of this entity: remember the pair, e.g.
                # ("video_1", "person_1").
                entity_set.add((video_id, entity_id))
            video_entities[entity_id][timestamp] = speech_label

            # speech_data[video][timestamp] -> 1 if anyone speaks at that time;
            # max() ORs this row's label into whatever was recorded before.
            video_speech = speech_data.setdefault(video_id, {})
            previous = video_speech.get(timestamp, speech_label)
            video_speech[timestamp] = max(previous, speech_label)

        return entity_set


    def entity_list_postprocessing(entity_set, video_root):
        """Drop entities whose frames are missing on disk, then build the
        timestamp -> entities index (ts_to_entity).

        Mutates entity_set in place and returns the surviving entities as a
        sorted list of (video_id, entity_id) tuples.
        """
        print('初始化实体列表，总实体数：', len(entity_set))

        # Remove entities whose clip directory was never downloaded.
        for video_id, entity_id in list(entity_set):
            if not os.path.exists(os.path.join(video_root, video_id, entity_id)):
                entity_set.remove((video_id, entity_id))
        print('过滤未下载实体后，总实体数：', len(entity_set))

        # Remove entities whose on-disk frame count disagrees with the number
        # of timestamps recorded in the metadata.
        for video_id, entity_id in list(entity_set):
            frame_dir = os.path.join(video_root, video_id, entity_id)
            if len(os.listdir(frame_dir)) != len(entity_data[video_id][entity_id]):
                entity_set.remove((video_id, entity_id))
        print('过滤图片不完整实体后，总实体数：', len(entity_set))

        # Lexicographically sorted list of the surviving entities.
        entity_list = sorted(entity_set)

        # Index the surviving entities by (video_id, timestamp).
        for video_id, entity_id in entity_set:
            per_video = ts_to_entity.setdefault(video_id, {})
            for timestamp in entity_data[video_id][entity_id]:
                per_video.setdefault(timestamp, []).append(entity_id)
        return entity_list


    def clean_entity_data(entity_list, entity_data):
        """Prune entity_data (in place) down to the entities in entity_list.

        Videos left with no entities are removed entirely; the mutated
        entity_data dict is returned for convenience.
        """
        keep = set(entity_list)
        for video_id in list(entity_data):
            video_entities = entity_data[video_id]
            for entity_id in list(video_entities):
                if (video_id, entity_id) not in keep:
                    del video_entities[entity_id]
            if not video_entities:
                del entity_data[video_id]
        return entity_data


    # Dataset locations (relative to this script).
    dataPath = "../../data/AVADataPath/"
    ori_file = "../../data/AVADataPath/csv/val_orig.csv"
    # NOTE(review): "trianFileName" looks like a typo for trainFileName; it
    # (and valFileName / audio_root) appears unused in this section.
    trianFileName = "../../data/AVADataPath/csv/train_loader.csv"
    valFileName = "../../data/AVADataPath/csv/val_loader.csv"
    video_root = "../../data/AVADataPath/clips_videos/val"
    audio_root = "../../data/AVADataPath/clips_audios/val"

    # JSON cache of the parsed metadata, so repeated runs can skip the csv pass.
    cache_dir = os.path.join(dataPath, "json", "val")
    entity_data_cache_file = os.path.join(cache_dir, "entity_data.json")
    ts_to_entity_cache_file = os.path.join(cache_dir, "ts_to_entity.json")
    speech_data_cache_file = os.path.join(cache_dir, "speech_data.json")
    entity_list_cache_file = os.path.join(cache_dir, "entity_list.json")

    # Load everything from the cache only if all four files are present.
    if (os.path.exists(entity_data_cache_file) and
            os.path.exists(ts_to_entity_cache_file) and
            os.path.exists(speech_data_cache_file) and
            os.path.exists(entity_list_cache_file)):
        print("Loading cached data...")
        with open(entity_data_cache_file, 'r') as f:
            entity_data = json.load(f)
        with open(ts_to_entity_cache_file, 'r') as f:
            ts_to_entity = json.load(f)
        with open(speech_data_cache_file, 'r') as f:
            speech_data = json.load(f)
        with open(entity_list_cache_file, 'r') as f:
            # entity_list is stored as a list of lists; convert each entry
            # back to a (video_id, entity_id) tuple.
            entity_list = [tuple(item) for item in json.load(f)]
        print("Cached data loaded successfully.")
    else:
        print("Cached data not found. Generating data...")
        # Reset the stores before regenerating (defensive; they start empty).
        entity_data = {}
        speech_data = {}
        ts_to_entity = {}

        # Parse the csv, filter entities against the files on disk, and prune
        # the metadata down to the surviving entities.
        entity_set = cache_entity_data(ori_file)
        entity_list = entity_list_postprocessing(entity_set, video_root)
        entity_data = clean_entity_data(entity_list, entity_data)

        # Make sure the cache directory exists before writing.
        os.makedirs(cache_dir, exist_ok=True)

        # Persist the generated data so the next run takes the fast path.
        print("Saving generated data to cache...")
        with open(entity_data_cache_file, 'w') as f:
            json.dump(entity_data, f)
        with open(ts_to_entity_cache_file, 'w') as f:
            json.dump(ts_to_entity, f)
        with open(speech_data_cache_file, 'w') as f:
            json.dump(speech_data, f)
        with open(entity_list_cache_file, 'w') as f:
            # Tuples are not JSON-serializable as tuples; store them as lists.
            json.dump([list(item) for item in entity_list], f)
        print("Data saved to cache.")

    # Single-process evaluation on GPU 0 (the chained, unused `p` binding was
    # removed). world_size is passed through for DataPrep's interface only.
    gpu = 0
    world_size = 6

    main(gpu, world_size, entity_data, ts_to_entity)
