import os
import copy
import torch
import random
import numpy as np
from collections import defaultdict
from datetime import timedelta
import time
import logging

# Module-level logger named after this module, per the standard logging convention.
logger = logging.getLogger(__name__)


def get_time_dif(start_time):
    """Return the elapsed wall-clock time since ``start_time``.

    :param start_time: timestamp previously taken from ``time.time()``
    :return: ``datetime.timedelta`` of the elapsed time, rounded to whole seconds
    """
    elapsed_seconds = time.time() - start_time
    return timedelta(seconds=int(round(elapsed_seconds)))


def set_seed(seed):
    """Seed every random number generator used in the project for reproducibility.

    Covers the stdlib ``random`` module, NumPy, and PyTorch (CPU and all GPUs).

    :param seed: integer seed applied to each generator
    """
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def load_model_and_parallel(model, gpu_ids, ckpt_path=None, strict=True):
    """
    Load model weights (optionally) and place the model on CPU / single GPU / multiple GPUs.

    :param model: ``torch.nn.Module`` to load weights into
    :param gpu_ids: comma-separated GPU id string, e.g. ``'0'`` or ``'0,1'``;
        ``'-1'`` selects CPU
    :param ckpt_path: directory containing a ``model.pt`` state-dict file,
        or ``None`` to keep the model's current weights
    :param strict: forwarded to ``load_state_dict``; when True, state-dict keys
        must match the model exactly
    :return: ``(model, device)`` — the model is wrapped in ``DataParallel``
        when more than one GPU id is given
    """
    gpu_ids = gpu_ids.split(',')

    # Place the model on the first listed GPU (or CPU when '-1' is given).
    device = torch.device("cpu" if gpu_ids[0] == '-1' else "cuda:" + gpu_ids[0])

    if ckpt_path is not None:
        # ckpt_path is a directory; the actual weight file inside it is model.pt.
        ckpt_file = os.path.join(ckpt_path, 'model.pt')
        try:
            # Load onto CPU first to avoid device mismatches; moved to `device` below.
            model.load_state_dict(torch.load(ckpt_file, map_location=torch.device('cpu')), strict=strict)
            # Use the module logger (not print) so loading is visible in the app's log stream.
            logger.info('Successfully loaded model from %s', ckpt_file)
        except FileNotFoundError:
            # Best-effort, as before: keep current weights but make the failure visible.
            logger.error('Checkpoint file %s was not found.', ckpt_file)
        except Exception:
            # logger.exception records the traceback, unlike the previous print().
            logger.exception('An error occurred while loading the model from %s', ckpt_file)

    model.to(device)

    if len(gpu_ids) > 1:
        logger.info(f'Use multi gpus in: {gpu_ids}')
        # DataParallel expects integer device ids.
        gpu_ids = [int(x) for x in gpu_ids]
        model = torch.nn.DataParallel(model, device_ids=gpu_ids)
    else:
        logger.info(f'Use single gpu in: {gpu_ids}')

    return model, device


def get_model_path_list(model_dir):
    """
    Collect checkpoint paths under ``model_dir``, sorted by checkpoint number.

    Entries are expected to be named like ``checkpoint-<step>``; directory
    entries without "checkpoint" in the name are ignored.

    :param model_dir: directory holding the checkpoint sub-directories
    :return: list of joined checkpoint paths in ascending numeric order
    :raises ValueError: if a matching entry has no integer suffix after the last '-'
    """
    checkpoints = [
        os.path.join(model_dir, name)
        for name in os.listdir(model_dir)
        if "checkpoint" in name
    ]
    # The previous sort also keyed on os.path.basename(os.path.dirname(x)), which
    # is the same constant (model_dir's basename) for every entry — dead code.
    # Sorting by the numeric suffix alone is equivalent and clearer.
    return sorted(checkpoints, key=lambda p: int(os.path.basename(p).split('-')[-1]))


def swa(model, model_dir, swa_start=1):
    """
    Stochastic Weight Averaging (SWA): average checkpoint weights into one model.

    SWA is normally applied once training has reached a stable phase.

    :param model: template module; NOTE this object is mutated — each checkpoint's
        state_dict is loaded into it in turn while averaging.
    :param model_dir: directory containing ``checkpoint-<step>`` sub-directories
    :param swa_start: first checkpoint number to include in the average
    :return: a deep-copied model holding the running average of the weights;
        its state_dict is also saved to ``<model_dir>/swa_model/model.pt``
    """
    model_path_list = get_model_path_list(model_dir)
    # NOTE(review): splitting the *full path* on '-' assumes neither model_dir nor
    # the checkpoint names contain extra '-' characters — verify against callers.
    model_path_list = [x for x in model_path_list if int(x.split('-')[-1]) >= swa_start]
    print('model_path_list:{}'.format(model_path_list))

    for _ckpt in model_path_list:
        if not os.path.exists(_ckpt):
            print(f"Checkpoint {_ckpt} does not exist!")
        else:
            print(f"Checkpoint {_ckpt} exists.")

    # NOTE(review): this bound is checked against the *already filtered* list, so
    # it depends on swa_start itself — confirm the intended invariant.
    assert 1 <= swa_start < len(model_path_list) - 1, \
        f'Using swa, swa start should smaller than {len(model_path_list) - 1} and bigger than 0'

    swa_model = copy.deepcopy(model)
    swa_n = 0.  # number of checkpoints folded into the running average so far

    with torch.no_grad():
        for _ckpt in model_path_list:
            if os.path.isdir(_ckpt):  # checkpoints are directories holding model.pt
                _ckpt = os.path.join(_ckpt, 'model.pt')  # point at the weight file itself
            try:
                # Load the checkpoint into the (mutable) template model on CPU.
                model.load_state_dict(torch.load(_ckpt, map_location=torch.device('cpu')))
                tmp_para_dict = dict(model.named_parameters())

                # Running mean: new_avg = x * alpha + old_avg * (1 - alpha)
                # with alpha = 1 / (n + 1) — equivalent to an incremental average.
                alpha = 1. / (swa_n + 1.)

                for name, para in swa_model.named_parameters():
                    para.copy_(tmp_para_dict[name].data.clone() * alpha + para.data.clone() * (1. - alpha))

                swa_n += 1
            except Exception as e:
                # Best-effort: a corrupt or missing checkpoint is skipped, not fatal.
                print(f"Error loading checkpoint {_ckpt}: {e}. Skipping...")

    # Save swa model
    swa_model_dir = os.path.join(model_dir, 'swa_model')
    if not os.path.exists(swa_model_dir):
        os.makedirs(swa_model_dir)

    swa_model_path = os.path.join(swa_model_dir, 'model.pt')

    torch.save(swa_model.state_dict(), swa_model_path)

    return swa_model


def vote(entities_list, threshold=0.9):
    """
    Entity-level majority vote across model predictions.

    Each prediction maps entity_type -> list of entity tuples whose first two
    fields are (entity_start, entity_end); only those two fields take part in
    the vote.

    :param entities_list: one prediction dict per model, all for the same document
    :param threshold: fraction of models that must agree for an entity to be kept
    :return: defaultdict mapping entity_type to the winning (start, end) spans
    """
    min_votes = int(len(entities_list) * threshold)
    tally = defaultdict(int)
    selected = defaultdict(list)

    # Count how many models predicted each (type, start, end) triple.
    for prediction in entities_list:
        for ent_type, ents in prediction.items():
            for ent in ents:
                tally[(ent_type, ent[0], ent[1])] += 1

    # Keep the triples whose vote count reaches the threshold.
    for (ent_type, start, end), votes in tally.items():
        if votes >= min_votes:
            selected[ent_type].append((start, end))

    return selected

def ensemble_vote(entities_list, threshold=0.9):
    """
    Entity-level vote tailored to ensemble models.

    Unlike ``vote``, the *entire* entity tuple (not just start/end) participates
    in the voting key, grouped under each prediction id.

    :param entities_list: one dict per model, mapping id -> list of entity tuples
    :param threshold: fraction of models that must agree for an entity to be kept
    :return: defaultdict mapping id to the entity tuples that reached the threshold
    """
    min_votes = int(len(entities_list) * threshold)
    tally = defaultdict(int)
    selected = defaultdict(list)

    # Count occurrences of each (id, *entity) combination across all models.
    for prediction in entities_list:
        for pred_id, ents in prediction.items():
            for ent in ents:
                tally[(pred_id,) + ent] += 1

    # Re-group the winners back under their prediction id.
    for combined, votes in tally.items():
        if votes >= min_votes:
            selected[combined[0]].append(combined[1:])

    return selected
