import os
import re
from tqdm import tqdm
from ..utils import get_logger, get_class
from ..utils.train_tools import load_model_parameter
import torch
from os import path


class BasicEvaluator(object):
    """Evaluate one checkpoint (or a directory of checkpoints) on a dataset.

    The model, dataset and dataloader are instantiated from the ``opt`` config
    dict. :meth:`eval` then loads every ``.pth`` file found at
    ``opt["eval"]["model_path"]`` (sorted by the epoch number embedded in the
    filename) and runs the configured metric over the dataloader.
    """

    def __init__(self, opt):
        """Build model/dataset/dataloader and choose the evaluation device.

        Args:
            opt: nested config dict; reads ``opt["eval"]``, ``opt["global"]``,
                ``opt["model"]``, ``opt["dataset"]`` and ``opt["dataloader"]``.
        """
        self.opt = opt
        # Either a single checkpoint file or a directory of .pth checkpoints.
        self.model_path = opt["eval"]["model_path"]
        self.main_logger = get_logger(opt["global"]["logger_name"])  # main logger
        self.device_ids, self.device = self.get_device()
        self.model = get_class(opt=opt["model"])
        self.model.to(self.device)
        self.model.eval()
        # Key used to pull the input tensor out of each dataloader batch.
        self.img_key = opt["model"]["input_name"]

        self.dataset = get_class(opt=opt["dataset"])
        self.dataloader = get_class(opt=opt["dataloader"], dataset=self.dataset)

    def eval(self):
        """Run the metric over the dataloader for every discovered checkpoint."""

        def sort_rule(str_name):
            # Order checkpoints numerically by the epoch in "...epochN.pth";
            # names without an epoch number sort first (key 0).
            epoch = re.findall(r'epoch(\d+)\.pth', str_name)
            return int(epoch[0]) if epoch else 0

        # Collect checkpoint paths: a single file, or every .pth in a directory.
        if path.isfile(self.model_path):
            model_pth_list = [self.model_path]
        else:
            model_pth_list = [
                path.join(self.model_path, model_name)
                for model_name in os.listdir(self.model_path)
                if model_name.endswith(".pth")
            ]
        model_pth_list.sort(key=sort_rule)
        self.main_logger.info(f"在选择路径中发现 {len(model_pth_list)} 个模型")

        for now_path in model_pth_list:
            self.model, info_msg = load_model_parameter(self.model, now_path)
            self.main_logger.info(info_msg)
            tqdm_bar = tqdm(enumerate(self.dataloader),
                            desc=str(path.basename(now_path)),
                            total=len(self.dataloader))
            # model.eval() alone does not stop gradient tracking; no_grad()
            # skips autograd bookkeeping during inference.
            with torch.no_grad():
                for ii, data in tqdm_bar:
                    img = data[self.img_key].to(self.device)
                    out = self.model(img)
                    metric = get_class(self.opt["metric"], out, data, msg_show=False)
                    print(metric())

    @staticmethod
    def _cuda_info_line(device_id):
        """Format one log line describing GPU ``device_id`` (name + memory)."""
        cuda_msg = torch.cuda.get_device_properties(device_id)
        return f"\tuse  cuda:{device_id}-【{cuda_msg.name}】 with {cuda_msg.total_memory / (1 << 20):.0f}MiB\r\n"

    def get_device(self):
        """Pick the evaluation device(s) from ``opt["eval"]["device"]``.

        Accepted config values:
          * non-empty list/tuple of GPU ids -> use those GPUs (first is primary)
          * int                             -> single GPU of that id
          * "cpu" (any case)                -> CPU only
          * missing/None/anything else     -> auto-select all visible GPUs,
                                               falling back to CPU

        Returns:
            tuple: ``(device_ids, device)`` where ``device_ids`` is a list of
            GPU ids when more than one GPU is used (otherwise ``None``) and
            ``device`` is a torch device string such as ``"cuda:0"`` or
            ``"cpu"``.
        """
        device_ids = []
        msg_info = "used device information:\r\n"
        device_msg = self.opt["eval"].get("device", None)

        # BUGFIX: the previous condition
        #   isinstance(x, list) or isinstance(x, tuple) and x != []
        # bound `and` tighter than `or`, so an empty list entered this branch
        # and crashed on device_ids[0]. Empty sequences now fall through to
        # auto-selection instead.
        if isinstance(device_msg, (list, tuple)) and len(device_msg) > 0:  # explicit GPU list
            device_ids = list(device_msg)
            device = f"cuda:{device_ids[0]}"
            for device_id in device_ids:
                msg_info += self._cuda_info_line(device_id)
            if len(device_ids) == 1:
                # A single GPU needs no DataParallel id list.
                device_ids = None
        # bool is a subclass of int; exclude it so `device: true` does not
        # produce the nonsense device string "cuda:True".
        elif isinstance(device_msg, int) and not isinstance(device_msg, bool):  # single GPU id
            device_ids = None
            device = f"cuda:{device_msg}"
        elif isinstance(device_msg, str) and device_msg.lower() == "cpu":  # explicit CPU
            device_ids = None
            device = "cpu"
            msg_info += "CPU only"
        else:  # unspecified: auto-select whatever hardware is visible
            msg_info = "未指定驱动设备，自适应选择下列设备进行训练：\r\n"
            if torch.cuda.is_available() and torch.cuda.device_count() > 0:
                for device_id in range(torch.cuda.device_count()):
                    device_ids.append(device_id)
                    msg_info += self._cuda_info_line(device_id)
                device = f"cuda:{device_ids[0]}"
                if len(device_ids) == 1:
                    device_ids = None
            else:
                device_ids = None
                device = "cpu"
                msg_info += "CPU only"
        self.main_logger.warning(msg_info.strip())
        return device_ids, device


