import gc
import inspect
import logging
from pathlib import Path
from typing import Union, List

import numpy as np
import torch
from PIL import Image
from ais_bench.infer.interface import InferSession
from ultralytics.engine.results import Results
from ultralytics.cfg import get_cfg, get_save_dir
from ultralytics.utils import (
    ARGV,
    ASSETS,
    LOGGER, emojis,
)

import predictor

logger = logging.getLogger(__name__)


class AscendYOLOModel:
    """YOLO model wrapper for Ascend (NPU) inference.

    Bridges an Ultralytics-style predictor with an offline ``.om`` model
    executed through ais_bench's ``InferSession``. The predictor instance is
    created lazily on the first call to :meth:`predict`.
    """

    def __init__(self, model_path: str = None, task: str = 'detect', om_model_path: str = None, device_id: int = 0):
        """Initialize the model wrapper.

        :param model_path: path to the original model weights
        :param task: task type ('classify' | 'detect' | 'segment')
        :param om_model_path: path to the converted Ascend ``.om`` model
        :param device_id: Ascend device id to open the inference session on
        """
        # Built lazily by predict(); holds an ultralytics-style predictor.
        self.predictor = None
        self.model = model_path
        self.task = task
        # Open the Ascend inference session for the .om model.
        self.om = InferSession(device_id, om_model_path)

    def __call__(
        self,
        source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
        stream: bool = False,
        **kwargs,
    ) -> list:
        """Shorthand for :meth:`predict`."""
        return self.predict(source, stream, **kwargs)

    def predict(
            self,
            source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
            stream: bool = False,
            predictor=None,
            **kwargs,
    ) -> List[Results]:
        """Run inference on ``source`` and return a list of ``Results``.

        :param source: input image(s)/path/tensor; falls back to ASSETS when omitted
        :param stream: if True, the predictor yields results lazily
        :param predictor: optional pre-built predictor; only used on the first call,
            before ``self.predictor`` has been created
        :param kwargs: extra predictor arguments; they override the method defaults
        """
        if source is None:
            source = ASSETS
            LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.")

        # Detect whether we were launched from the yolo/ultralytics CLI in a
        # predict/track mode — affects saving behavior and the return path.
        cli_modes = ("predict", "track", "mode=predict", "mode=track")
        is_cli = (ARGV[0].endswith("yolo") or ARGV[0].endswith("ultralytics")) and any(
            m in ARGV for m in cli_modes
        )

        # Method defaults, overridden by caller-supplied kwargs.
        defaults = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict"}
        merged = {**defaults, **kwargs}
        prompts = merged.pop("prompts", None)  # SAM-type models only

        if self.predictor:
            # Predictor already set up: just refresh its args (and save dir if
            # the output location was changed).
            self.predictor.args = get_cfg(self.predictor.args, merged)
            if "project" in merged or "name" in merged:
                self.predictor.save_dir = get_save_dir(self.predictor.args)
        else:
            # First call: build (or adopt) a predictor and bind the om session.
            self.predictor = predictor or self._smart_load("predictor")(overrides=merged, _callbacks=None)
            self.predictor.setup_model(model=self.model, verbose=is_cli, om=self.om)

        if prompts and hasattr(self.predictor, "set_prompts"):  # SAM-type models
            self.predictor.set_prompts(prompts)

        if is_cli:
            return self.predictor.predict_cli(source=source)
        return self.predictor(source=source, stream=stream)

    def _smart_load(self, key: str):
        """Look up the class registered under ``key`` for the current task.

        :raises NotImplementedError: when the task/key combination is not in
            :attr:`task_map`.
        """
        try:
            return self.task_map[self.task][key]
        except Exception as e:
            caller = inspect.stack()[1][3]  # name of the calling function
            raise NotImplementedError(
                emojis(
                    f"WARNING ⚠️ '{self.__class__.__name__}' model does not support "
                    f"'{caller}' mode for '{self.task}' task yet."
                )
            ) from e

    @property
    def task_map(self):
        """Registry mapping each supported task to its predictor class."""
        return {
            "classify": {"predictor": predictor.classification_predictor.ClassificationPredictor},
            "detect": {"predictor": predictor.detection_predictor.DetectionPredictor},
            "segment": {"predictor": predictor.segmentation_predictor.SegmentationPredictor},
        }
