#!/usr/bin/env python3
"""
ModelScope模型加载器

支持从本地路径加载ModelScope模型
提供统一的模型加载接口
"""

import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional

# Optional dependency: ModelScope. Import failures are tolerated so this
# module can still be imported; callers check MODELSCOPE_AVAILABLE and
# degrade gracefully when the library is missing.
try:
    from modelscope import snapshot_download
    from modelscope.pipelines import pipeline
    from modelscope.utils.config import Config
    MODELSCOPE_AVAILABLE = True
except ImportError:
    MODELSCOPE_AVAILABLE = False
    logging.warning("ModelScope未安装，无法使用ModelScope模型加载器")

from ...config.model_path_config import ModelPathConfig, get_model_path_manager

logger = logging.getLogger(__name__)

class ModelScopeLoader:
    """ModelScope model loader.

    Loads ModelScope pipelines from pre-downloaded local paths, with an
    optional fallback that downloads the model from the remote hub first.
    Loaded pipelines are cached in memory, keyed by model type, task and
    device, so repeated requests reuse the same pipeline object.
    """

    def __init__(self):
        # Project-level manager that resolves model types to local paths/configs.
        self.model_path_manager = get_model_path_manager()
        # In-memory cache of loaded pipelines, keyed by _make_model_key().
        self.loaded_models: Dict[str, Any] = {}

    @staticmethod
    def _make_model_key(model_type: str, task: str, device: str) -> str:
        """Build the cache key identifying one (model, task, device) pipeline."""
        return f"{model_type}_{task}_{device}"

    def load_model_from_local(
        self,
        model_type: str,
        task: str = "text-classification",
        device: str = "cpu",
        **kwargs
    ) -> Optional[Any]:
        """Load a model from its configured local path.

        Args:
            model_type: Model type (e.g. "structbert", "palm2", "palm2_base").
            task: ModelScope task name.
            device: Target device ("cpu", "cuda", ...).
            **kwargs: Extra arguments forwarded to ``pipeline``.

        Returns:
            Optional[Any]: The loaded pipeline, or None on failure.
        """
        if not MODELSCOPE_AVAILABLE:
            logger.error("ModelScope未安装，无法加载模型")
            return None

        # Serve from the in-memory cache when possible.
        model_key = self._make_model_key(model_type, task, device)
        if model_key in self.loaded_models:
            logger.info("模型已加载: %s", model_key)
            return self.loaded_models[model_key]

        try:
            # Resolve the local directory for this model type.
            model_path = self.model_path_manager.get_model_path(model_type)
            if not model_path:
                logger.error("模型路径未找到: %s", model_type)
                return None

            # The model config must exist as well; bail out early otherwise.
            config = self.model_path_manager.get_model_config(model_type)
            if not config:
                logger.error("模型配置未找到: %s", model_type)
                return None

            logger.info("从本地路径加载模型: %s", model_path)

            # Pass the local path (not the hub model name) so that no
            # remote download is triggered.
            model = pipeline(
                task=task,
                model=model_path,
                device=device,
                **kwargs
            )

            # Cache the pipeline for subsequent calls.
            self.loaded_models[model_key] = model

            logger.info("模型加载成功: %s", model_type)
            return model

        except Exception as e:
            # Best-effort loader: log the failure and signal it via None.
            logger.error("模型加载失败: %s, 错误: %s", model_type, e)
            return None

    def load_model_with_fallback(
        self,
        model_type: str,
        task: str = "text-classification",
        device: str = "cpu",
        **kwargs
    ) -> Optional[Any]:
        """Load a model, downloading it from the remote hub if no local copy exists.

        Args:
            model_type: Model type.
            task: ModelScope task name.
            device: Target device.
            **kwargs: Extra arguments forwarded to ``pipeline``.

        Returns:
            Optional[Any]: The loaded pipeline, or None on failure.
        """
        if not MODELSCOPE_AVAILABLE:
            logger.error("ModelScope未安装，无法加载模型")
            return None

        # First try the local path; an explicit None check avoids relying on
        # the truthiness of an arbitrary pipeline object.
        model = self.load_model_from_local(model_type, task, device, **kwargs)
        if model is not None:
            return model

        # No local copy: fall back to downloading from the remote hub.
        logger.info("本地模型未找到，尝试从远程下载: %s", model_type)
        return self._download_and_load_model(model_type, task, device, **kwargs)

    def _download_and_load_model(
        self,
        model_type: str,
        task: str = "text-classification",
        device: str = "cpu",
        **kwargs
    ) -> Optional[Any]:
        """Download a model from the remote hub, then load it.

        Args:
            model_type: Model type.
            task: ModelScope task name.
            device: Target device.
            **kwargs: Extra arguments forwarded to ``pipeline``.

        Returns:
            Optional[Any]: The loaded pipeline, or None on failure.
        """
        try:
            config = self.model_path_manager.get_model_config(model_type)
            if not config:
                logger.error("模型配置未找到: %s", model_type)
                return None

            # Download into the parent of the configured cache dir so that
            # ModelScope can create its own organization/model sub-structure.
            download_dir = os.path.dirname(config.local_cache_dir)

            logger.info("开始下载模型: %s", config.model_name)
            logger.info("下载目录: %s", download_dir)

            # Fetch (or reuse) the snapshot; returns the local model path.
            model_path = snapshot_download(
                model_id=config.model_name,
                revision=config.model_revision,
                cache_dir=download_dir
            )

            logger.info("模型下载完成: %s", model_path)

            # Load the freshly downloaded snapshot.
            model = pipeline(
                task=task,
                model=model_path,
                device=device,
                **kwargs
            )

            # Cache the pipeline under the same key scheme as local loads.
            model_key = self._make_model_key(model_type, task, device)
            self.loaded_models[model_key] = model

            logger.info("模型下载并加载成功: %s", model_type)
            return model

        except Exception as e:
            logger.error("模型下载失败: %s, 错误: %s", model_type, e)
            return None

    def get_loaded_model(self, model_type: str, task: str = "text-classification", device: str = "cpu") -> Optional[Any]:
        """Return an already-loaded pipeline from the cache.

        Args:
            model_type: Model type.
            task: ModelScope task name.
            device: Target device.

        Returns:
            Optional[Any]: The cached pipeline, or None if not loaded.
        """
        return self.loaded_models.get(self._make_model_key(model_type, task, device))

    def unload_model(self, model_type: str, task: str = "text-classification", device: str = "cpu") -> bool:
        """Remove one pipeline from the cache.

        Args:
            model_type: Model type.
            task: ModelScope task name.
            device: Target device.

        Returns:
            bool: True if the model was cached and removed, False otherwise.
        """
        model_key = self._make_model_key(model_type, task, device)
        if model_key in self.loaded_models:
            del self.loaded_models[model_key]
            logger.info("模型已卸载: %s", model_key)
            return True
        return False

    def unload_all_models(self):
        """Drop every cached pipeline."""
        self.loaded_models.clear()
        logger.info("所有模型已卸载")

    def get_loaded_models_info(self) -> Dict[str, Any]:
        """Summarize the loader's current state.

        Returns:
            Dict[str, Any]: Cached model keys, their count, and the path
            manager's cache information.
        """
        return {
            "loaded_models": list(self.loaded_models.keys()),
            "model_count": len(self.loaded_models),
            "cache_info": self.model_path_manager.get_cache_info()
        }

# Lazily-created process-wide loader instance.
_modelscope_loader = None

def get_modelscope_loader() -> ModelScopeLoader:
    """Return the global ModelScopeLoader, creating it on first use."""
    global _modelscope_loader
    loader = _modelscope_loader
    if loader is None:
        loader = ModelScopeLoader()
        _modelscope_loader = loader
    return loader
