"""
AI模型管理器
用于统一管理和访问各种类型的AI模型

支持的模型类型包括：
1. CHAT - 聊天对话模型
2. CODE_GENERATION - 代码生成模型
3. IMAGE_RECOGNITION - 图像识别模型
4. SPEECH_RECOGNITION - 语音识别模型
5. VIDEO_ANALYSIS - 视频分析模型
6. TIME_SERIES_FORECASTING - 时间序列预测模型
7. RECOMMENDATION_SYSTEM - 推荐系统模型
8. ANOMALY_DETECTION - 异常检测模型
"""

import importlib
import logging
import time
from enum import Enum
from typing import Any, Dict, Optional, Union

from langchain_community.llms import Ollama
from langchain_core.language_models import BaseLanguageModel
from langchain_openai import ChatOpenAI, OpenAI

from src.research_core.model_monitoring import model_monitoring


# 配置日志
logger = logging.getLogger(__name__)


class ModelType(Enum):
    """Enumeration of supported AI model task types.

    Each member's string value matches the task-type string consumed by
    the adaptive model manager (see ``ModelManager.get_model``).
    """
    CHAT = "chat"                                          # conversational chat model
    CODE_GENERATION = "code_generation"                    # code generation model
    IMAGE_RECOGNITION = "image_recognition"                # image recognition model
    SPEECH_RECOGNITION = "speech_recognition"              # speech recognition model
    VIDEO_ANALYSIS = "video_analysis"                      # video analysis model
    TIME_SERIES_FORECASTING = "time_series_forecasting"    # time-series forecasting model
    RECOMMENDATION_SYSTEM = "recommendation_system"        # recommendation system model
    ANOMALY_DETECTION = "anomaly_detection"                # anomaly detection model


class ModelManager:
    """Singleton manager providing unified access to AI models by task type.

    Models are loaded once at construction time with per-model fault
    tolerance: if a loader fails, that model type falls back to the chat
    model (or to ``None``/"Unavailable" when the chat model itself failed).
    """

    _instance = None
    _initialized = False

    def __new__(cls):
        # Classic singleton: every ModelManager() call yields the same instance.
        if cls._instance is None:
            cls._instance = super(ModelManager, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard so repeated ModelManager() calls do not re-run initialization.
        if not self._initialized:
            self.models: Dict[ModelType, Any] = {}        # model_type -> model instance (or None)
            self.model_names: Dict[ModelType, str] = {}   # model_type -> class name of the instance
            self._initialize_models()
            self.__class__._initialized = True

    def _load_model(self, model_type: ModelType, module_path: str,
                    factory_name: str, label: str) -> None:
        """Lazily import ``module_path``, call its factory and register the model.

        Args:
            model_type: the ModelType slot to fill.
            module_path: dotted path of the module providing the factory
                (imported lazily to avoid circular imports and keep each
                loader optional).
            factory_name: name of the zero-argument factory in that module.
            label: human-readable model label used in the error log message.

        On any failure the slot falls back to the already-loaded chat model,
        or to ``None``/"Unavailable" when the chat model itself is missing.
        """
        try:
            module = importlib.import_module(module_path)
            model = getattr(module, factory_name)()
            self.models[model_type] = model
            self.model_names[model_type] = model.__class__.__name__
        except Exception as e:
            logger.error(f"初始化{label}模型失败: {e}")
            # Fall back to the chat model; for the chat model itself this
            # registers None / "Unavailable" since nothing is loaded yet.
            self.models[model_type] = self.models.get(ModelType.CHAT)
            self.model_names[model_type] = self.model_names.get(ModelType.CHAT, "Unavailable")

    def _initialize_models(self) -> None:
        """Initialize all models, with per-model fault tolerance."""
        # Chat must load first: it is the fallback for every other model type.
        self._load_model(ModelType.CHAT,
                         "src.research_core.model", "get_llm", "聊天")
        self._load_model(ModelType.CODE_GENERATION,
                         "src.research_core.code_generation_model",
                         "get_code_generation_model", "代码生成")
        self._load_model(ModelType.IMAGE_RECOGNITION,
                         "src.research_core.image_model",
                         "get_image_recognition_model", "图像识别")
        self._load_model(ModelType.SPEECH_RECOGNITION,
                         "src.research_core.speech_model",
                         "get_speech_model", "语音识别")

    def get_model(self, model_type: ModelType, context: Optional[Dict[str, Any]] = None) -> Any:
        """
        Get a model of the given type, with optional adaptive selection.

        Args:
            model_type: the model type to fetch.
            context: optional context; when provided, the adaptive model
                manager is consulted to pick the optimal model for the task.

        Returns:
            Any: a model instance, or None if initialization failed for
            this type and no fallback was available.
        """
        if context:
            try:
                # Lazy import to avoid a circular import with the adaptive manager.
                from src.research_core.adaptive_model_manager import get_adaptive_model_manager
                adaptive_manager = get_adaptive_model_manager()

                # Each ModelType's value is exactly the adaptive task-type
                # string, so no explicit mapping table is needed.
                adaptive_model = adaptive_manager.select_optimal_model(model_type.value, context)
                if adaptive_model:
                    return adaptive_model
            except ImportError:
                logger.warning("无法导入自适应模型管理器，使用默认模型")

        # Fall back to the statically registered default model.
        return self.models.get(model_type)


# Module-level singleton instance (ModelManager.__new__ enforces uniqueness).
model_manager = ModelManager()


def get_model_manager() -> ModelManager:
    """Return the process-wide ModelManager singleton."""
    return model_manager
