"""
模型客户端框架 - 支持多种模型后端的无缝切换和资源动态适配
支持vLLM、llama.cpp、OpenAI等模型服务，集成资源监控和自动降级
"""

from typing import Dict, Any, Optional, List, Callable, Set
from abc import ABC, abstractmethod
from enum import Enum
import asyncio
import time
import httpx
from utils.logger import Logger
from utils.exceptions import ModelError
from utils.constants import MODEL_SWITCH_THRESHOLD, MODEL_RESOURCE_THRESHOLDS, RECOMMENDED_MODELS
from config.model_config import model_config_manager, ModelConfig
from app.core.resource_monitor import get_resource_monitor, get_model_resource_info, get_recommended_models_by_size
# 注意：移除model_manager导入以避免循环依赖

# 从共享types文件导入类型定义
from middleware.types import ModelType, CloudLLMType, CloudLLMClientProtocol

from middleware.model_adapter import get_model_adapter

# Model backend enumeration
class ModelBackend(Enum):
    """Backends a model can be served from."""
    VLLM = "vllm"
    OLLAMA = "ollama"
    LLAMA_CPP = "llama_cpp"
    FALLBACK = "fallback"
    CLOUD = "cloud"  # cloud-hosted LLM service

logger = Logger.get_logger("model_client")


# Model state enumeration
class ModelState(Enum):
    """Lifecycle/health states a model client can be in."""
    READY = "ready"
    LOADING = "loading"
    ERROR = "error"
    STANDBY = "standby"
    TOO_LARGE = "too_large"  # model too big to load under current resources


class ModelType(Enum):
    """Supported model types.

    NOTE(review): this redefinition shadows the ``ModelType`` imported from
    ``middleware.types`` at the top of the file — confirm the two stay in
    sync, or remove one of them.
    """
    VLLM = "vllm"
    LLAMA_CPP = "llama_cpp"
    OPENAI = "openai"
    CLAUDE = "claude"
    FALLBACK = "fallback"  # degradation strategy
    OLLAMA = "ollama"  # Ollama support


class ModelConfig:
    """Model configuration with resource-aware dynamic defaults.

    NOTE(review): this local class shadows the ``ModelConfig`` imported from
    ``config.model_config`` at the top of the file — confirm which one
    callers are expected to use.
    """
    
    # Built-in defaults per model type.
    DEFAULT_CONFIGS = {
        ModelType.VLLM: {
            "base_url": "http://localhost:8080",
            "timeout": 30.0,
            "max_retries": 3,
            "model_name": "qwen2-7b-instruct",
            "max_tokens": 512,
            "temperature": 0.7
        },
        ModelType.LLAMA_CPP: {
            "base_url": "http://localhost:8081",
            "timeout": 45.0,
            "max_retries": 2,
            "model_path": "./models/qwen2-7b-instruct-q4_0.gguf",
            "n_gpu_layers": 35,
            "ctx_size": 4096
        },
        ModelType.OLLAMA: {
            "base_url": "http://localhost:11434",
            "timeout": 60.0,
            "max_retries": 3,
            "model_name": "llama3",
            "max_tokens": 1024,
            "temperature": 0.7
        },
        ModelType.FALLBACK: {
            "enabled": True,
            "strategy": "simple_concat",  # simple concatenation strategy
            "max_reference_texts": 3
        }
    }
    
    def __init__(self, model_type: ModelType = ModelType.FALLBACK, custom_config: Dict = None):
        self.model_type = model_type
        self.config = dict(self.DEFAULT_CONFIGS.get(model_type, {}))
        
        # For Ollama, let current system resources pick the model name.
        if model_type == ModelType.OLLAMA:
            self._select_ollama_model()
        
        # Explicit overrides always win over defaults.
        if custom_config:
            self.config.update(custom_config)
        
        logger.info(f"模型配置初始化: {model_type.value}, config_keys: {list(self.config.keys())}")
    
    def _select_ollama_model(self):
        """Pick the Ollama model matching the recommended resource tier."""
        monitor = get_resource_monitor()
        model_size = monitor.get_recommended_model_size()
        
        # Walk the tiers from largest to smallest; fall back to "small".
        chosen = None
        for tier in ("large", "medium"):
            if model_size == tier and RECOMMENDED_MODELS.get(tier):
                chosen = RECOMMENDED_MODELS[tier][0]
                break
        if chosen is None:
            chosen = RECOMMENDED_MODELS.get("small", ["phi3:3.8b-instruct-q4_K_M"])[0]
        self.config["model_name"] = chosen
        
        logger.info(f"根据系统资源状态，选择Ollama模型: {self.config['model_name']} (size: {model_size})")
    
    def is_compatible_with_system(self) -> bool:
        """Return True when the configured model can be loaded right now."""
        if self.model_type == ModelType.OLLAMA and "model_name" in self.config:
            return get_resource_monitor().can_load_model(self.config["model_name"])
        return True


class BaseModelClient(ABC):
    """Abstract base class for model clients.

    Integrates resource monitoring and automatic degradation: when system
    resources are tight or generation fails, requests are routed to the
    model adapter instead of this client.
    """
    
    def __init__(self, model_type: ModelType, config: Optional[ModelConfig] = None):
        """Store the model type/config and initialize bookkeeping state.

        Args:
            model_type: which backend this client represents.
            config: optional configuration; defaults to the config manager's
                default entry.
        """
        self.model_type = model_type
        self.config = config or model_config_manager.get_config()
        self._initialized = False
        self._health_check_interval = self.config.deployment_config.get('health_check_interval', 30)
        self._last_health_check = 0
        self.is_healthy = True
        self.last_error = None
        self._state: ModelState = ModelState.READY
        self._last_used_time: float = 0
        self._total_tokens_processed: int = 0
        self._request_count: int = 0
        
        # Shared system-resource monitor used for switch decisions.
        self._monitor = get_resource_monitor()
        
    @property
    def state(self) -> ModelState:
        """Current model state."""
        return self._state
    
    @state.setter
    def state(self, value: ModelState):
        """Set the model state, logging every actual transition."""
        if self._state != value:
            logger.info(f"模型状态变更: {self._state.value} -> {value.value}")
            self._state = value
    
    @abstractmethod
    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text — implemented by subclasses."""
        pass
    
    @abstractmethod
    async def health_check(self) -> bool:
        """Health check — implemented by subclasses."""
        pass
    
    async def _initialize(self):
        """Optional initialization hook — subclasses may override.

        NOTE(review): subclasses in this file define a public ``initialize()``
        instead of overriding this hook — confirm which is intended.
        """
        pass
    
    def build_qa_prompt(self, query: str, contexts: List[str]) -> str:
        """Build a QA prompt from the query and up to three context texts."""
        if not contexts:
            return f"请回答以下问题：{query}"
        
        # Number the first three contexts for the prompt body.
        context_str = "\n".join([f"{i+1}. {ctx}" for i, ctx in enumerate(contexts[:3])])
        
        return f"""基于以下参考信息，请回答用户问题：

参考信息：
{context_str}

用户问题：{query}

请提供一个简洁准确的答案，如果参考信息不足以回答问题，请明确说明。"""
    
    async def safe_generate(self, prompt: str, **kwargs) -> Optional[str]:
        """Generate with error handling and resource monitoring.

        Returns None only when both this client and the model adapter fail.
        """
        try:
            # Route through the adapter pre-emptively under resource pressure.
            if await self.check_if_should_switch():
                logger.warning("检测到资源紧张，通过模型适配器切换模型")
                adapter = get_model_adapter()
                return await adapter.generate(prompt, **kwargs)
                
            return await self.generate(prompt, **kwargs)
        except Exception as e:
            # Record the failure before attempting the adapter fallback.
            self.is_healthy = False
            self.last_error = str(e)
            self.state = ModelState.ERROR
            logger.error(f"模型生成失败 [{self.model_type.value}]: {str(e)}")
            
            # Last resort: let the model adapter try.
            try:
                adapter = get_model_adapter()
                return await adapter.generate(prompt, **kwargs)
            except Exception:
                return None
    
    async def check_if_should_switch(self) -> bool:
        """Ask the resource monitor whether a model switch is advisable."""
        return await self._monitor.should_switch_model()
    
    async def _update_usage_stats(self, tokens_processed: int):
        """Record a completed request and its (estimated) token count."""
        self._last_used_time = time.time()
        self._total_tokens_processed += tokens_processed
        self._request_count += 1
    
    async def get_memory_usage(self) -> Optional[float]:
        """Current memory usage in MB; base default is unknown (None)."""
        return None


class VllmClient(BaseModelClient):
    """vLLM model client with resource monitoring and automatic switching.

    Runs in "framework mode": generation is simulated rather than sent to a
    real vLLM server.
    """
    
    def __init__(self, config: Optional[ModelConfig] = None):
        """Initialize from the named "vllm-7b" config (or the one given)."""
        config = config or model_config_manager.get_config("vllm-7b")
        super().__init__(ModelType.VLLM, config)
        self.service_url = self.config.deployment_config.get('service_url', 'http://localhost:8000')
        self.client: Optional[httpx.AsyncClient] = None
        
        # Flag the client TOO_LARGE up front when the configured model does
        # not fit the currently available system resources.
        model_info = get_model_resource_info(self.config.model_name)
        if model_info:
            can_load = self._monitor.can_load_model(self.config.model_name)
            if not can_load:
                logger.warning(f"vLLM模型 {self.config.model_name} 太大，不适合当前系统资源")
                self.state = ModelState.TOO_LARGE
    
    async def initialize(self):
        """Initialize the vLLM client (framework mode: no real connection)."""
        logger.info(f"vLLM client initialized (framework mode): {self.service_url}")
        logger.info(f"vLLM config: {self.config.backend_config}")
        
        # A failed HTTP-client setup marks ERROR, but initialization still
        # completes so generate() can delegate to the adapter.
        try:
            self.client = httpx.AsyncClient(timeout=self.config.timeout)
        except Exception as e:
            logger.error(f"初始化HTTP客户端失败: {str(e)}")
            self.state = ModelState.ERROR
        
        self._initialized = True
    
    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text (simulated) with resource monitoring.

        Delegates to the model adapter when this client is in an abnormal
        state or when resources are tight.
        """
        if not self._initialized:
            await self.initialize()
        
        # Abnormal state -> route through the adapter instead.
        if self.state in [ModelState.ERROR, ModelState.TOO_LARGE]:
            logger.warning(f"vLLM模型处于异常状态: {self.state.value}，尝试使用模型适配器")
            adapter = get_model_adapter()
            return await adapter.generate(prompt, **kwargs)
        
        # Resource pressure -> let the adapter pick a better-suited model.
        if await self.check_if_should_switch():
            logger.warning("检测到资源紧张，尝试切换到更适合的模型")
            adapter = get_model_adapter()
            return await adapter.generate(prompt, **kwargs)
        
        # Log current memory situation for observability.
        memory_usage = await self._monitor.get_memory_usage()
        available_memory = await self._monitor.get_available_memory_mb()
        logger.info(f"vLLM generation (framework mode): prompt_length={len(prompt)}, "
                   f"内存使用: {memory_usage}%, 可用: {available_memory}MB")
        
        # Generation parameters: kwargs override the configured defaults.
        temperature = kwargs.get('temperature', self.config.temperature)
        max_tokens = kwargs.get('max_tokens', self.config.max_tokens)
        
        # Simulated response latency.
        await asyncio.sleep(0.5)
        
        # Update usage statistics (rough estimate: ~4 chars per token).
        await self._update_usage_stats(len(prompt) // 4)
        self.state = ModelState.READY
        
        return f"[vLLM模拟结果] 基于提示词: {prompt[:100]}... (temp={temperature}, max_tokens={max_tokens})"
    
    async def health_check(self) -> bool:
        """Healthy unless memory pressure exceeds the switch threshold."""
        try:
            # Resource-based health: high memory usage marks unhealthy.
            memory_usage = await self._monitor.get_memory_usage()
            if memory_usage > MODEL_SWITCH_THRESHOLD:
                logger.warning(f"vLLM健康检查: 内存使用率过高 ({memory_usage}%)，标记为不健康")
                self.is_healthy = False
                return False
            
            self.is_healthy = True
            self.state = ModelState.READY
            return True
        except Exception as e:
            logger.error(f"vLLM健康检查失败: {str(e)}")
            self.is_healthy = False
            self.state = ModelState.ERROR
            return False
    
    async def close(self):
        """Close the underlying HTTP client, if any."""
        if self.client:
            await self.client.aclose()
            self.client = None
        logger.info("vLLM客户端已关闭")


class LlamaCppClient(BaseModelClient):
    """llama.cpp model client (framework mode) with resource monitoring.

    Generation is simulated; an abnormal state or resource pressure routes
    the request through the model adapter instead.
    """
    
    def __init__(self, config: Optional[ModelConfig] = None):
        super().__init__(ModelType.LLAMA_CPP, config or model_config_manager.get_config("llama-cpp-7b"))
        self.service_url = self.config.deployment_config.get('service_url', 'http://localhost:8080')
        self.client: Optional[httpx.AsyncClient] = None
        logger.info(f"llama.cpp客户端初始化: {self.service_url}")
        
        # Flag the client TOO_LARGE when the configured model does not fit
        # the resources currently available on this machine.
        if get_model_resource_info(self.config.model_name) and not self._monitor.can_load_model(self.config.model_name):
            logger.warning(f"llama.cpp模型 {self.config.model_name} 太大，不适合当前系统资源")
            self.state = ModelState.TOO_LARGE
    
    async def _route_to_adapter(self, prompt: str, **kwargs) -> str:
        """Delegate generation to the shared model adapter."""
        return await get_model_adapter().generate(prompt, **kwargs)
    
    async def initialize(self):
        """Set up the HTTP client (framework mode: no real connection)."""
        logger.info(f"llama.cpp client initialized (framework mode): {self.service_url}")
        logger.info(f"llama.cpp config: {self.config.backend_config}")
        
        # A failed HTTP-client setup marks ERROR; generate() then delegates.
        try:
            self.client = httpx.AsyncClient(timeout=self.config.timeout)
        except Exception as e:
            logger.error(f"初始化HTTP客户端失败: {str(e)}")
            self.state = ModelState.ERROR
        
        self._initialized = True
    
    async def generate(self, prompt: str, **kwargs) -> str:
        """Simulated llama.cpp generation with resource-aware delegation."""
        if not self._initialized:
            await self.initialize()
        
        # Abnormal state -> hand the request to the adapter.
        if self.state in (ModelState.ERROR, ModelState.TOO_LARGE):
            logger.warning(f"llama.cpp模型处于异常状态: {self.state.value}，尝试使用模型适配器")
            return await self._route_to_adapter(prompt, **kwargs)
        
        # Resource pressure -> let the adapter choose a lighter model.
        if await self.check_if_should_switch():
            logger.warning("检测到资源紧张，尝试切换到更适合的模型")
            return await self._route_to_adapter(prompt, **kwargs)
        
        # Log current memory pressure for observability.
        memory_usage = await self._monitor.get_memory_usage()
        logger.info(f"llama.cpp generation (framework mode): prompt_length={len(prompt)}, "
                   f"内存使用: {memory_usage}%")
        
        # kwargs override the configured generation parameters.
        temperature = kwargs.get('temperature', self.config.temperature)
        max_tokens = kwargs.get('max_tokens', self.config.max_tokens)
        
        await asyncio.sleep(0.2)  # simulated generation latency
        
        await self._update_usage_stats(len(prompt) // 4)  # ~4 chars per token
        self.state = ModelState.READY
        
        return f"[llama.cpp模拟结果] 基于提示词: {prompt[:100]}... (temp={temperature}, max_tokens={max_tokens})"
    
    async def health_check(self) -> bool:
        """Healthy unless memory pressure exceeds the switch threshold."""
        try:
            memory_usage = await self._monitor.get_memory_usage()
            if memory_usage > MODEL_SWITCH_THRESHOLD:
                logger.warning(f"llama.cpp健康检查: 内存使用率过高 ({memory_usage}%)，标记为不健康")
                self.is_healthy = False
                return False
            
            self.is_healthy = True
            self.state = ModelState.READY
            return True
        except Exception as e:
            logger.error(f"llama.cpp健康检查失败: {str(e)}")
            self.is_healthy = False
            self.state = ModelState.ERROR
            return False
    
    async def close(self):
        """Release the HTTP client, if one was created."""
        if self.client is not None:
            await self.client.aclose()
            self.client = None
        logger.info("llama.cpp客户端已关闭")


class FallbackClient(BaseModelClient):
    """Degradation client: tries cloud LLM services first, then falls back
    to simple local text-composition strategies.

    This client is the last resort in the priority chain and is always
    considered healthy.
    """
    
    def __init__(self, config: Optional[ModelConfig] = None):
        """Initialize from the named "fallback" config (or the one given)."""
        config = config or model_config_manager.get_config("fallback")
        super().__init__(ModelType.FALLBACK, config)
        self.strategy = config.backend_config.get("strategy", "simple_concat")
        self.max_texts = config.backend_config.get("max_reference_texts", 3)
        logger.info(f"降级客户端初始化: strategy={self.strategy}")
        self.state = ModelState.READY  # the fallback client is always available
        
        # Lazily-populated cache of cloud clients used as the final fallback.
        self._cloud_clients: Dict[Any, Any] = {}
    
    async def generate(self, prompt: str, contexts: Optional[List[str]] = None, **kwargs) -> str:
        """Generate a response: cloud LLM first, local strategy otherwise.

        Args:
            prompt: the user prompt/question.
            contexts: optional reference texts used by the local strategies.
            kwargs: may contain ``cloud_config``; the rest is forwarded to
                the cloud client.
        """
        logger.info(f"降级生成策略: {self.strategy}, contexts={len(contexts) if contexts else 0}")
        
        # Try cloud services before local composition.
        try:
            logger.info("尝试使用云端LLM服务作为最后的fallback")
            cloud_config = kwargs.get("cloud_config", {})
            cloud_client = await self._get_available_cloud_client(cloud_config)
            if cloud_client and await cloud_client.health_check():
                return await cloud_client.generate(prompt, **kwargs)
            else:
                logger.warning("云端服务不可用，返回本地降级响应")
        except Exception as cloud_error:
            logger.warning(f"云端服务调用失败: {str(cloud_error)}")
        
        # Local degradation strategies (no model involved).
        if self.strategy == "simple_concat":
            return self._simple_concat_generate(prompt, contexts)
        else:
            return self._template_generate(prompt, contexts)
    
    async def _get_available_cloud_client(self, cloud_config: Dict) -> Optional[Any]:
        """Return the first healthy cloud client by provider priority, or None."""
        # Imported here to avoid a circular import at module load time.
        from middleware.cloud_llm_client import CloudLLMType, CloudLLMFactory
        
        # Provider priority order: DashScope, then OpenAI.
        for cloud_type in [CloudLLMType.DASHSCOPE, CloudLLMType.OPENAI]:
            try:
                if cloud_type not in self._cloud_clients:
                    created = await CloudLLMFactory.create_client(cloud_type.value)
                    # Fix: only cache real clients. Caching a None result
                    # both poisoned later retries and crashed close().
                    if created is None:
                        logger.warning(f"云端客户端 {cloud_type.value} 不可用: 创建失败")
                        continue
                    self._cloud_clients[cloud_type] = created
                
                client = self._cloud_clients[cloud_type]
                if client and await client.health_check():
                    logger.info(f"找到可用的云端客户端: {cloud_type.value}")
                    return client
            except Exception as e:
                logger.warning(f"云端客户端 {cloud_type.value} 不可用: {str(e)}")
        
        return None
    
    def _simple_concat_generate(self, query: str, contexts: Optional[List[str]]) -> str:
        """Simple concatenation strategy: join up to max_texts references."""
        if not contexts:
            return f"根据现有信息，关于'{query}'的内容有限，无法提供详细回答。如需立即处理，可配置云端LLM服务密钥。"
        
        # Cap the number of reference texts.
        valid_texts = contexts[:self.max_texts]
        context_str = "；".join(valid_texts)
        
        return f"根据相关信息：{context_str}。总结回答：{query}"
    
    def _template_generate(self, query: str, contexts: Optional[List[str]]) -> str:
        """Template strategy: structured, numbered summary of the references."""
        if not contexts:
            return f"抱歉，没有找到关于'{query}'的相关信息。如需立即处理，可配置云端LLM服务密钥。"
        
        # Build a structured answer with numbered references.
        answer_parts = []
        answer_parts.append(f"关于'{query}'，参考信息如下：")
        
        for i, ctx in enumerate(contexts[:self.max_texts], 1):
            answer_parts.append(f"{i}. {ctx}")
        
        answer_parts.append(f"\n综合以上信息，可以得出关于'{query}'的相关结论。")
        
        return "\n".join(answer_parts)
    
    async def health_check(self) -> bool:
        """The fallback client is always healthy."""
        return True
    
    async def close(self):
        """Close all cached cloud clients."""
        for cloud_client in self._cloud_clients.values():
            # Fix: guard against a None entry cached by older code paths —
            # calling .close() on None raised AttributeError here.
            if cloud_client is not None:
                await cloud_client.close()
        self._cloud_clients.clear()
        logger.info("降级客户端已关闭")


class OllamaClient(BaseModelClient):
    """Ollama model client with resource monitoring and dynamic switching.

    Framework mode: generation is simulated, with latency scaled to the
    configured model's size class.
    """
    
    def __init__(self, config: Optional[ModelConfig] = None):
        """Initialize from the named "ollama-llama3" config (or the given one)."""
        config = config or model_config_manager.get_config("ollama-llama3")
        super().__init__(ModelType.OLLAMA, config)
        self.service_url = self.config.deployment_config.get('service_url', 'http://localhost:11434')
        self.client: Optional[httpx.AsyncClient] = None
        logger.info(f"Ollama客户端初始化: {self.service_url}")
        # Fix: removed `self._model_manager = get_model_manager()`.
        # The model_manager import was deliberately dropped at module level
        # to break a circular dependency (see header comment), so that call
        # raised NameError on every instantiation — and the attribute was
        # never used by this class.
        
        # Flag the client TOO_LARGE up front when the configured model does
        # not fit the currently available system resources.
        model_name = self.config.model_name
        model_info = get_model_resource_info(model_name)
        if model_info:
            can_load = self._monitor.can_load_model(model_name)
            if not can_load:
                logger.warning(f"Ollama模型 {model_name} 太大，不适合当前系统资源")
                self.state = ModelState.TOO_LARGE
    
    async def initialize(self):
        """Initialize the Ollama client (framework mode: no real connection)."""
        logger.info(f"Ollama client initialized (framework mode): {self.service_url}")
        logger.info(f"Ollama config: {self.config.backend_config}")
        
        # A failed HTTP-client setup marks ERROR; generate() then delegates.
        try:
            self.client = httpx.AsyncClient(timeout=self.config.timeout)
        except Exception as e:
            logger.error(f"初始化HTTP客户端失败: {str(e)}")
            self.state = ModelState.ERROR
        
        self._initialized = True
    
    async def generate(self, prompt: str, **kwargs) -> str:
        """Simulated Ollama generation with resource-aware delegation.

        Routes to the model adapter when the client is in an abnormal state,
        when resources are tight, or when generation itself fails.
        """
        if not self._initialized:
            await self.initialize()
            
        # Abnormal state -> route through the adapter instead.
        if self.state in [ModelState.ERROR, ModelState.TOO_LARGE]:
            logger.warning(f"Ollama模型处于异常状态: {self.state.value}，尝试使用模型适配器")
            adapter = get_model_adapter()
            return await adapter.generate(prompt, **kwargs)
        
        # Resource pressure -> let the adapter pick a better-suited model.
        if await self.check_if_should_switch():
            logger.warning("检测到资源紧张，尝试切换到更适合的模型")
            adapter = get_model_adapter()
            return await adapter.generate(prompt, **kwargs)
        
        # Log current memory situation for observability.
        memory_usage = await self._monitor.get_memory_usage()
        available_memory = await self._monitor.get_available_memory_mb()
        
        logger.info(f"Ollama generation (framework mode): prompt_length={len(prompt)}, "
                   f"内存使用: {memory_usage}%, 可用: {available_memory}MB")
        
        # kwargs override the configured generation parameters.
        temperature = kwargs.get('temperature', self.config.temperature)
        max_tokens = kwargs.get('max_tokens', self.config.max_tokens)
        
        # Request payload a real Ollama call would send (unused in framework
        # mode; kept to document the intended API shape).
        request_params = {
            "model": self.config.model_name,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": temperature,
                "max_tokens": max_tokens,
                "top_p": kwargs.get("top_p", 0.9),
            }
        }
        
        # Record the request start time for latency logging.
        start_time = time.time()
        
        try:
            # Simulated latency, scaled by the model's size class.
            model_info = get_model_resource_info(self.config.model_name)
            delay = 0.3 if model_info and model_info.get("type") == "small" else \
                   0.8 if model_info and model_info.get("type") == "medium" else 1.5
            await asyncio.sleep(delay)
            
            # Update usage statistics (rough estimate: ~4 chars per token).
            await self._update_usage_stats(len(prompt) // 4)
            self.state = ModelState.READY
            
            end_time = time.time()
            logger.debug(f"Ollama请求完成，耗时: {(end_time - start_time):.2f}秒")
            
            # Record this model's memory footprint with the monitor.
            if model_info:
                await self._monitor.record_model_memory(self.config.model_name, model_info.get("memory_mb", 0))
            
            return f"[Ollama模拟结果] {self.config.model_name} 处理了您的请求: {prompt[:100]}... (temp={temperature}, max_tokens={max_tokens})"
            
        except Exception as e:
            logger.error(f"Ollama生成失败: {str(e)}")
            self.state = ModelState.ERROR
            # Last resort: delegate to the model adapter.
            adapter = get_model_adapter()
            return await adapter.generate(prompt, **kwargs)
    
    async def health_check(self) -> bool:
        """Healthy unless memory pressure exceeds the switch threshold."""
        try:
            memory_usage = await self._monitor.get_memory_usage()
            if memory_usage > MODEL_SWITCH_THRESHOLD:
                logger.warning(f"Ollama健康检查: 内存使用率过高 ({memory_usage}%)，标记为不健康")
                self.is_healthy = False
                return False
            
            # Framework mode: simulate a passing service check.
            logger.debug("Ollama健康检查 - 框架模式")
            self.is_healthy = True
            self.state = ModelState.READY
            return True
        except Exception as e:
            logger.error(f"Ollama健康检查失败: {str(e)}")
            self.is_healthy = False
            self.state = ModelState.ERROR
            return False
    
    async def get_memory_usage(self) -> Optional[float]:
        """Return the configured model's memory footprint in MB, if known."""
        try:
            model_info = get_model_resource_info(self.config.model_name)
            if model_info:
                return model_info.get("memory_mb", 0)
            return None
        except Exception:
            return None
    
    async def close(self):
        """Release the HTTP client, if any."""
        if self.client:
            await self.client.aclose()
            self.client = None
        logger.info("Ollama客户端已关闭")


class ModelClientFactory:
    """Model-client factory with resource-aware dynamic selection.

    Maintains a cache of created clients, a notion of the "current" client,
    and a priority-based availability search with automatic degradation to
    the fallback client.
    """
    
    _clients: Dict[str, BaseModelClient] = {}  # cache keyed by type/config/model name
    _current_client: Optional[BaseModelClient] = None
    _cloud_clients: Dict[Any, Any] = {}  # cloud-client cache (Any avoids circular-import types)
    
    @classmethod
    def create_client(cls, model_type: ModelType, config: Optional[ModelConfig] = None, **kwargs) -> BaseModelClient:
        """Create (or return a cached) client for *model_type*.

        For Ollama, downsizes the requested model when it does not fit the
        current system resources. Accepts an optional ``model_name`` kwarg.
        """
        cache_key = f"{model_type.value}_{str(config)}_{kwargs.get('model_name', '')}"
        
        if cache_key in cls._clients:
            logger.debug(f"返回缓存的客户端: {model_type.value}")
            return cls._clients[cache_key]
        
        adapter = get_model_adapter()
        
        # Advisory only: log when the adapter recommends the cloud service.
        try:
            # should_use_cloud_service is synchronous — call it directly.
            if adapter.should_use_cloud_service():
                logger.info("系统资源不足，推荐使用云端LLM服务")
        except Exception as e:
            logger.error(f"检查云端服务可用性时出错: {str(e)}")
        
        # Create the local client for the requested backend.
        # Fix: the client constructors accept only `config`, so **kwargs
        # must not be forwarded (it raised TypeError before).
        if model_type == ModelType.VLLM:
            client = VllmClient(config=config)
        elif model_type == ModelType.LLAMA_CPP:
            client = LlamaCppClient(config=config)
        elif model_type == ModelType.OLLAMA:
            client = cls._create_ollama_client(config, kwargs)
        elif model_type == ModelType.FALLBACK:
            client = FallbackClient(config=config)
        else:
            raise ValueError(f"Unsupported model type: {model_type}")
        
        cls._clients[cache_key] = client
        logger.info(f"创建模型客户端: {model_type.value}")
        return client
    
    @classmethod
    def _create_ollama_client(cls, config: Optional[ModelConfig], kwargs: Dict) -> BaseModelClient:
        """Build an OllamaClient, downsizing the model if resources demand it."""
        # Resolve the requested model name (explicit kwarg wins).
        model_name = kwargs.get('model_name')
        if not model_name and config:
            model_name = config.model_name
            
        if model_name:
            monitor = get_resource_monitor()
            if not monitor.can_load_model(model_name):
                # The requested model does not fit — pick a smaller one.
                smaller_model = monitor.get_recommended_models_by_size("small")
                if smaller_model:
                    logger.warning(f"模型 {model_name} 太大，切换到更小的模型: {smaller_model[0]}")
                    if config:
                        config.model_name = smaller_model[0]
                    kwargs['model_name'] = smaller_model[0]
        
        client = OllamaClient(config=config)
        # Apply an explicit model_name override after construction: the
        # constructor has no model_name parameter.
        if kwargs.get('model_name'):
            client.config.model_name = kwargs['model_name']
        return client
    
    @classmethod
    def get_client(cls, model_type: ModelType) -> Optional[BaseModelClient]:
        """Return the first cached client of *model_type*, if any."""
        for key, client in cls._clients.items():
            if client.model_type == model_type:
                return client
        return None
    
    @classmethod
    async def get_optimal_client(cls, model_type: Optional[ModelType] = None) -> BaseModelClient:
        """Return the best client: the requested type when given, otherwise
        the first available client by priority.

        Fix: the previous implementation awaited the synchronous
        create_client() (TypeError) and referenced a non-existent
        ``cls._model_manager`` (the model-manager import was removed to
        break a circular dependency), so this path always raised.
        """
        if model_type:
            return cls.get_client(model_type) or cls.create_client(model_type)
            
        # Without a model-manager recommendation, fall back to the
        # priority-based availability search.
        return await cls.get_available_client()
    
    @classmethod
    def set_current_client(cls, client: BaseModelClient):
        """Remember *client* as the process-wide current client."""
        cls._current_client = client
        logger.info(f"Set current model client: {client.model_type.value}")
    
    @classmethod
    def get_current_client(cls) -> Optional[BaseModelClient]:
        """Return the process-wide current client, if one was set."""
        return cls._current_client
    
    @classmethod
    def create_from_config(cls, config_name: Optional[str] = None) -> BaseModelClient:
        """Create a client from a named configuration entry.

        Raises:
            ValueError: when the configured backend is not supported.
        """
        config = model_config_manager.get_config(config_name)
        
        # Map the configured backend onto a client model type.
        backend_to_type = {
            "vllm": ModelType.VLLM,
            "llama_cpp": ModelType.LLAMA_CPP,
            "ollama": ModelType.OLLAMA,
            "fallback": ModelType.FALLBACK
        }
        
        model_type = backend_to_type.get(config.backend.value)
        if not model_type:
            raise ValueError(f"Unsupported backend: {config.backend.value}")
        
        # Let the adapter optimize the configuration for current resources.
        adapter = get_model_adapter()
        optimized_config = adapter.get_optimal_config(config)
        if optimized_config:
            config = optimized_config
        
        return cls.create_client(model_type, config=config)
    
    @classmethod
    async def get_available_client(cls) -> BaseModelClient:
        """Pick an available client by priority, degrading to the fallback
        client when nothing else is usable."""
        # The online AI API (fallback client) is always first in priority,
        # regardless of local resource state.
        logger.info("设置优先使用在线AI API (Fallback) 作为首选模型客户端")
        priorities = [ModelType.FALLBACK]
        
        # Fix: pre-initialize so a monitoring failure cannot leave `monitor`
        # unbound for the selection loop below.
        monitor = None
        try:
            monitor = get_resource_monitor()
            memory_usage = await monitor.get_memory_usage()
            
            if memory_usage <= MODEL_SWITCH_THRESHOLD:
                # Plenty of memory: consider every local backend.
                priorities.extend([ModelType.OLLAMA, ModelType.LLAMA_CPP, ModelType.VLLM])
            else:
                # Memory pressure: only the lightweight local backend.
                logger.warning(f"内存使用率过高 ({memory_usage}%)，仅添加轻量级本地模型作为后备")
                priorities.append(ModelType.OLLAMA)
        except Exception as e:
            # Monitoring unavailable: keep only the online AI API.
            logger.warning(f"资源监控异常: {str(e)}，仅使用在线AI API")
        
        # Try candidates in priority order.
        for model_type in priorities:
            try:
                kwargs = {}
                if model_type == ModelType.OLLAMA and monitor is not None:
                    # Pick an Ollama model matching the recommended size tier.
                    recommended_size = monitor.get_recommended_model_size()
                    models = monitor.get_recommended_models_by_size(recommended_size)
                    if models:
                        kwargs['model_name'] = models[0]
                        logger.info(f"根据系统资源，选择Ollama模型: {models[0]} (size: {recommended_size})")
                
                client = cls.create_client(model_type, **kwargs)
                if await client.health_check():
                    # Double-check the chosen model still fits before use.
                    if model_type != ModelType.FALLBACK and monitor is not None:
                        model_info = get_model_resource_info(kwargs.get('model_name', client.config.model_name))
                        if model_info and not monitor.can_load_model(model_info.get('name', '')):
                            logger.warning(f"客户端 {model_type.value} 的模型太大，跳过")
                            continue
                    
                    logger.info(f"选择可用模型客户端: {model_type.value}")
                    cls.set_current_client(client)
                    return client
                else:
                    logger.warning(f"模型客户端不可用: {model_type.value}")
            except Exception as e:
                logger.warning(f"创建{model_type.value}客户端失败: {str(e)}")
        
        # Nothing usable: degrade to the fallback client.
        logger.warning("所有模型客户端不可用，使用降级客户端")
        fallback_client = cls.create_client(ModelType.FALLBACK)
        cls.set_current_client(fallback_client)
        return fallback_client
    
    @classmethod
    async def get_cloud_client(cls, cloud_type: CloudLLMType, config: Dict = None) -> CloudLLMClientProtocol:
        """Return (and cache) a cloud client for *cloud_type*."""
        # Imported here to avoid a circular import at module load time.
        from middleware.cloud_llm_client import CloudLLMClient
        
        if cloud_type not in cls._cloud_clients:
            # NOTE(review): CloudLLMClient.create_client is assumed to be
            # synchronous (it is not awaited) — confirm against its API.
            cls._cloud_clients[cloud_type] = CloudLLMClient.create_client(cloud_type, config or {})
        return cls._cloud_clients[cloud_type]
    
    @classmethod
    async def clean_unused_clients(cls):
        """Close and evict clients idle for more than an hour."""
        current_time = time.time()
        max_idle_time = 3600  # one hour
        
        # Local clients (never evict the current one).
        for key, client in list(cls._clients.items()):
            if hasattr(client, '_last_used_time'):
                idle_time = current_time - client._last_used_time
                if idle_time > max_idle_time and client != cls._current_client:
                    logger.info(f"清理闲置客户端: {key}, 闲置时间: {idle_time}秒")
                    if hasattr(client, 'close'):
                        await client.close()
                    del cls._clients[key]
        
        # Cloud clients.
        for cloud_type, client in list(cls._cloud_clients.items()):
            if hasattr(client, '_last_used_time'):
                idle_time = current_time - client._last_used_time
                if idle_time > max_idle_time:
                    logger.info(f"清理闲置云端客户端: {cloud_type.value}, 闲置时间: {idle_time}秒")
                    await client.close()
                    del cls._cloud_clients[cloud_type]


# 全局模型客户端 - 直接尝试使用CloudLLMClient
from middleware.cloud_llm_client import CloudLLMFactory, CloudLLMType
import asyncio

def init_cloud_model_client():
    """Initialize the cloud model client from synchronous (import-time) code.

    Uses ``asyncio.get_running_loop()`` to detect whether an event loop is
    already running in this thread (``asyncio.get_event_loop()`` is deprecated
    for this purpose since Python 3.10). If a loop is running we cannot block
    on a coroutine here, so a fallback client is returned instead. Otherwise
    DASHSCOPE is tried first, then any other available cloud provider, and
    finally the fallback client.

    Returns:
        A usable model client; never raises.
    """
    try:
        # Detect a running loop without the deprecated get_event_loop() dance.
        try:
            asyncio.get_running_loop()
            loop_is_running = True
        except RuntimeError:
            loop_is_running = False

        if loop_is_running:
            # We cannot run a second loop in this thread, so degrade gracefully.
            logger.warning("事件循环已在运行，无法初始化云端模型客户端")
            return ModelClientFactory.create_client(ModelType.FALLBACK)

        # No running loop: asyncio.run() safely creates and tears down one.
        client = asyncio.run(CloudLLMFactory.create_client(CloudLLMType.DASHSCOPE.value))
        if client:
            logger.info("成功初始化阿里云通义千问(DASHSCOPE)客户端")
            return client

        # DASHSCOPE unavailable: let the factory pick any other provider.
        logger.warning("无法初始化阿里云通义千问客户端，尝试其他云端服务")
        client = asyncio.run(CloudLLMFactory.create_client())
        if client:
            logger.info("成功初始化其他云端LLM客户端")
            return client
    except Exception as e:
        logger.error(f"初始化云端LLM客户端失败: {str(e)}")

    # Last resort: every cloud service failed, fall back to the local stub.
    logger.warning("所有云端LLM服务都不可用，使用FallbackClient")
    return ModelClientFactory.create_client(ModelType.FALLBACK)

# Initialize the module-level global model client at import time.
# NOTE(review): this performs event-loop work on import; consider deferring
# to an explicit startup hook (e.g. initialize_model_clients) — confirm.
model_client = init_cloud_model_client()
ModelClientFactory.set_current_client(model_client)

# 初始化云端LLM
async def initialize_model_clients():
    """Initialize all model clients from an async context.

    Brings up the cloud LLM services first, then tries to promote the
    module-level ``model_client`` to a cloud-backed client; on failure the
    existing (fallback) client is kept.
    """
    global model_client
    from middleware.cloud_llm_client import init_cloud_llm

    # First initialize the cloud LLM services.
    await init_cloud_llm()

    # Then try to obtain a cloud client.
    # BUG FIX: get_cloud_client() requires a cloud_type argument; the original
    # call passed none, which raised a TypeError at runtime. DASHSCOPE is the
    # provider this module prefers elsewhere. The call is also wrapped so a
    # provider failure reaches the fallback branch instead of propagating.
    try:
        cloud_client = await ModelClientFactory.get_cloud_client(CloudLLMType.DASHSCOPE)
    except Exception as e:
        logger.error(f"初始化云端LLM客户端失败: {str(e)}")
        cloud_client = None

    if cloud_client:
        model_client = cloud_client
        logger.info("已成功切换到云端模型客户端")
    else:
        logger.warning("无法初始化云端模型客户端，继续使用Fallback客户端")



# 异步获取模型客户端的辅助函数
async def get_model_client(backend: Optional[ModelBackend] = None, **kwargs) -> BaseModelClient:
    """Return a model client, honoring an optional explicit backend choice.

    Resource pressure (as judged by the model adapter) overrides any explicit
    request. An explicitly requested backend is only returned if it passes a
    health check (and, for local backends, a system-compatibility check);
    otherwise selection falls through to automatic backend discovery.
    """
    # Resource pressure overrides any explicit backend request.
    adapter = get_model_adapter()
    if await adapter.should_switch_model():
        logger.info("系统资源状态需要模型切换，使用模型适配器选择最佳客户端")
        return await adapter.get_optimal_client()

    if backend is not None:
        if backend == ModelBackend.CLOUD:
            # Cloud backend: resolve the provider and verify it is reachable.
            cloud_type = kwargs.get('cloud_type', CloudLLMType.OPENAI)
            cloud_client = await ModelClientFactory.get_cloud_client(cloud_type, kwargs.get('cloud_config', {}))
            if await cloud_client.health_check():
                logger.info(f"使用云端LLM服务: {cloud_type.value}")
                return cloud_client
            logger.warning(f"指定的云端后端{cloud_type.value}不可用，尝试自动选择")
        else:
            # Local backends: translate the backend enum to a model type.
            local_type = {
                ModelBackend.VLLM: ModelType.VLLM,
                ModelBackend.LLAMA_CPP: ModelType.LLAMA_CPP,
                ModelBackend.OLLAMA: ModelType.OLLAMA,
                ModelBackend.FALLBACK: ModelType.FALLBACK,
            }.get(backend)
            if local_type:
                extra_kwargs = {}
                if local_type == ModelType.OLLAMA:
                    # Pick an Ollama model sized to the current system resources.
                    # NOTE(review): this calls the monitor's method, while the
                    # module also imports a same-named free function — confirm
                    # which is intended.
                    monitor = get_resource_monitor()
                    size = monitor.get_recommended_model_size()
                    candidates = monitor.get_recommended_models_by_size(size)
                    if candidates:
                        extra_kwargs['model_name'] = candidates[0]
                        logger.info(f"根据系统资源，选择{backend.value}模型: {candidates[0]} (size: {size})")

                candidate = ModelClientFactory.create_client(local_type, **extra_kwargs)
                # Require both reachability and resource compatibility.
                if await candidate.health_check() and candidate.config.is_compatible_with_system():
                    ModelClientFactory.set_current_client(candidate)
                    return candidate
                logger.warning(f"指定的后端{backend.value}不可用或资源不兼容，尝试自动选择")

    # No (usable) explicit backend: fall back to automatic selection.
    return await ModelClientFactory.get_available_client()


# 模型资源监控和清理的定期任务
async def start_model_management_tasks():
    """Start model-management background work: periodic client cleanup.

    Must be called from within a running event loop.
    """
    async def cleanup_task():
        # Periodically evict idle clients; one failure must not kill the loop.
        while True:
            try:
                await ModelClientFactory.clean_unused_clients()
            except Exception as e:
                logger.error(f"清理客户端任务失败: {str(e)}")
            await asyncio.sleep(300)  # run every 5 minutes

    # BUG FIX: keep a strong reference to the task. The event loop holds only
    # a weak reference to tasks, so a task whose result is discarded may be
    # garbage-collected and silently stop (see asyncio.create_task docs).
    task = asyncio.create_task(cleanup_task())
    start_model_management_tasks._cleanup_task = task
    logger.info("模型管理任务已启动")


async def test_model_clients():
    """Framework smoke test: exercise each configured client type end-to-end."""
    logger.info("开始模型客户端框架测试...")

    # (model type, keyword overrides) pairs to exercise.
    test_configs = [
        (ModelType.FALLBACK, {}),
        (ModelType.VLLM, {"base_url": "http://localhost:8080"}),
        (ModelType.LLAMA_CPP, {"base_url": "http://localhost:8081"})
    ]

    for model_type, config in test_configs:
        try:
            # BUG FIX: pass the overrides as keyword arguments (**config),
            # matching how create_client is invoked elsewhere in this module
            # (e.g. create_client(model_type, **client_kwargs)); the dict was
            # previously passed positionally.
            client = ModelClientFactory.create_client(model_type, **config)

            # Health check.
            is_healthy = await client.health_check()
            logger.info(f"{model_type.value} 健康状态: {is_healthy}")

            # Generation round-trip.
            test_query = "什么是分布式系统？"
            test_contexts = ["分布式系统是由多个独立计算机组成的系统", "这些计算机通过网络协同工作"]

            prompt = client.build_qa_prompt(test_query, test_contexts)
            result = await client.safe_generate(prompt, contexts=test_contexts)

            logger.info(f"{model_type.value} 测试结果: {result[:100]}...")

        except Exception as e:
            logger.error(f"{model_type.value} 测试失败: {str(e)}")

    logger.info("模型客户端框架测试完成")


if __name__ == "__main__":
    # Run the framework smoke test when executed directly.
    asyncio.run(test_model_clients())