"""
AI Model Service
AI模型服务 - 统一AI模型接口，支持豆包、阿里云、OpenAI
"""

import asyncio
import uuid
from typing import Optional, Dict, Any, List, Union
from datetime import datetime, timedelta
from abc import ABC, abstractmethod
import httpx
import structlog
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
)

from ..core.config import settings
from ..core.cache import cache_result, create_cache_key
from ..core.exceptions import (
    AIModelNotAvailableError,
    AIModelRequestError,
    AIModelRateLimitError,
    ValidationError,
    ConfigurationError,
)

# 配置结构化日志
logger = structlog.get_logger(__name__)


class AIProvider(ABC):
    """Abstract base class for an AI model provider.

    Each concrete provider wraps one vendor API (Doubao, Aliyun, OpenAI)
    behind a common async generation interface.
    """

    def __init__(self, name: str, api_key: str, base_url: str):
        self.name = name
        self.api_key = api_key
        # Normalize away a trailing slash so endpoint paths can start with "/".
        self.base_url = base_url.rstrip("/")

    @abstractmethod
    async def generate_content(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Generate content of ``content_type`` from ``prompt``."""

    @abstractmethod
    async def validate_parameters(self, content_type: str, **kwargs) -> Dict[str, Any]:
        """Validate provider-specific generation parameters."""

    @abstractmethod
    def get_model_info(self) -> Dict[str, Any]:
        """Return static metadata describing this provider's models."""


class DoubaoProvider(AIProvider):
    """Doubao large-model provider (Volcano Engine Ark gateway)."""

    def __init__(self, api_key: str, base_url: str):
        super().__init__("doubao", api_key, base_url)
        # One shared async client per provider; closed by AIModelService.close().
        self.client = httpx.AsyncClient(
            base_url=self.base_url,
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
            timeout=120.0,
        )

    async def generate_content(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Generate content via Doubao.

        Args:
            prompt: Generation prompt text.
            content_type: Content kind ("image", "text", "video", "audio").
            **kwargs: Extra generation options (model, size, quality, ...).

        Returns:
            Dict[str, Any]: Parsed generation result.

        Raises:
            AIModelRateLimitError: HTTP 429 from the gateway.
            AIModelRequestError: Any other HTTP or network failure.
        """
        logger.info(
            "豆包生成内容", content_type=content_type, prompt_length=len(prompt)
        )

        # The target model id is embedded into the payload by
        # _build_request_data, so it is not resolved separately here.
        request_data = self._build_request_data(prompt, content_type, **kwargs)

        try:
            # NOTE(review): image requests are also posted to /chat/completions;
            # confirm the Ark gateway accepts image parameters on this route.
            response = await self.client.post("/chat/completions", json=request_data)
            response.raise_for_status()
            return self._parse_response(response.json(), content_type)

        except httpx.HTTPStatusError as e:
            status = e.response.status_code
            if status == 429:
                raise AIModelRateLimitError("豆包请求频率限制")
            if status >= 500:
                raise AIModelRequestError(f"豆包服务错误: {status}")
            raise AIModelRequestError(f"豆包请求失败: {status}")
        except httpx.RequestError as e:
            raise AIModelRequestError(f"豆包网络请求失败: {str(e)}")

    async def validate_parameters(self, content_type: str, **kwargs) -> Dict[str, Any]:
        """Validate Doubao-specific parameters.

        Returns:
            Dict[str, Any]: Only the parameters that were validated.

        Raises:
            ValidationError: A parameter value is not supported by Doubao.
        """
        validated: Dict[str, Any] = {}

        if content_type == "image":
            if "size" in kwargs:
                size = kwargs["size"]
                valid_sizes = ["512x512", "1024x1024", "2048x2048", "4096x4096"]
                if size not in valid_sizes:
                    raise ValidationError(f"豆包不支持图片尺寸: {size}")
                validated["size"] = size

            if "quality" in kwargs:
                quality = kwargs["quality"]
                if quality not in ["standard", "hd"]:
                    raise ValidationError(f"豆包不支持图片质量: {quality}")
                validated["quality"] = quality

        return validated

    def get_model_info(self) -> Dict[str, Any]:
        """Return static metadata for the Doubao provider."""
        return {
            "provider": "doubao",
            "name": "豆包大模型",
            "supported_types": ["image", "text"],
            "models": {"image": "doubao-image-generation", "text": "doubao-lite-4k"},
            "features": ["text-to-image", "text-generation"],
            "rate_limit": "100 requests per minute",
        }

    def _get_model_id(self, content_type: str, **kwargs) -> str:
        """Map a content type to a Doubao model id (caller may override text)."""
        model_map = {
            "image": "doubao-image-generation",
            "text": kwargs.get("model", "doubao-lite-4k"),
            "video": "doubao-video-generation",
            "audio": "doubao-audio-generation",
        }
        return model_map.get(content_type, "doubao-lite-4k")

    def _build_request_data(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Build the JSON request body for the Doubao API."""
        base_data = {
            "model": self._get_model_id(content_type, **kwargs),
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": kwargs.get("max_tokens", 2000),
            "temperature": kwargs.get("temperature", 0.7),
            "top_p": kwargs.get("top_p", 0.9),
        }

        # Image generation carries extra, type-specific parameters.
        if content_type == "image":
            base_data.update(
                {
                    "n": kwargs.get("batch_size", 1),
                    "size": kwargs.get("size", "1024x1024"),
                    "quality": kwargs.get("quality", "standard"),
                    "style": kwargs.get("style", "vivid"),
                }
            )

        return base_data

    def _parse_response(
        self, response: Dict[str, Any], content_type: str
    ) -> Dict[str, Any]:
        """Normalize a raw Doubao response into the service result shape.

        Raises:
            AIModelRequestError: The response has no recognizable payload.
        """
        if content_type == "image":
            # Image responses carry a "data" list of generated images.
            if response.get("data"):
                return {
                    "success": True,
                    "content_type": "image",
                    "images": [
                        {
                            "url": item.get("url"),
                            "revised_prompt": item.get("revised_prompt", ""),
                            "seed": item.get("seed"),
                        }
                        for item in response["data"]
                    ],
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }
        else:
            # Text-style responses carry a "choices" list.
            if response.get("choices"):
                choice = response["choices"][0]
                return {
                    "success": True,
                    "content_type": content_type,
                    "content": choice.get("message", {}).get("content", ""),
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }

        raise AIModelRequestError("无法解析豆包响应数据")


class AliyunProvider(AIProvider):
    """Aliyun (DashScope) large-model provider."""

    def __init__(self, api_key: str, base_url: str):
        super().__init__("ali", api_key, base_url)
        # One shared async client per provider; closed by AIModelService.close().
        self.client = httpx.AsyncClient(
            base_url=self.base_url,
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
            timeout=120.0,
        )

    async def generate_content(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Generate content via Aliyun.

        Args:
            prompt: Generation prompt text.
            content_type: Content kind ("image", "text", "video", "audio").
            **kwargs: Extra generation options.

        Returns:
            Dict[str, Any]: Parsed generation result.

        Raises:
            AIModelRateLimitError: HTTP 429 from the gateway.
            AIModelRequestError: Any other HTTP or network failure.
        """
        logger.info(
            "阿里云生成内容", content_type=content_type, prompt_length=len(prompt)
        )

        model_id = self._get_model_id(content_type, **kwargs)
        request_data = self._build_request_data(prompt, content_type, **kwargs)

        # BUG FIX: previously every request (including text) was posted to the
        # text2image endpoint, so text generation could never succeed.
        if content_type == "image":
            endpoint = f"/services/{model_id}/text2image"
        else:
            # NOTE(review): assumed DashScope text-generation route; confirm it
            # matches the version prefix configured in ali_base_url.
            endpoint = "/services/aigc/text-generation/generation"

        try:
            response = await self.client.post(endpoint, json=request_data)
            response.raise_for_status()
            return self._parse_response(response.json(), content_type)

        except httpx.HTTPStatusError as e:
            status = e.response.status_code
            if status == 429:
                raise AIModelRateLimitError("阿里云请求频率限制")
            if status >= 500:
                raise AIModelRequestError(f"阿里云服务错误: {status}")
            raise AIModelRequestError(f"阿里云请求失败: {status}")
        except httpx.RequestError as e:
            raise AIModelRequestError(f"阿里云网络请求失败: {str(e)}")

    async def validate_parameters(self, content_type: str, **kwargs) -> Dict[str, Any]:
        """Validate Aliyun-specific parameters.

        Raises:
            ValidationError: A parameter value is not supported by Aliyun.
        """
        validated: Dict[str, Any] = {}

        if content_type == "image":
            if "size" in kwargs:
                size = kwargs["size"]
                # Resolutions supported by the wanx image model.
                if size not in ["512x512", "768x768", "1024x1024", "1536x1536"]:
                    raise ValidationError(f"阿里云不支持图片尺寸: {size}")
                validated["size"] = size

        return validated

    def get_model_info(self) -> Dict[str, Any]:
        """Return static metadata for the Aliyun provider."""
        return {
            "provider": "ali",
            "name": "阿里云大模型",
            "supported_types": ["image", "text"],
            "models": {"image": "wanx-v1", "text": "qwen-turbo"},
            "features": ["text-to-image", "text-generation"],
            "rate_limit": "60 requests per minute",
        }

    def _get_model_id(self, content_type: str, **kwargs) -> str:
        """Map a content type to an Aliyun model id (caller may override text)."""
        model_map = {
            "image": "wanx-v1",
            "text": kwargs.get("model", "qwen-turbo"),
            "video": "ali-video-generation",
            "audio": "ali-audio-generation",
        }
        return model_map.get(content_type, "qwen-turbo")

    def _build_request_data(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Build the DashScope-style request body (input/parameters envelope)."""
        if content_type == "image":
            return {
                "model": self._get_model_id(content_type, **kwargs),
                "input": {
                    "prompt": prompt,
                    "negative_prompt": kwargs.get("negative_prompt", ""),
                    "size": kwargs.get("size", "1024x1024"),
                    "n": kwargs.get("batch_size", 1),
                    "steps": kwargs.get("steps", 50),
                    "scale": kwargs.get("guidance_scale", 7.5),
                    "seed": kwargs.get("seed"),
                },
                "parameters": {
                    "style": kwargs.get("style", "<auto>"),
                    "format": kwargs.get("format", "png"),
                },
            }
        else:
            return {
                "model": self._get_model_id(content_type, **kwargs),
                "input": {"prompt": prompt, "history": kwargs.get("history", [])},
                "parameters": {
                    "result_format": "message",
                    "top_p": kwargs.get("top_p", 0.9),
                    "temperature": kwargs.get("temperature", 0.7),
                    "max_tokens": kwargs.get("max_tokens", 2000),
                },
            }

    def _parse_response(
        self, response: Dict[str, Any], content_type: str
    ) -> Dict[str, Any]:
        """Normalize a raw Aliyun response into the service result shape.

        Raises:
            AIModelRequestError: The response has no recognizable payload.
        """
        output = response.get("output", {})
        if content_type == "image":
            # Image results live in output.results.
            if "results" in output:
                return {
                    "success": True,
                    "content_type": "image",
                    "images": [
                        {"url": result.get("url"), "seed": result.get("seed")}
                        for result in output["results"]
                    ],
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }
        else:
            # Text results live in output.choices (result_format="message").
            if "choices" in output:
                return {
                    "success": True,
                    "content_type": content_type,
                    "content": output["choices"][0]
                    .get("message", {})
                    .get("content", ""),
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }

        raise AIModelRequestError("无法解析阿里云响应数据")


class OpenAIProvider(AIProvider):
    """OpenAI provider (DALL-E for images, GPT for text)."""

    def __init__(self, api_key: str, base_url: str = "https://api.openai.com/v1"):
        super().__init__("openai", api_key, base_url)
        # One shared async client per provider; closed by AIModelService.close().
        self.client = httpx.AsyncClient(
            base_url=self.base_url,
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
            timeout=120.0,
        )

    async def generate_content(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Generate content via OpenAI.

        Args:
            prompt: Generation prompt text.
            content_type: Content kind ("image", "text", "audio", "video").
            **kwargs: Extra generation options.

        Returns:
            Dict[str, Any]: Parsed generation result.

        Raises:
            AIModelRateLimitError: HTTP 429 from the API.
            AIModelRequestError: Any other HTTP or network failure.
        """
        logger.info(
            "OpenAI生成内容", content_type=content_type, prompt_length=len(prompt)
        )

        # The target model id is embedded into the payload by
        # _build_request_data, so it is not resolved separately here.
        request_data = self._build_request_data(prompt, content_type, **kwargs)

        # Images and chat completions use different endpoints.
        endpoint = (
            "/images/generations" if content_type == "image" else "/chat/completions"
        )

        try:
            response = await self.client.post(endpoint, json=request_data)
            response.raise_for_status()
            return self._parse_response(response.json(), content_type)

        except httpx.HTTPStatusError as e:
            status = e.response.status_code
            if status == 429:
                raise AIModelRateLimitError("OpenAI请求频率限制")
            if status >= 500:
                raise AIModelRequestError(f"OpenAI服务错误: {status}")
            # BUG FIX: .json() itself can raise on a non-JSON error body,
            # which would mask the real HTTP error. Fall back to the status.
            try:
                error_detail = e.response.json().get("error", {}).get("message", "")
            except ValueError:
                error_detail = str(status)
            raise AIModelRequestError(f"OpenAI请求失败: {error_detail}")
        except httpx.RequestError as e:
            raise AIModelRequestError(f"OpenAI网络请求失败: {str(e)}")

    async def validate_parameters(self, content_type: str, **kwargs) -> Dict[str, Any]:
        """Validate OpenAI-specific parameters.

        Raises:
            ValidationError: A parameter value is not supported by OpenAI.
        """
        validated: Dict[str, Any] = {}

        if content_type == "image":
            if "size" in kwargs:
                size = kwargs["size"]
                # DALL-E 2/3 supported resolutions.
                valid_sizes = [
                    "256x256",
                    "512x512",
                    "1024x1024",
                    "1792x1024",
                    "1024x1792",
                ]
                if size not in valid_sizes:
                    raise ValidationError(f"OpenAI不支持图片尺寸: {size}")
                validated["size"] = size

            if "quality" in kwargs:
                quality = kwargs["quality"]
                if quality not in ["standard", "hd"]:
                    raise ValidationError(f"OpenAI不支持图片质量: {quality}")
                validated["quality"] = quality

        return validated

    def get_model_info(self) -> Dict[str, Any]:
        """Return static metadata for the OpenAI provider."""
        return {
            "provider": "openai",
            "name": "OpenAI GPT/DALL-E",
            "supported_types": ["image", "text", "audio"],
            "models": {
                "image": "dall-e-3",
                "text": "gpt-4-turbo-preview",
                "audio": "tts-1",
            },
            "features": ["text-to-image", "text-generation", "text-to-speech"],
            "rate_limit": "50 requests per minute",
        }

    def _get_model_id(self, content_type: str, **kwargs) -> str:
        """Map a content type to an OpenAI model id (caller may override)."""
        model_map = {
            "image": kwargs.get("model", "dall-e-3"),
            "text": kwargs.get("model", "gpt-4-turbo-preview"),
            "audio": kwargs.get("model", "tts-1"),
            "video": "gpt-4-turbo-preview",  # text model generates a video description
        }
        return model_map.get(content_type, "gpt-4-turbo-preview")

    def _build_request_data(
        self, prompt: str, content_type: str, **kwargs
    ) -> Dict[str, Any]:
        """Build the JSON request body for the OpenAI API."""
        if content_type == "image":
            return {
                "model": self._get_model_id(content_type, **kwargs),
                "prompt": prompt,
                "n": kwargs.get("batch_size", 1),
                "size": kwargs.get("size", "1024x1024"),
                "quality": kwargs.get("quality", "standard"),
                "style": kwargs.get("style", "vivid"),
                "response_format": "url",
            }
        else:
            return {
                "model": self._get_model_id(content_type, **kwargs),
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": kwargs.get("max_tokens", 2000),
                "temperature": kwargs.get("temperature", 0.7),
                "top_p": kwargs.get("top_p", 0.9),
                "frequency_penalty": kwargs.get("frequency_penalty", 0.0),
                "presence_penalty": kwargs.get("presence_penalty", 0.0),
            }

    def _parse_response(
        self, response: Dict[str, Any], content_type: str
    ) -> Dict[str, Any]:
        """Normalize a raw OpenAI response into the service result shape.

        Raises:
            AIModelRequestError: The response has no recognizable payload.
        """
        if content_type == "image":
            # Image responses carry a "data" list of generated images.
            if response.get("data"):
                return {
                    "success": True,
                    "content_type": "image",
                    "images": [
                        {
                            "url": item.get("url"),
                            "revised_prompt": item.get("revised_prompt", ""),
                        }
                        for item in response["data"]
                    ],
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }
        else:
            # Chat responses carry a "choices" list.
            if response.get("choices"):
                choice = response["choices"][0]
                return {
                    "success": True,
                    "content_type": content_type,
                    "content": choice.get("message", {}).get("content", ""),
                    "model": response.get("model", ""),
                    "usage": response.get("usage", {}),
                }

        raise AIModelRequestError("无法解析OpenAI响应数据")


class AIModelService:
    """Unified AI model service.

    Facade over all configured providers (Doubao, Aliyun, OpenAI) offering a
    single generation API with parameter validation, provider fallback, cost
    estimation, and health checks.
    """

    def __init__(self):
        # Provider name -> provider instance; populated from settings.
        self.providers: Dict[str, AIProvider] = {}
        self._initialize_providers()

    def _initialize_providers(self) -> None:
        """Register every provider whose API key is present in settings."""
        # Doubao (Volcano Engine Ark)
        if settings.ai_provider.ark_api_key:
            self.providers["doubao"] = DoubaoProvider(
                api_key=settings.ai_provider.ark_api_key,
                base_url=settings.ai_provider.ark_base_url,
            )

        # Aliyun (DashScope)
        if settings.ai_provider.ali_api_key:
            self.providers["ali"] = AliyunProvider(
                api_key=settings.ai_provider.ali_api_key,
                base_url=settings.ai_provider.ali_base_url,
            )

        # OpenAI (uses the provider's default base URL)
        if settings.ai_provider.openai_api_key:
            self.providers["openai"] = OpenAIProvider(
                api_key=settings.ai_provider.openai_api_key
            )

        if not self.providers:
            logger.warning("没有配置任何AI模型提供商")

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((AIModelRequestError, httpx.RequestError)),
    )
    async def generate_content(
        self, prompt: str, content_type: str, provider: Optional[str] = None, **kwargs
    ) -> Dict[str, Any]:
        """Generate content through one provider (retried up to 3 times).

        Args:
            prompt: Generation prompt.
            content_type: Content kind (image, video, audio, text).
            provider: Provider name; None selects the configured default.
            **kwargs: Extra generation parameters.

        Returns:
            Dict[str, Any]: Generation result.

        Raises:
            AIModelNotAvailableError: Requested provider is not configured.
            AIModelRequestError: Generation failed.
            ValidationError: Parameter validation failed.
        """
        if not provider:
            provider = settings.ai_provider.default_provider

        ai_provider = self.providers.get(provider)
        if not ai_provider:
            available_providers = list(self.providers.keys())
            raise AIModelNotAvailableError(
                f"AI提供商 '{provider}' 不可用，可用提供商: {available_providers}"
            )

        logger.info(
            "生成内容请求",
            provider=provider,
            content_type=content_type,
            prompt_length=len(prompt),
        )

        try:
            # Validated values take precedence over the caller's raw kwargs.
            validated_params = await ai_provider.validate_parameters(
                content_type, **kwargs
            )
            generation_params = {**kwargs, **validated_params}

            result = await ai_provider.generate_content(
                prompt, content_type, **generation_params
            )

            logger.info(
                "内容生成成功",
                provider=provider,
                content_type=content_type,
                success=result.get("success", False),
            )
            return result

        except (ValidationError, AIModelRateLimitError):
            # Propagate unchanged: validation errors are caller mistakes and
            # rate limits must not be re-wrapped as generic request failures.
            raise
        except Exception as e:
            logger.error(
                "内容生成失败",
                provider=provider,
                content_type=content_type,
                error=str(e),
            )
            raise AIModelRequestError(f"内容生成失败: {str(e)}") from e

    async def generate_content_with_fallback(
        self,
        prompt: str,
        content_type: str,
        preferred_providers: Optional[List[str]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """Generate content, falling back through providers in order.

        Args:
            prompt: Generation prompt.
            content_type: Content kind.
            preferred_providers: Providers to try, in priority order;
                defaults to ["doubao", "ali", "openai"].
            **kwargs: Extra generation parameters.

        Returns:
            Dict[str, Any]: Generation result, with "provider_used" added.

        Raises:
            AIModelNotAvailableError: Every candidate provider failed or
                none is configured.
        """
        if not preferred_providers:
            preferred_providers = ["doubao", "ali", "openai"]

        # Keep only providers that are actually configured.
        available_providers = [
            name for name in preferred_providers if name in self.providers
        ]
        if not available_providers:
            raise AIModelNotAvailableError("没有可用的AI提供商")

        last_error = None
        # enumerate avoids the O(n), duplicate-unsafe list.index() lookup.
        for attempt, provider in enumerate(available_providers, start=1):
            try:
                logger.info("尝试AI提供商", provider=provider, attempt=attempt)
                result = await self.generate_content(
                    prompt, content_type, provider, **kwargs
                )
                # Record which provider actually produced the result.
                result["provider_used"] = provider
                return result

            except (
                AIModelNotAvailableError,
                AIModelRequestError,
                AIModelRateLimitError,
            ) as e:
                last_error = e
                logger.warning(f"AI提供商 {provider} 失败", error=str(e))

        # Every provider failed.
        raise AIModelNotAvailableError(
            f"所有AI提供商都不可用，最后错误: {str(last_error)}"
        )

    def get_available_providers(self) -> List[str]:
        """Return the names of all configured providers."""
        return list(self.providers.keys())

    def get_provider_info(self, provider: str) -> Optional[Dict[str, Any]]:
        """Return metadata for one provider, or None if not configured.

        Args:
            provider: Provider name.
        """
        ai_provider = self.providers.get(provider)
        if ai_provider:
            return ai_provider.get_model_info()
        return None

    def get_all_providers_info(self) -> Dict[str, Dict[str, Any]]:
        """Return metadata for every configured provider, keyed by name."""
        return {
            name: provider.get_model_info() for name, provider in self.providers.items()
        }

    async def validate_generation_parameters(
        self, content_type: str, provider: Optional[str] = None, **kwargs
    ) -> Dict[str, Any]:
        """Validate generation parameters against one provider.

        Args:
            content_type: Content kind.
            provider: Provider name; None selects the configured default.
            **kwargs: Parameters to validate.

        Returns:
            Dict[str, Any]: The validated parameters.

        Raises:
            AIModelNotAvailableError: Provider is not configured.
            ValidationError: Parameter validation failed.
        """
        if not provider:
            provider = settings.ai_provider.default_provider

        ai_provider = self.providers.get(provider)
        if not ai_provider:
            raise AIModelNotAvailableError(f"AI提供商 '{provider}' 不可用")

        return await ai_provider.validate_parameters(content_type, **kwargs)

    def estimate_generation_cost(
        self, content_type: str, provider: Optional[str] = None, **kwargs
    ) -> int:
        """Estimate the generation cost in cents (分).

        Args:
            content_type: Content kind.
            provider: Provider name; None selects the configured default.
            **kwargs: Generation parameters affecting cost (batch_size,
                quality, size).

        Returns:
            int: Estimated cost in cents.
        """
        if not provider:
            provider = settings.ai_provider.default_provider

        # Base price table per provider and content type, in cents.
        base_costs = {
            "doubao": {
                "image": 10,  # 0.10 yuan
                "text": 1,  # 0.01 yuan
                "video": 50,  # 0.50 yuan
                "audio": 20,  # 0.20 yuan
            },
            "ali": {
                "image": 15,  # 0.15 yuan
                "text": 2,  # 0.02 yuan
                "video": 60,  # 0.60 yuan
                "audio": 25,  # 0.25 yuan
            },
            "openai": {
                "image": 20,  # 0.20 yuan
                "text": 5,  # 0.05 yuan
                "video": 100,  # 1.00 yuan
                "audio": 30,  # 0.30 yuan
            },
        }

        base_cost = base_costs.get(provider, {}).get(content_type, 10)

        # Scale by the requested options.
        multiplier = 1.0

        # Each image in a batch is billed.
        multiplier *= kwargs.get("batch_size", 1)

        # Premium quality surcharge.
        if kwargs.get("quality") == "high":
            multiplier *= 1.5

        # Large images cost double.
        if content_type == "image" and kwargs.get("size") in ["2048x2048", "4096x4096"]:
            multiplier *= 2.0

        return int(base_cost * multiplier)

    @cache_result(cache_name="content_metadata", ttl=300)  # cached for 5 minutes
    async def health_check(self, provider: Optional[str] = None) -> Dict[str, Any]:
        """Health check (result cached for 5 minutes).

        Args:
            provider: Specific provider to check; None checks all of them.

        Returns:
            Dict[str, Any]: Status for the requested provider, or a mapping
            of provider name -> status when checking all.
        """
        if provider:
            ai_provider = self.providers.get(provider)
            if not ai_provider:
                return {
                    "provider": provider,
                    "status": "unavailable",
                    "error": "Provider not configured",
                }

            try:
                # get_model_info() is static metadata -- a cheap liveness proxy
                # that does not hit the network.
                info = ai_provider.get_model_info()
                return {"provider": provider, "status": "healthy", "info": info}
            except Exception as e:
                return {"provider": provider, "status": "unhealthy", "error": str(e)}

        # Check every configured provider. The loop variable is named "prov"
        # so it does not shadow the "provider" parameter.
        results: Dict[str, Any] = {}
        for name, prov in self.providers.items():
            try:
                results[name] = {"status": "healthy", "info": prov.get_model_info()}
            except Exception as e:
                results[name] = {"status": "unhealthy", "error": str(e)}
        return results

    async def test_connection(self, provider: str) -> Dict[str, Any]:
        """Test connectivity to one provider.

        Args:
            provider: Provider name (doubao, ali, openai).

        Returns:
            Dict[str, Any]: {"success": bool, ...} connection test result.
        """
        # NOTE: an unreachable block of dead code after the final return
        # (a stray health_check summary) was removed from this method.
        try:
            ai_provider = self.providers.get(provider)
            if not ai_provider:
                return {
                    "success": False,
                    "error": f"Provider {provider} not configured",
                }

            if hasattr(ai_provider, "test_connection"):
                # Provider ships its own connection test; delegate to it.
                return await ai_provider.test_connection()

            # Fall back to fetching model metadata as a lightweight probe.
            try:
                model_info = ai_provider.get_model_info()
                return {
                    "success": True,
                    "message": f"Connection to {provider} successful",
                    "model_info": model_info,
                }
            except Exception as e:
                return {
                    "success": False,
                    "error": f"Connection test failed: {str(e)}",
                }

        except Exception as e:
            logger.error(
                f"Connection test failed for provider: {provider}", error=str(e)
            )
            return {"success": False, "error": str(e)}

    async def close(self) -> None:
        """Close the HTTP client of every configured provider."""
        for provider in self.providers.values():
            if hasattr(provider, "client"):
                await provider.client.aclose()


# Module-level singleton, created at import time. NOTE: providers are built
# eagerly from settings here, so importing this module has side effects.
ai_model_service = AIModelService()

# Public API of this module
__all__ = ["AIModelService", "ai_model_service"]
