"""
统一AI服务 - 支持多个LLM提供商
支持: GLM-4-Flash, Qwen-turbo, Hunyuan-lite

优先级策略:
- 单客户场景: Hunyuan-lite (最快) > Qwen-turbo > GLM-4-Flash
- 多客户复杂场景: Qwen-turbo (最准) > Hunyuan-lite > GLM-4-Flash
"""

import asyncio
import json
import logging
from typing import Any, Literal

from app.core.config import settings

logger = logging.getLogger(__name__)

# Scenario discriminator used to pick the provider priority order:
# "simple" = single-customer request, "complex" = multi-customer request.
ScenarioType = Literal["simple", "complex"]


class UnifiedAIService:
    """Unified AI service: scenario-aware provider selection with automatic fallback.

    Providers are tried in a scenario-dependent priority order and the first
    one that succeeds wins.  Credentials are read from
    ``app.core.config.settings``.
    """

    # System prompt shared verbatim by every provider (runtime string — must
    # not be altered or translated).
    _SYSTEM_PROMPT = "你是一个专业的房产CRM数据提取助手,擅长从自然语言中提取客户信息和房源需求。"

    def __init__(self) -> None:
        """Detect the configured providers once at construction time."""
        self.available_providers = self._check_available_providers()
        logger.info("UnifiedAIService initialized with providers: %s", self.available_providers)

    def _check_available_providers(self) -> list[str]:
        """Return the names of providers whose credentials are configured in settings."""
        providers: list[str] = []
        if settings.HUNYUAN_SECRET_ID and settings.HUNYUAN_SECRET_KEY:
            providers.append("hunyuan")
        if settings.QWEN_API_KEY:
            providers.append("qwen")
        if settings.GLM_API_KEY:
            providers.append("glm")
        return providers

    def _get_provider_priority(self, scenario: "ScenarioType" = "simple") -> list[str]:
        """Return the provider fallback chain for *scenario*.

        Args:
            scenario: ``"simple"`` (single customer) or ``"complex"``
                (multi-customer).

        Returns:
            Provider names in call order, filtered to available providers only.
        """
        if scenario == "simple":
            # Single-customer scenario: fastest provider first.
            priority = ["hunyuan", "qwen", "glm"]
        else:
            # Complex scenario: GLM first (free + fast + accurate).
            priority = ["glm", "hunyuan", "qwen"]

        # Only return providers that actually have credentials configured.
        return [p for p in priority if p in self.available_providers]

    async def call_llm(
        self,
        prompt: str,
        temperature: float = 0.1,
        max_tokens: int = 1000,
        scenario: "ScenarioType" = "simple",
    ) -> str:
        """Call an LLM through the unified interface with automatic fallback.

        Args:
            prompt: The user prompt text.
            temperature: Sampling temperature (0-1, lower = more stable).
            max_tokens: Maximum number of tokens to generate.
            scenario: ``"simple"`` (single customer) or ``"complex"``
                (multi-customer), selects the provider priority order.

        Returns:
            The response text from the first provider that succeeds.

        Raises:
            RuntimeError: If no provider is configured, or every provider in
                the chain fails.
        """
        if not self.available_providers:
            raise RuntimeError("No AI provider configured. Please set GLM_API_KEY, QWEN_API_KEY or HUNYUAN credentials in .env")

        provider_chain = self._get_provider_priority(scenario)
        logger.info("Scenario: %s, Provider chain: %s", scenario, provider_chain)

        # Dispatch table keeps this loop free of per-provider branching.
        dispatch = {
            "glm": self._call_glm,
            "qwen": self._call_qwen,
            "hunyuan": self._call_hunyuan,
        }

        errors: list[str] = []
        for provider in provider_chain:
            call = dispatch.get(provider)
            if call is None:
                continue
            try:
                logger.info("Trying provider: %s", provider.upper())
                result = await call(prompt, temperature, max_tokens)
            except Exception as e:
                # Record the failure and fall through to the next provider.
                error_msg = f"{provider.upper()} failed: {type(e).__name__}: {str(e)}"
                logger.warning(error_msg)
                errors.append(error_msg)
            else:
                logger.info("Successfully called %s", provider.upper())
                return result

        # Every provider in the chain failed.
        all_errors = "; ".join(errors)
        logger.error("All providers failed. Errors: %s", all_errors)
        raise RuntimeError(f"AI解析失败,所有提供商均不可用: {all_errors}")

    async def _call_openai_compatible(
        self,
        *,
        api_key: str,
        base_url: str,
        model: str,
        prompt: str,
        temperature: float,
        max_tokens: int,
    ) -> str:
        """Call any OpenAI-compatible chat-completions endpoint.

        The OpenAI SDK is synchronous, so the request runs in a worker thread
        (``asyncio.to_thread``) to avoid blocking the event loop.

        Raises:
            RuntimeError: If the ``openai`` package is not installed.
        """
        try:
            from openai import OpenAI
        except ImportError as exc:
            raise RuntimeError("openai library not installed. Run: pip install openai") from exc

        client = OpenAI(api_key=api_key, base_url=base_url)

        def _sync_call() -> str:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": self._SYSTEM_PROMPT},
                    {"role": "user", "content": prompt},
                ],
                temperature=temperature,
                max_tokens=max_tokens,
            )
            # Guard against a None content field in the SDK response model.
            return response.choices[0].message.content or ""

        return await asyncio.to_thread(_sync_call)

    async def _call_glm(self, prompt: str, temperature: float, max_tokens: int) -> str:
        """Call Zhipu AI (model ``glm-4-air``) via its OpenAI-compatible endpoint."""
        return await self._call_openai_compatible(
            api_key=settings.GLM_API_KEY,
            base_url="https://open.bigmodel.cn/api/paas/v4/",
            model="glm-4-air",
            prompt=prompt,
            temperature=temperature,
            max_tokens=max_tokens,
        )

    async def _call_qwen(self, prompt: str, temperature: float, max_tokens: int) -> str:
        """Call Alibaba Cloud Qwen (model ``qwen-turbo``) via its OpenAI-compatible endpoint."""
        return await self._call_openai_compatible(
            api_key=settings.QWEN_API_KEY,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            model="qwen-turbo",
            prompt=prompt,
            temperature=temperature,
            max_tokens=max_tokens,
        )

    async def _call_hunyuan(self, prompt: str, temperature: float, max_tokens: int) -> str:
        """Call Tencent Hunyuan (model ``hunyuan-lite``) via the Tencent Cloud SDK.

        NOTE(review): ``max_tokens`` is accepted for interface symmetry but is
        not forwarded — the request built below sets no token limit; confirm
        whether the Hunyuan API supports one before relying on it.

        Raises:
            RuntimeError: If the SDK is not installed or the API returns no choices.
        """
        try:
            from tencentcloud.common import credential
            from tencentcloud.common.profile.client_profile import ClientProfile
            from tencentcloud.common.profile.http_profile import HttpProfile
            from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
        except ImportError as exc:
            raise RuntimeError("tencentcloud-sdk-python not installed. Run: pip install tencentcloud-sdk-python") from exc

        cred = credential.Credential(settings.HUNYUAN_SECRET_ID, settings.HUNYUAN_SECRET_KEY)
        http_profile = HttpProfile()
        http_profile.endpoint = "hunyuan.tencentcloudapi.com"

        client_profile = ClientProfile()
        client_profile.httpProfile = http_profile

        client = hunyuan_client.HunyuanClient(cred, "", client_profile)

        def _sync_call() -> str:
            req = models.ChatCompletionsRequest()
            params = {
                "Model": "hunyuan-lite",
                "Messages": [
                    {
                        "Role": "system",
                        "Content": self._SYSTEM_PROMPT,
                    },
                    {
                        "Role": "user",
                        "Content": prompt,
                    },
                ],
                "TopP": 0.8,
                "Temperature": temperature,
            }
            req.from_json_string(json.dumps(params))

            resp = client.ChatCompletions(req)
            # The SDK response is re-parsed from JSON to plain dicts.
            result = json.loads(resp.to_json_string())
            if "Choices" in result and len(result["Choices"]) > 0:
                return result["Choices"][0]["Message"]["Content"]
            raise RuntimeError("Hunyuan API returned empty response")

        # The Tencent SDK call is synchronous; run it off the event loop.
        return await asyncio.to_thread(_sync_call)


# Module-level singleton (created lazily by get_ai_service()).
_ai_service_instance: UnifiedAIService | None = None


def get_ai_service() -> UnifiedAIService:
    """Return the process-wide UnifiedAIService singleton, creating it on first use."""
    global _ai_service_instance
    if _ai_service_instance is not None:
        return _ai_service_instance
    _ai_service_instance = UnifiedAIService()
    return _ai_service_instance
