"""
智谱AI模型适配器
支持GLM-4系列模型
"""

import httpx
import json
from typing import Dict, Any, List, Optional
import logging
from .base_adapter import BaseModelAdapter

logger = logging.getLogger(__name__)


class ZhipuAdapter(BaseModelAdapter):
    """Zhipu AI (BigModel) adapter.

    Sends chat-completion requests to the OpenAI-compatible endpoint at
    open.bigmodel.cn. If a model rejects the ``response_format`` JSON
    mode, the request is retried once without that parameter.
    """

    # Models known to accept response_format={"type": "json_object"}.
    # Other GLM models may work too; unknown models simply skip JSON mode.
    _JSON_SUPPORTED_MODELS = frozenset((
        "glm-4", "glm-4-plus", "glm-4-flash",
        "glm-4-air", "glm-4-airx", "glm-4-long",
    ))

    # Static metadata for known models; built once at class creation
    # instead of on every get_model_info() call.
    _MODEL_INFO: Dict[str, Dict[str, Any]] = {
        "glm-4": {
            "description": "智谱AI GLM-4 通用对话模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4-plus": {
            "description": "智谱AI GLM-4-Plus 增强版模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4-flash": {
            "description": "智谱AI GLM-4-Flash 快速响应模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4-air": {
            "description": "智谱AI GLM-4-Air 轻量级模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4-airx": {
            "description": "智谱AI GLM-4-AirX 增强轻量级模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4-long": {
            "description": "智谱AI GLM-4-Long 长文本模型",
            "context_length": 1000000,
            "max_output": 4096,
            "supports_json": True
        },
        "glm-4v": {
            "description": "智谱AI GLM-4V 多模态模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True,
            "supports_vision": True
        },
        "glm-4v-plus": {
            "description": "智谱AI GLM-4V-Plus 增强多模态模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True,
            "supports_vision": True
        }
    }

    def __init__(self, api_key: str, api_url: str = "https://open.bigmodel.cn/api/paas/v4/chat/completions", model: str = "glm-4"):
        super().__init__(api_key, api_url, model)

    def _auth_headers(self) -> Dict[str, str]:
        """Return the HTTP headers used for every API request.

        Extracted so the initial request and the JSON-mode retry share a
        single definition instead of two duplicated dict literals.
        """
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    async def generate_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.2,
        max_tokens: int = 4000,
        timeout: float = 300.0,
        **kwargs
    ) -> str:
        """Generate a text completion.

        Args:
            messages: Chat history as [{"role": ..., "content": ...}, ...].
            temperature: Sampling temperature forwarded to the API.
            max_tokens: Maximum number of tokens to generate.
            timeout: Total request timeout in seconds.
            **kwargs: Extra fields merged into the request payload.

        Returns:
            The assistant message content of the first choice.

        Raises:
            Exception: If the API returns a non-200 status, or a 200
                response lacking the expected choices/message/content
                fields.
        """
        try:
            # Build the request payload; caller kwargs are merged last so
            # they can override the defaults above.
            request_data: Dict[str, Any] = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                **kwargs
            }

            # Zhipu AI supports JSON output, but only on some models;
            # request it only when the model is known to accept it.
            try_json_format = self.model in self._JSON_SUPPORTED_MODELS
            if try_json_format:
                request_data["response_format"] = {"type": "json_object"}

            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.post(
                    self.api_url,
                    headers=self._auth_headers(),
                    json=request_data
                )

                if response.status_code != 200:
                    error_detail = ""
                    try:
                        error_response = response.json()
                        error_detail = f", 错误详情: {error_response}"

                        # If the error mentions response_format/json_object,
                        # the model likely rejected JSON mode: retry once
                        # without the response_format parameter.
                        if ("response_format" in str(error_response) or
                                "json_object" in str(error_response).lower()) and try_json_format:
                            logger.warning(
                                "智谱AI模型不支持JSON格式，尝试不使用response_format参数")
                            request_data.pop("response_format", None)
                            response = await client.post(
                                self.api_url,
                                headers=self._auth_headers(),
                                json=request_data
                            )
                            if response.status_code == 200:
                                result = response.json()
                                if self._validate_response_format(result):
                                    content = result["choices"][0]["message"]["content"]
                                    logger.info(
                                        f"智谱AI API调用成功（无JSON格式），返回内容长度: {len(content)}")
                                    return content
                    except Exception:
                        # Was a bare `except:`, which also swallowed
                        # BaseException (e.g. asyncio.CancelledError);
                        # narrowed so coroutine cancellation propagates.
                        error_detail = f", 响应内容: {response.text[:200]}"

                    raise Exception(
                        f"智谱AI API调用失败: 状态码={response.status_code}{error_detail}")

                result = response.json()

                # Reject 200 responses missing choices/message/content.
                if not self._validate_response_format(result):
                    raise Exception("智谱AI API返回格式异常：缺少必要字段")

                content = result["choices"][0]["message"]["content"]

                logger.info(f"智谱AI API调用成功，返回内容长度: {len(content)}")
                return content

        except Exception as e:
            # Log once at this boundary, then re-raise for the caller.
            logger.error(f"智谱AI API调用失败: {str(e)}")
            raise

    def _validate_response_format(self, result: Dict[str, Any]) -> bool:
        """Return True if *result* contains choices[0].message.content."""
        try:
            if "choices" not in result or not result["choices"]:
                return False

            choice = result["choices"][0]
            if "message" not in choice:
                return False

            message = choice["message"]
            if "content" not in message:
                return False

            return True
        except (KeyError, IndexError, TypeError):
            # Malformed / non-dict payloads count as invalid, not errors.
            return False

    def get_provider_name(self) -> str:
        """Return the human-readable provider name."""
        return "智谱AI"

    def get_supported_models(self) -> List[str]:
        """Return common GLM model names plus the configured model.

        The adapter does not restrict model names; this list is a
        reference of known models, extended with ``self.model`` so that
        future/unknown GLM models are also reported as supported.
        """
        common_models = [
            # GLM-4 series
            "glm-4",
            "glm-4-plus",
            "glm-4-flash",
            "glm-4-air",
            "glm-4-airx",
            "glm-4-long",
            "glm-4v",
            "glm-4v-plus",
            # GLM-5 series (future models)
            "glm-5",
            "glm-5-plus",
            "glm-5-flash",
            "glm-5-air",
            "glm-5-long",
            "glm-5v",
            # ChatGLM series
            "chatglm3",
            "chatglm3-6b",
            "chatglm4",
            "chatglm4-9b",
            "chatglm5",
            # CodeGeeX series
            "codegeex4",
            "codegeex5",
            # Generic alias
            "bigmodel"
        ]

        # Also include the configured model when it is not in the common
        # list, so newer models are not rejected.
        if self.model and self.model not in common_models:
            common_models.append(self.model)
            logger.info(f"添加未知智谱AI模型到支持列表: {self.model}")

        return common_models

    def validate_config(self) -> bool:
        """Validate that api_key, api_url and model are usable.

        Returns False (after logging the reason) when any field is empty
        or the API key is a known placeholder value.
        """
        if not self.api_key or self.api_key.strip() == "":
            logger.error("智谱AI API密钥未配置")
            return False

        if not self.api_url or self.api_url.strip() == "":
            logger.error("智谱AI API地址未配置")
            return False

        if not self.model or self.model.strip() == "":
            logger.error("智谱AI模型名称未配置")
            return False

        # Reject sample keys shipped in configuration templates.
        placeholder_keys = ["your-zhipu-api-key-here", "your-api-key-here"]
        if self.api_key in placeholder_keys:
            logger.error("检测到智谱AI API密钥为占位符，请配置真实的API密钥")
            return False

        return True

    def get_model_info(self) -> Dict[str, Any]:
        """Return metadata for the configured model.

        Unknown models get a generic GLM-4-like default entry. A copy of
        the class-level table entry is returned, so callers may mutate
        the result without affecting later calls (matching the original
        per-call-dict behavior).
        """
        info = self._MODEL_INFO.get(self.model)
        if info is not None:
            return dict(info)

        return {
            "description": f"智谱AI {self.model} 模型",
            "context_length": 128000,
            "max_output": 4096,
            "supports_json": True
        }
