"""
OpenAI兼容模型适配器
支持OpenAI、DeepSeek等使用OpenAI API格式的模型
"""

import httpx
import json
from typing import Dict, Any, List, Optional
import logging
from .base_adapter import BaseModelAdapter

logger = logging.getLogger(__name__)


class OpenAIAdapter(BaseModelAdapter):
    """Adapter for providers that speak the OpenAI chat-completions API format.

    Covers OpenAI itself plus OpenAI-compatible backends such as DeepSeek.
    Configuration (``api_key``, ``api_url``, ``model``) is stored by the base
    class; this adapter only implements the request/response handling.
    """

    def __init__(self, api_key: str, api_url: str, model: str = "gpt-3.5-turbo"):
        super().__init__(api_key, api_url, model)

    def _request_headers(self) -> Dict[str, str]:
        """Bearer-token headers shared by every request to the API."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

    def _supports_json_response_format(self) -> bool:
        """Return True if the model accepts the ``response_format`` field.

        DeepSeek "reasoner" models reject ``{"type": "json_object"}``, so the
        field must be omitted for them.
        """
        return not (self.model and "reasoner" in self.model.lower())

    async def generate_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.2,
        max_tokens: int = 4000,
        timeout: float = 300.0,
        **kwargs
    ) -> str:
        """Call the chat-completions endpoint and return the answer text.

        Args:
            messages: Chat messages in OpenAI format (``role``/``content`` dicts).
            temperature: Sampling temperature forwarded to the API.
            max_tokens: Upper bound on completion length.
            timeout: Per-request timeout in seconds.
            **kwargs: Extra fields merged verbatim into the request body.

        Returns:
            The ``content`` string of the first choice's message.

        Raises:
            Exception: On non-200 responses or malformed response payloads.
        """
        try:
            request_data: Dict[str, Any] = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                **kwargs
            }

            # Request strict JSON output where the model supports it; some
            # models (e.g. DeepSeek reasoner) must not receive this field.
            try_json_format = self._supports_json_response_format()
            if try_json_format:
                request_data["response_format"] = {"type": "json_object"}
            else:
                logger.info(f"检测到reasoner模型 {self.model}，跳过JSON格式设置")

            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.post(
                    self.api_url,
                    headers=self._request_headers(),
                    json=request_data
                )

                if response.status_code != 200:
                    error_detail = ""
                    try:
                        error_response = response.json()
                        error_detail = f", 错误详情: {error_response}"

                        # Some backends reject response_format outright; when
                        # the error mentions it, retry once without the field.
                        if ("response_format" in str(error_response) or
                                "json_object" in str(error_response).lower()) and try_json_format:
                            logger.warning(
                                "模型不支持JSON格式，尝试不使用response_format参数")
                            request_data.pop("response_format", None)
                            response = await client.post(
                                self.api_url,
                                headers=self._request_headers(),
                                json=request_data
                            )
                            if response.status_code == 200:
                                result = response.json()
                                if self._validate_response_format(result):
                                    content = result["choices"][0]["message"]["content"]
                                    logger.info(
                                        f"OpenAI兼容API调用成功（无JSON格式），返回内容长度: {len(content)}")
                                    return content
                    except Exception:
                        # Error body was not JSON (or the retry path failed to
                        # parse); fall back to a raw-text excerpt for diagnostics.
                        error_detail = f", 响应内容: {response.text[:200]}"

                    raise Exception(
                        f"API调用失败: 状态码={response.status_code}{error_detail}")

                result = response.json()

                # Reject payloads missing choices/message/content fields.
                if not self._validate_response_format(result):
                    raise Exception("API返回格式异常：缺少必要字段")

                message = result["choices"][0]["message"]
                content = message.get("content", "")

                # deepseek-reasoner returns its chain of thought in a separate
                # ``reasoning_content`` field alongside the final answer.
                if "reasoning_content" in message and self.model and "reasoner" in self.model.lower():
                    reasoning_content = message.get("reasoning_content", "")
                    logger.info(
                        f"检测到deepseek-reasoner响应，思考内容长度: {len(reasoning_content)}, 最终答案长度: {len(content)}")
                    # Only the final answer (``content``) is returned here; the
                    # reasoning text is filtered downstream by the reasoning
                    # response handler.
                else:
                    logger.info(f"OpenAI兼容API调用成功，返回内容长度: {len(content)}")

                return content

        except Exception as e:
            logger.error(f"OpenAI兼容API调用失败: {str(e)}")
            raise

    def _validate_response_format(self, result: Dict[str, Any]) -> bool:
        """Return True if *result* has the expected choices/message/content shape."""
        try:
            if "choices" not in result or not result["choices"]:
                return False

            choice = result["choices"][0]
            if "message" not in choice:
                return False

            message = choice["message"]
            if "content" not in message:
                return False

            return True
        except (KeyError, IndexError, TypeError):
            return False

    def get_provider_name(self) -> str:
        """Return the human-readable provider label."""
        return "OpenAI兼容"

    def get_supported_models(self) -> List[str]:
        """Return a non-exhaustive list of model names this adapter supports.

        The adapter accepts arbitrary model names; this list is a reference of
        common ones. The currently configured model is appended if absent, so
        unknown/future models remain usable.
        """
        common_models = [
            # Official OpenAI models
            "gpt-3.5-turbo",
            "gpt-4",
            "gpt-4-turbo",
            "gpt-4o",
            "gpt-4o-mini",
            # Anticipated future OpenAI models
            "gpt-4o-2024-12-17",
            "gpt-5",
            "gpt-5-turbo",
            "o1-preview",
            "o1-mini",
            # DeepSeek models
            "deepseek-chat",
            "deepseek-reasoner",
            "deepseek-coder",
            "deepseek-v3",
            "deepseek-chat-v2",
            "deepseek-chat-v3",
            # Other OpenAI-compatible models
            "claude-3-opus",
            "claude-3-sonnet",
            "claude-3-haiku"
        ]

        # Keep the configured model in the list even if it is not a known one.
        if self.model and self.model not in common_models:
            common_models.append(self.model)
            logger.info(f"添加未知OpenAI兼容模型到支持列表: {self.model}")

        return common_models

    def validate_config(self) -> bool:
        """Return True if api_key, api_url and model are all set and non-placeholder."""
        if not self.api_key or self.api_key.strip() == "":
            logger.error("OpenAI兼容API密钥未配置")
            return False

        if not self.api_url or self.api_url.strip() == "":
            logger.error("OpenAI兼容API地址未配置")
            return False

        if not self.model or self.model.strip() == "":
            logger.error("OpenAI兼容模型名称未配置")
            return False

        # Reject well-known placeholder values left over from sample configs.
        placeholder_keys = [
            "your-openai-api-key-here",
            "your-deepseek-api-key-here",
            "your-api-key-here"
        ]
        if self.api_key in placeholder_keys:
            logger.error("检测到OpenAI兼容API密钥为占位符，请配置真实的API密钥")
            return False

        return True
