"""
本地大模型适配器
支持Ollama、LM Studio、Text Generation WebUI、vLLM等本地部署的大模型
"""

import httpx
import json
from typing import Dict, Any, List, Optional
import logging
from .base_adapter import BaseModelAdapter

logger = logging.getLogger(__name__)


class LocalModelAdapter(BaseModelAdapter):
    """Adapter for locally deployed large language models.

    Supports Ollama, LM Studio, Text Generation WebUI, vLLM and other
    local services that expose either an OpenAI-compatible
    ``/v1/chat/completions`` endpoint or a native completion API.
    """

    def __init__(self, api_key: str = "local", api_url: str = "http://localhost:11434/v1/chat/completions", model: str = "llama2"):
        """Initialize the adapter.

        Args:
            api_key: API key; the sentinel value ``"local"`` means no auth.
            api_url: Full URL of the local completion endpoint.
            model: Name of the local model to query.
        """
        super().__init__(api_key, api_url, model)

    async def generate_completion(
        self,
        messages: List[Dict[str, str]],
        temperature: float = 0.2,
        max_tokens: int = 4000,
        timeout: float = 300.0,
        **kwargs
    ) -> str:
        """Generate a chat completion from the local model.

        Args:
            messages: Chat messages in OpenAI format
                (``[{"role": ..., "content": ...}, ...]``).
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.
            timeout: Request timeout in seconds (local models can be slow,
                hence the generous default).
            **kwargs: Extra fields merged into the request payload.

        Returns:
            The generated text content.

        Raises:
            Exception: If the HTTP call returns a non-200 status or the
                response contains no recognizable content field.
        """
        try:
            # Build the request payload; caller-supplied kwargs may
            # override or extend the defaults.
            request_data: Dict[str, Any] = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": False,
                **kwargs,
            }

            # Only request structured JSON output from services known to
            # support the OpenAI `response_format` parameter.
            if self._should_use_json_format():
                request_data["response_format"] = {"type": "json_object"}

            headers = self._build_headers()

            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.post(
                    self.api_url,
                    headers=headers,
                    json=request_data,
                )

                if response.status_code != 200:
                    # Try to surface the server's JSON error body, falling
                    # back to raw text. FIX: was a bare `except:`, which
                    # would also swallow KeyboardInterrupt/SystemExit;
                    # response.json() raises json.JSONDecodeError, a
                    # ValueError subclass.
                    try:
                        error_detail = f", 错误详情: {response.json()}"
                    except ValueError:
                        error_detail = f", 响应内容: {response.text[:200]}"

                    raise Exception(
                        f"本地模型API调用失败: 状态码={response.status_code}{error_detail}")

                result = response.json()

                # Local services differ in response schema; probe the
                # common layouts in order of likelihood.
                content = self._extract_content_from_response(result)
                if not content:
                    raise Exception("本地模型API返回格式异常：无法找到响应内容")

                # Lazy %-style args so the message is only formatted when
                # the log level is actually enabled.
                logger.info("本地模型API调用成功，返回内容长度: %d", len(content))
                return content

        except Exception as e:
            # Boundary logging; re-raise so callers can handle/report.
            logger.error("本地模型API调用失败: %s", e)
            raise

    def _build_headers(self) -> Dict[str, str]:
        """Build the HTTP request headers.

        A Bearer token is attached only when a real API key (anything
        other than the ``"local"`` sentinel) is configured.
        """
        headers = {"Content-Type": "application/json"}
        if self.api_key and self.api_key != "local":
            headers["Authorization"] = f"Bearer {self.api_key}"
        return headers

    def _should_use_json_format(self) -> bool:
        """Heuristically decide whether the endpoint accepts the OpenAI
        ``response_format`` parameter.

        Returns:
            True for OpenAI-compatible chat paths or known JSON-capable
            local services, False otherwise.
        """
        # Any OpenAI-compatible chat endpoint supports response_format.
        if "v1/chat/completions" in self.api_url:
            return True

        # Hosts/backends known to honor JSON-format output requests.
        json_supported_services = (
            "localhost:11434",  # Ollama with OpenAI compatibility
            "127.0.0.1:11434",
            "vllm",
            "text-generation-inference",
            "fastchat",
        )
        url = self.api_url.lower()
        return any(service in url for service in json_supported_services)

    def _extract_content_from_response(self, result: Dict[str, Any]) -> Optional[str]:
        """Extract the generated text from a response of unknown schema.

        Probes, in order: OpenAI ``choices[0].message.content``, Ollama
        native ``response``, then the generic ``text`` / ``content`` /
        ``generated_text`` fields.

        Args:
            result: Parsed JSON response body.

        Returns:
            The extracted text, or None if no known field is present or
            the structure is malformed.
        """
        try:
            # OpenAI-compatible format takes priority.
            if "choices" in result and result["choices"]:
                choice = result["choices"][0]
                if "message" in choice and "content" in choice["message"]:
                    return choice["message"]["content"]

            # Ollama native API and other common flat layouts.
            for key in ("response", "text", "content", "generated_text"):
                if key in result:
                    return result[key]

            return None

        except (KeyError, IndexError, TypeError):
            # Unexpected structure (e.g. non-dict nodes): signal "not found".
            return None

    def get_provider_name(self) -> str:
        """Return the human-readable provider name."""
        return "本地大模型"

    def get_supported_models(self) -> List[str]:
        """Return a reference list of commonly used local model names.

        The adapter does not restrict model names: the currently
        configured model is appended if it is not already listed, so
        arbitrary/future local models remain usable.
        """
        common_models = [
            # Llama family
            "llama2",
            "llama2:7b",
            "llama2:13b",
            "llama2:70b",
            "llama3",
            "llama3:8b",
            "llama3:70b",
            "llama3.1",
            "llama3.1:8b",
            "llama3.1:70b",
            "llama3.2",
            "llama3.2:1b",
            "llama3.2:3b",
            # Future Llama models
            "llama4",
            "llama4:8b",
            "llama4:70b",
            # CodeLlama family
            "codellama",
            "codellama:7b",
            "codellama:13b",
            "codellama:34b",
            # Mistral family
            "mistral",
            "mistral:7b",
            "mistral:22b",
            "mixtral",
            "mixtral:8x7b",
            "mixtral:8x22b",
            # Qwen family
            "qwen",
            "qwen:7b",
            "qwen:14b",
            "qwen:72b",
            "qwen2",
            "qwen2:7b",
            "qwen2:72b",
            "qwen2.5",
            "qwen2.5:7b",
            "qwen2.5:14b",
            "qwen2.5:32b",
            "qwen2.5:72b",
            # Future Qwen models
            "qwen3",
            "qwen3:7b",
            # ChatGLM family
            "chatglm3",
            "chatglm3:6b",
            "chatglm4",
            "chatglm4:9b",
            # Baichuan family
            "baichuan2",
            "baichuan2:7b",
            "baichuan2:13b",
            # Yi family
            "yi",
            "yi:6b",
            "yi:34b",
            # DeepSeek family
            "deepseek-coder",
            "deepseek-coder:6.7b",
            "deepseek-coder:33b",
            # Generic local model names
            "local-model",
            "custom-model"
        ]

        # Keep the configured model selectable even if it is not in the
        # reference list (supports future/custom models).
        if self.model and self.model not in common_models:
            common_models.append(self.model)
            logger.info("添加未知本地模型到支持列表: %s", self.model)

        return common_models

    def validate_config(self) -> bool:
        """Validate that the adapter has a usable configuration.

        Returns:
            True when both the API URL and model name are set.
        """
        if not self.api_url:
            logger.error("本地模型API地址未配置")
            return False
        if not self.model:
            logger.error("本地模型名称未配置")
            return False
        return True

    def format_error_message(self, error: Exception) -> str:
        """Format an exception into a user-facing error message."""
        return f"本地模型API调用失败: {str(error)}"
