"""
多后端 LLM 服务封装
支持 Ollama、OpenAI、Grok 等多种模型后端
"""

import json
import logging
import os
from typing import Dict, Any, Optional

import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import BaseLanguageModel, Generation, LLMResult

logger = logging.getLogger(__name__)


class OllamaChat(BaseLanguageModel):
    """Wrapper around a locally hosted Ollama model.

    Configuration precedence: explicit constructor kwargs first, then the
    OLLAMA_BASE_URL / OLLAMA_MODEL_NAME environment variables, then the
    class defaults below.
    """

    model_name: str = "qwen2.5:7b"
    temperature: float = 0.1
    max_tokens: int = 2000
    base_url: str = "http://localhost:11434"

    def __init__(self, **kwargs):
        # Environment variables act only as defaults: explicitly passed
        # kwargs win. (Previously the env values unconditionally overwrote
        # caller-supplied settings after super().__init__().)
        kwargs.setdefault('base_url', os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434'))
        kwargs.setdefault('model_name', os.getenv('OLLAMA_MODEL_NAME', 'qwen2.5:7b'))
        super().__init__(**kwargs)

        logger.info(f"Ollama service initialized: {self.base_url}, model: {self.model_name}")

    def _call(
        self,
        prompt: str,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> str:
        """
        Call the Ollama ``/api/generate`` endpoint.

        Args:
            prompt: Input prompt text.
            stop: Optional list of stop sequences.
            run_manager: Optional LangChain callback manager (not used here).

        Returns:
            The model's response text, or a canned JSON fallback payload
            when the request fails or returns a non-200 status.
        """
        try:
            data = {
                "model": self.model_name,
                "prompt": prompt,
                "stream": False,  # request the full response in one payload
                "options": {
                    "temperature": self.temperature,
                    "num_predict": self.max_tokens,
                    "top_p": 0.9,
                    "top_k": 40
                }
            }

            if stop:
                data["options"]["stop"] = stop

            response = requests.post(
                f"{self.base_url}/api/generate",
                json=data,
                timeout=60
            )

            if response.status_code == 200:
                result = response.json()
                content = result.get("response", "")
                logger.info(f"Ollama response received: {len(content)} chars")
                return content

            logger.error(f"Ollama API error: {response.status_code} - {response.text}")
            return self._get_fallback_response(prompt)

        except Exception as e:
            # Deliberately broad: any transport/parse failure degrades to the
            # canned fallback rather than crashing the caller.
            logger.error(f"Ollama API call failed: {e}")
            return self._get_fallback_response(prompt)

    def _get_fallback_response(self, prompt: str) -> str:
        """
        Return a canned JSON fallback used when the API call fails.

        Args:
            prompt: Input prompt (unused; kept for a uniform signature).

        Returns:
            JSON string shaped like a normal agent response.
        """
        return json.dumps({
            "thought": "API调用失败，使用后备响应",
            "action": "",
            "observation": "服务暂时不可用",
            "answer": "抱歉，AI 服务暂时出现问题。请稍后重试或直接联系我们的客服热线。"
        })

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this model type."""
        return "ollama"

    def _generate(
        self,
        prompts: list,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> LLMResult:
        """Generate one response per prompt.

        Builds explicit ``Generation`` objects (rather than bare dicts) so
        the ``LLMResult`` payload matches the LangChain schema.
        """
        generations = [
            [Generation(text=self._call(prompt, stop, run_manager, **kwargs))]
            for prompt in prompts
        ]
        return LLMResult(generations=generations)


class OpenAIChat(BaseLanguageModel):
    """Wrapper for an OpenAI-compatible chat-completions API.

    Configuration precedence: explicit constructor kwargs first, then the
    OPENAI_API_KEY / OPENAI_BASE_URL / OPENAI_MODEL_NAME environment
    variables, then the class defaults below.
    """

    model_name: str = "gpt-3.5-turbo"
    temperature: float = 0.1
    max_tokens: int = 2000
    api_key: str = ""
    base_url: str = "https://api.openai.com/v1"

    def __init__(self, **kwargs):
        # Environment variables act only as defaults: explicitly passed
        # kwargs win. (Previously the env values unconditionally overwrote
        # caller-supplied settings after super().__init__().)
        kwargs.setdefault('api_key', os.getenv('OPENAI_API_KEY', ''))
        kwargs.setdefault('base_url', os.getenv('OPENAI_BASE_URL', 'https://api.openai.com/v1'))
        kwargs.setdefault('model_name', os.getenv('OPENAI_MODEL_NAME', 'gpt-3.5-turbo'))
        super().__init__(**kwargs)

        if not self.api_key:
            logger.warning("OPENAI_API_KEY not set")

    def _call(
        self,
        prompt: str,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> str:
        """
        Call the OpenAI-compatible ``/chat/completions`` endpoint.

        Args:
            prompt: Input prompt text (sent as a single user message).
            stop: Optional list of stop sequences.
            run_manager: Optional LangChain callback manager (not used here).

        Returns:
            The model's response text, or a canned JSON fallback payload
            when the key is missing, the request fails, or a non-200
            status is returned.
        """
        try:
            if not self.api_key:
                # No key configured: degrade immediately instead of sending
                # a request that is guaranteed to be rejected.
                return self._get_fallback_response(prompt)

            data = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                "temperature": self.temperature,
                "max_tokens": self.max_tokens
            }

            if stop:
                data["stop"] = stop

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=data,
                timeout=60
            )

            if response.status_code == 200:
                result = response.json()
                content = result.get("choices", [{}])[0].get("message", {}).get("content", "")
                logger.info(f"OpenAI response received: {len(content)} chars")
                return content

            logger.error(f"OpenAI API error: {response.status_code} - {response.text}")
            return self._get_fallback_response(prompt)

        except Exception as e:
            # Deliberately broad: any transport/parse failure degrades to the
            # canned fallback rather than crashing the caller.
            logger.error(f"OpenAI API call failed: {e}")
            return self._get_fallback_response(prompt)

    def _get_fallback_response(self, prompt: str) -> str:
        """
        Return a canned JSON fallback used when the API call fails.

        Args:
            prompt: Input prompt (unused; kept for a uniform signature).

        Returns:
            JSON string shaped like a normal agent response.
        """
        return json.dumps({
            "thought": "API调用失败，使用后备响应",
            "action": "",
            "observation": "服务暂时不可用",
            "answer": "抱歉，AI 服务暂时出现问题。请稍后重试或直接联系我们的客服热线。"
        })

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this model type."""
        return "openai"

    def _generate(
        self,
        prompts: list,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> LLMResult:
        """Generate one response per prompt.

        Builds explicit ``Generation`` objects (rather than bare dicts) so
        the ``LLMResult`` payload matches the LangChain schema.
        """
        generations = [
            [Generation(text=self._call(prompt, stop, run_manager, **kwargs))]
            for prompt in prompts
        ]
        return LLMResult(generations=generations)


class GrokChat(BaseLanguageModel):
    """Wrapper for a Grok model endpoint (kept for backward compatibility).

    Configuration precedence: explicit constructor kwargs first, then the
    GROK_API_KEY / GROK_ENDPOINT environment variables, then the class
    defaults below. When no API key is available, canned mock responses
    are returned for demo purposes.
    """

    model_name: str = "grok-code-fast-1"
    temperature: float = 0.0
    max_tokens: int = 2000
    api_key: str = ""
    endpoint: str = ""

    def __init__(self, **kwargs):
        # Environment variables act only as defaults: explicitly passed
        # kwargs win. (Previously the env values unconditionally overwrote
        # caller-supplied settings after super().__init__().)
        kwargs.setdefault('api_key', os.getenv('GROK_API_KEY', ''))
        kwargs.setdefault('endpoint', os.getenv('GROK_ENDPOINT', 'http://grok-code-fast-1:8000/v1/chat/completions'))
        super().__init__(**kwargs)

        if not self.api_key:
            logger.warning("GROK_API_KEY not set, using mock responses")

    def _call(
        self,
        prompt: str,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> str:
        """
        Call the Grok chat-completions endpoint.

        Args:
            prompt: Input prompt text (sent as a single user message).
            stop: Optional list of stop sequences.
            run_manager: Optional LangChain callback manager (not used here).

        Returns:
            The model's response text; a keyword-matched mock response when
            no API key is configured; or a canned JSON fallback payload on
            request failure / non-200 status.
        """
        try:
            if not self.api_key:
                # Demo mode: no key configured, answer from canned templates.
                return self._get_mock_response(prompt)

            # Build the request payload.
            data = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                "temperature": self.temperature,
                "max_tokens": self.max_tokens
            }

            if stop:
                data["stop"] = stop

            # Send the request.
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            response = requests.post(
                self.endpoint,
                headers=headers,
                json=data,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                content = result.get("choices", [{}])[0].get("message", {}).get("content", "")
                logger.info(f"Grok response received: {len(content)} chars")
                return content

            logger.error(f"Grok API error: {response.status_code} - {response.text}")
            return self._get_fallback_response(prompt)

        except Exception as e:
            # Deliberately broad: any transport/parse failure degrades to the
            # canned fallback rather than crashing the caller.
            logger.error(f"Grok API call failed: {e}")
            return self._get_fallback_response(prompt)

    def _get_mock_response(self, prompt: str) -> str:
        """
        Return a mock response selected by simple keyword matching (demo use).

        Args:
            prompt: Input prompt text.

        Returns:
            JSON string shaped like a normal agent response.
        """
        # Lowercase so the English keywords match case-insensitively
        # (the Chinese keywords are unaffected by lower()).
        prompt_lower = prompt.lower()

        if "订单" in prompt_lower or "order" in prompt_lower:
            return json.dumps({
                "thought": "用户询问订单相关信息，需要查询订单状态",
                "action": "query_order",
                "observation": "调用订单查询API",
                "answer": "我来帮您查询订单状态。请告诉我您的订单号。"
            })

        elif "退款" in prompt_lower or "refund" in prompt_lower:
            return json.dumps({
                "thought": "用户需要办理退款，需要先确认订单状态",
                "action": "query_order",
                "observation": "检查订单是否符合退款条件",
                "answer": "我来帮您办理退款。首先需要确认您的订单状态，请提供订单号。"
            })

        elif "发票" in prompt_lower or "receipt" in prompt_lower:
            return json.dumps({
                "thought": "用户提到发票，可能需要OCR识别",
                "action": "ocr_image",
                "observation": "调用OCR服务识别发票信息",
                "answer": "我可以帮您识别发票信息。请上传发票图片，我会自动提取相关信息。"
            })

        else:
            return json.dumps({
                "thought": "处理用户的通用客服请求",
                "action": "",
                "observation": "无需调用外部工具",
                "answer": "您好！我是智能客服助理。请告诉我您需要什么帮助？我可以帮您查询订单、办理退款、识别发票等。"
            })

    def _get_fallback_response(self, prompt: str) -> str:
        """
        Return a canned JSON fallback used when the API call fails.

        Args:
            prompt: Input prompt (unused; kept for a uniform signature).

        Returns:
            JSON string shaped like a normal agent response.
        """
        return json.dumps({
            "thought": "API调用失败，使用后备响应",
            "action": "",
            "observation": "服务暂时不可用",
            "answer": "抱歉，服务暂时出现问题。请稍后重试或直接联系我们的客服热线。"
        })

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this model type."""
        return "grok"

    def _generate(
        self,
        prompts: list,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> LLMResult:
        """Generate one response per prompt.

        Builds explicit ``Generation`` objects (rather than bare dicts) so
        the ``LLMResult`` payload matches the LangChain schema.
        """
        generations = [
            [Generation(text=self._call(prompt, stop, run_manager, **kwargs))]
            for prompt in prompts
        ]
        return LLMResult(generations=generations)


def create_llm_service() -> BaseLanguageModel:
    """Build the LLM backend selected by environment configuration.

    Preference order: Ollama (both OLLAMA_BASE_URL and OLLAMA_MODEL_NAME
    set), then OpenAI (OPENAI_API_KEY), then Grok (GROK_API_KEY). When
    nothing is configured, a default Ollama instance is returned.

    Returns:
        A configured LLM service instance.
    """
    # Guard-clause style: return as soon as a configured backend is found.
    if os.getenv('OLLAMA_BASE_URL') and os.getenv('OLLAMA_MODEL_NAME'):
        logger.info("Using Ollama service")
        return OllamaChat()

    if os.getenv('OPENAI_API_KEY'):
        logger.info("Using OpenAI service")
        return OpenAIChat()

    if os.getenv('GROK_API_KEY'):
        logger.info("Using Grok service")
        return GrokChat()

    # Nothing configured: fall back to the local default.
    logger.warning("No LLM service configured, using default Ollama")
    return OllamaChat()


def create_grok_chat(model_name: str = "grok-code-fast-1", temperature: float = 0.0) -> GrokChat:
    """Create a GrokChat model instance (kept for backward compatibility).

    Args:
        model_name: Name of the Grok model to use.
        temperature: Sampling temperature.

    Returns:
        A configured GrokChat instance.
    """
    return GrokChat(model_name=model_name, temperature=temperature)
