"""
Grok 模型服务封装
提供与 grok-code-fast-1 模型的接口
"""

import json
import logging
import os
from typing import Any, Dict, Optional

import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema import BaseLanguageModel, Generation, LLMResult

logger = logging.getLogger(__name__)


class GrokChat(BaseLanguageModel):
    """LangChain wrapper for the grok-code-fast-1 chat model.

    Configuration is read from the environment in ``__init__``:

    * ``GROK_API_KEY`` -- bearer token for the chat-completions endpoint.
      When unset, deterministic mock responses are returned so the rest of
      the pipeline can run offline.
    * ``GROK_ENDPOINT`` -- chat-completions URL; defaults to the in-cluster
      service address.

    All responses (real, mock, and fallback) are returned as strings; the
    mock and fallback paths emit a JSON document with ``thought`` /
    ``action`` / ``observation`` / ``answer`` keys.
    """

    # Declared as class attributes so the pydantic-based langchain base
    # class registers them as model fields.
    model_name: str = "grok-code-fast-1"  # model identifier sent in each request
    temperature: float = 0.0              # sampling temperature
    max_tokens: int = 2000                # completion length cap
    api_key: str = ""                     # populated from GROK_API_KEY
    endpoint: str = ""                    # populated from GROK_ENDPOINT

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): environment values intentionally override any
        # credentials/endpoint passed through constructor kwargs.
        self.api_key = os.getenv('GROK_API_KEY', '')
        self.endpoint = os.getenv('GROK_ENDPOINT', 'http://grok-code-fast-1:8000/v1/chat/completions')

        if not self.api_key:
            logger.warning("GROK_API_KEY not set, using mock responses")

    def _call(
        self,
        prompt: str,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> str:
        """Send one prompt to the Grok chat-completions API.

        Args:
            prompt: User prompt to send as a single ``user`` message.
            stop: Optional list of stop sequences forwarded to the API.
            run_manager: LangChain callback manager (currently unused).

        Returns:
            The model's reply text. Without an API key a keyword-matched
            mock response is returned; on any API or network failure a
            canned fallback response is returned instead of raising.
        """
        try:
            if not self.api_key:
                # Offline/demo mode: no credentials configured.
                return self._get_mock_response(prompt)

            # OpenAI-compatible chat-completions payload.
            data = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                "temperature": self.temperature,
                "max_tokens": self.max_tokens
            }

            if stop:
                data["stop"] = stop

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            response = requests.post(
                self.endpoint,
                headers=headers,
                json=data,
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                # Guard against a present-but-empty "choices" list, which
                # previously raised IndexError and fell through to the
                # blanket except handler.
                choices = result.get("choices") or [{}]
                content = choices[0].get("message", {}).get("content", "")
                logger.info("Grok response received: %d chars", len(content))
                return content

            logger.error(
                "Grok API error: %s - %s", response.status_code, response.text
            )
            return self._get_fallback_response(prompt)

        except Exception:
            # Deliberate best-effort design: any failure (network, JSON,
            # schema) degrades to a canned fallback rather than crashing
            # the agent loop. logger.exception records the traceback.
            logger.exception("Grok API call failed")
            return self._get_fallback_response(prompt)

    def _get_mock_response(self, prompt: str) -> str:
        """Return a canned JSON response for demo/offline mode.

        Simple keyword matching (Chinese and English) picks a scenario:
        order lookup, refund, invoice OCR, or a generic greeting.

        Args:
            prompt: The user prompt to match keywords against.

        Returns:
            A JSON string with thought/action/observation/answer keys.
        """
        # Lower-casing only affects the English keywords; the Chinese
        # keywords are unchanged by .lower().
        prompt_lower = prompt.lower()

        if "订单" in prompt_lower or "order" in prompt_lower:
            return json.dumps({
                "thought": "用户询问订单相关信息，需要查询订单状态",
                "action": "query_order",
                "observation": "调用订单查询API",
                "answer": "我来帮您查询订单状态。请告诉我您的订单号。"
            })

        elif "退款" in prompt_lower or "refund" in prompt_lower:
            return json.dumps({
                "thought": "用户需要办理退款，需要先确认订单状态",
                "action": "query_order",
                "observation": "检查订单是否符合退款条件",
                "answer": "我来帮您办理退款。首先需要确认您的订单状态，请提供订单号。"
            })

        elif "发票" in prompt_lower or "receipt" in prompt_lower:
            return json.dumps({
                "thought": "用户提到发票，可能需要OCR识别",
                "action": "ocr_image",
                "observation": "调用OCR服务识别发票信息",
                "answer": "我可以帮您识别发票信息。请上传发票图片，我会自动提取相关信息。"
            })

        else:
            return json.dumps({
                "thought": "处理用户的通用客服请求",
                "action": "",
                "observation": "无需调用外部工具",
                "answer": "您好！我是智能客服助理。请告诉我您需要什么帮助？我可以帮您查询订单、办理退款、识别发票等。"
            })

    def _get_fallback_response(self, prompt: str) -> str:
        """Return the canned response used when the API call fails.

        Args:
            prompt: The original user prompt (unused; kept for interface
                symmetry with ``_get_mock_response``).

        Returns:
            A JSON string with thought/action/observation/answer keys.
        """
        return json.dumps({
            "thought": "API调用失败，使用后备响应",
            "action": "",
            "observation": "服务暂时不可用",
            "answer": "抱歉，服务暂时出现问题。请稍后重试或直接联系我们的客服热线。"
        })

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses to label this model type."""
        return "grok"

    def _generate(
        self,
        prompts: list,
        stop: Optional[list] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs
    ) -> LLMResult:
        """Generate one completion per prompt.

        Bug fix: ``LLMResult.generations`` must be
        ``List[List[Generation]]``; the previous implementation appended
        plain ``{"text": ...}`` dicts, which fails langchain's pydantic
        validation.

        Args:
            prompts: Prompts to run sequentially through ``_call``.
            stop: Optional stop sequences forwarded to each call.
            run_manager: LangChain callback manager, forwarded to ``_call``.

        Returns:
            An ``LLMResult`` with one single-Generation list per prompt.
        """
        generations = [
            [Generation(text=self._call(prompt, stop, run_manager, **kwargs))]
            for prompt in prompts
        ]
        return LLMResult(generations=generations)


def create_grok_chat(model_name: str = "grok-code-fast-1", temperature: float = 0.0) -> GrokChat:
    """Factory for a configured ``GrokChat`` instance.

    Args:
        model_name: Identifier of the Grok model to use.
        temperature: Sampling temperature passed through to the model.

    Returns:
        A ``GrokChat`` configured with the given model name and temperature.
    """
    options = {
        "model_name": model_name,
        "temperature": temperature,
    }
    return GrokChat(**options)
