import json
import asyncio
from typing import Optional, Dict, Any, List, Union
from enum import Enum
import httpx
from openai import AsyncOpenAI
import os
from config import get_settings
from app.utils.logger_util import user_logger


class LLMProvider(Enum):
    """Supported large-language-model providers.

    The enum *value* is the lowercase string accepted in configuration
    (``settings.llm_provider``) and by ``LLMClient(provider=...)``.
    """
    # OPENAI = "openai"
    QWEN = "qwen"
    DEEPSEEK = "deepseek"
    CLAUDE = "claude"
    GLMZ = "glm"  # NOTE(review): member name GLMZ vs value "glm" — confirm intentional


class LLMClient:
    """统一的大模型客户端"""

    def __init__(
            self,
            provider: Optional[Union[str, LLMProvider]] = None,
            api_key: Optional[str] = None,
            base_url: Optional[str] = None,
            model: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
            **kwargs
    ):
        """Initialize the LLM client.

        Args:
            provider: Model provider (enum member or its string value).
                Defaults to ``settings.llm_provider`` when omitted.
            api_key: API key; falls back to settings / environment variables.
            base_url: API base URL; falls back to the provider default.
            model: Model name; falls back to ``settings.model_name`` and then
                to the per-provider default.
            max_tokens: Maximum token count; falls back to settings.
            temperature: Sampling temperature; falls back to settings.
            **kwargs: Extra options: ``thread_id`` (per-user log sharding),
                ``timeout`` (seconds, default 60), ``max_retries`` (default 3).
        """
        self.settings = get_settings()

        # Resolve provider: an explicit argument wins, otherwise configuration.
        if provider is None:
            provider = self.settings.llm_provider

        if isinstance(provider, str):
            self.provider = LLMProvider(provider.lower())
        else:
            self.provider = provider

        # Basic parameters.
        self.api_key = api_key or self._get_default_api_key()
        self.base_url = base_url or self._get_default_base_url()
        # Prefer the configured model name, then the per-provider default.
        self.model = model or (self.settings.model_name if getattr(self.settings, "model_name", None) else self._get_default_model())
        # Explicit None checks: legitimate falsy overrides (temperature=0.0,
        # max_tokens=0) must not silently fall back to the settings defaults.
        self.max_tokens = max_tokens if max_tokens is not None else self.settings.max_tokens
        self.temperature = temperature if temperature is not None else self.settings.temperature
        # Logging context (used for per-user log sharding).
        self.thread_id: Optional[str] = kwargs.get("thread_id")

        # Other configuration.
        self.timeout = kwargs.get('timeout', 60)
        self.max_retries = kwargs.get('max_retries', 3)

        # Create the underlying client.
        self._init_client()

    def _get_default_api_key(self) -> Optional[str]:
        """Return the default API key for the active provider.

        Prefers the value from settings and falls back to the provider's
        environment variable. Only the active provider's entry is evaluated —
        the original built the whole mapping eagerly, which read every
        provider's settings attribute and environment variable on each call
        and raised AttributeError if an unrelated provider's key attribute
        was missing from the settings object.
        """
        sources = {
            # LLMProvider.OPENAI: ("openai_api_key", "OPENAI_API_KEY"),
            LLMProvider.QWEN: ("qwen_api_key", "QWEN_API_KEY"),
            LLMProvider.DEEPSEEK: ("deepseek_api_key", "DEEPSEEK_API_KEY"),
            LLMProvider.CLAUDE: ("claude_api_key", "CLAUDE_API_KEY"),
            LLMProvider.GLMZ: ("glm_api_key", "GLM_API_KEY"),
        }
        source = sources.get(self.provider)
        if source is None:
            return None
        settings_attr, env_var = source
        return getattr(self.settings, settings_attr, None) or os.getenv(env_var)

    def _get_default_base_url(self) -> Optional[str]:
        """Return the default API base URL for the active provider.

        Returns None for providers without a known default.
        """
        # LLMProvider.OPENAI is currently disabled (would be
        # settings.openai_base_url or "https://api.openai.com/v1").
        if self.provider is LLMProvider.QWEN:
            return "https://dashscope.aliyuncs.com/compatible-mode/v1"
        if self.provider is LLMProvider.DEEPSEEK:
            return "https://api.deepseek.com/v1"
        if self.provider is LLMProvider.CLAUDE:
            return "https://api.anthropic.com/v1"
        if self.provider is LLMProvider.GLMZ:
            return "https://open.bigmodel.cn/api/paas/v4"
        return None

    def _get_default_model(self) -> str:
        """Return the default model name for the active provider.

        Unknown providers fall back to "gpt-3.5-turbo".
        """
        # LLMProvider.OPENAI is currently disabled (default was
        # settings.model_name or "gpt-3.5-turbo").
        if self.provider is LLMProvider.QWEN:
            return "qwen-plus"
        if self.provider is LLMProvider.DEEPSEEK:
            return "deepseek-chat"
        if self.provider is LLMProvider.CLAUDE:
            return "claude-3-sonnet-20240229"
        if self.provider is LLMProvider.GLMZ:
            return "glm-4"
        return "gpt-3.5-turbo"

    def _init_client(self):
        """Create the underlying SDK/HTTP client for the active provider."""
        if self.provider in (LLMProvider.QWEN, LLMProvider.DEEPSEEK, LLMProvider.GLMZ):
            # These providers expose OpenAI-compatible endpoints, so the
            # official OpenAI async SDK can be reused as-is.
            self.client = AsyncOpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
                timeout=self.timeout,
                max_retries=self.max_retries
            )
        else:
            # Generic HTTP client for non-OpenAI-compatible providers (Claude).
            # Anthropic's Messages API authenticates with 'x-api-key' plus an
            # 'anthropic-version' header rather than a Bearer token; the Bearer
            # header is kept for backward compatibility with any other
            # provider routed through this branch.
            self.client = httpx.AsyncClient(
                timeout=self.timeout,
                headers={
                    "Authorization": f"Bearer {self.api_key}",
                    "x-api-key": self.api_key or "",
                    "anthropic-version": "2023-06-01",
                }
            )

    async def chat_completion(
            self,
            messages: List[Dict[str, str]],
            model: Optional[str] = None,
            max_tokens: Optional[int] = None,
            temperature: Optional[float] = None,
            stream: bool = False,
            **kwargs
    ) -> Union[Dict[str, Any], str]:
        """Chat-completion entry point, dispatching to the active provider.

        Args:
            messages: Message list, e.g. ``[{"role": "user", "content": "hi"}]``.
            model: Model name; defaults to the client's configured model.
            max_tokens: Maximum tokens; defaults to the client's setting.
            temperature: Sampling temperature; defaults to the client's setting.
            stream: Whether to stream (OpenAI-compatible providers only).
            **kwargs: Extra provider arguments; ``return_full_response=True``
                makes the OpenAI-compatible path return the full response dict.

        Returns:
            The reply content string, the full response dict, or (when
            ``stream=True``) the raw streaming response object.

        Raises:
            Exception: Wraps any provider failure; the original exception is
                chained as ``__cause__``.
        """
        model = model or self.model
        # Explicit None checks: 0 / 0.0 are valid overrides and must not be
        # silently replaced by the instance defaults.
        max_tokens = max_tokens if max_tokens is not None else self.max_tokens
        temperature = temperature if temperature is not None else self.temperature

        try:
            if self.provider == LLMProvider.CLAUDE:
                return await self._claude_chat_completion(messages, model, max_tokens, temperature, **kwargs)
            return await self._openai_compatible_chat_completion(
                messages, model, max_tokens, temperature, stream, **kwargs
            )
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"调用{self.provider.value}模型失败: {str(e)}") from e

    async def _openai_compatible_chat_completion(
            self,
            messages: List[Dict[str, str]],
            model: str,
            max_tokens: int,
            temperature: float,
            stream: bool = False,
            **kwargs
    ) -> Union[Dict[str, Any], str]:
        """OpenAI兼容的聊天补全"""
        response = await self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            stream=stream,
            **kwargs
        )
        # 非流式时尝试提取并打印/记录“思考”内容（reasoning）
        if not stream:
            try:
                self._log_reasoning_from_response(response)
            except Exception:
                # 日志失败不影响主流程
                pass

        if stream:
            return response
        
        # 返回完整响应或仅内容
        if kwargs.get('return_full_response', False):
            return response.model_dump()
        else:
            return response.choices[0].message.content

    async def _claude_chat_completion(
            self,
            messages: List[Dict[str, str]],
            model: str,
            max_tokens: int,
            temperature: float,
            **kwargs
    ) -> str:
        """Claude API聊天补全"""
        # Claude API的请求格式可能不同，这里提供基础实现
        # 实际使用时需要根据Claude API文档调整
        data = {
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "messages": messages
        }
        
        response = await self.client.post(
            f"{self.base_url}/messages",
            json=data,
            headers={"Content-Type": "application/json"}
        )
        response.raise_for_status()
        result = response.json()
        
        return result.get("content", [{}])[0].get("text", "")

    async def simple_chat(self, prompt: str, system_prompt: Optional[str] = None, thread_id: Optional[str] = None) -> str:
        """简单聊天接口

        Args:
            prompt: 用户提示
            system_prompt: 系统提示

        Returns:
            模型回复内容
        """
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})
        # 透传thread_id给实例，用于日志
        if thread_id is not None:
            self.thread_id = thread_id
        return await self.chat_completion(messages)

    async def batch_chat(
            self,
            prompts: List[str],
            system_prompt: Optional[str] = None,
            concurrent_limit: int = 5
    ) -> List[str]:
        """批量聊天

        Args:
            prompts: 提示列表
            system_prompt: 系统提示
            concurrent_limit: 并发限制

        Returns:
            回复列表
        """
        semaphore = asyncio.Semaphore(concurrent_limit)
        
        async def process_single(prompt: str) -> str:
            async with semaphore:
                return await self.simple_chat(prompt, system_prompt)
        
        tasks = [process_single(prompt) for prompt in prompts]
        return await asyncio.gather(*tasks)

    def get_provider_info(self) -> Dict[str, Any]:
        """获取提供商信息"""
        return {
            "provider": self.provider.value,
            "model": self.model,
            "base_url": self.base_url,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature
        }

    async def close(self):
        """关闭客户端连接"""
        if hasattr(self.client, 'close'):
            await self.client.close()

    async def __aenter__(self):
        """Async context manager entry; yields this client unchanged."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit; closes the underlying client."""
        await self.close()

    # -- Internal helper: extract and record "thinking" content --
    def _log_reasoning_from_response(self, response_obj: Any) -> None:
        """Extract provider-native reasoning from an OpenAI-compatible response.

        When reasoning is present (and settings allow it), it is wrapped in
        ``<think>`` tags, printed to the console, and written to the per-user
        log. Currently handles:
        - deep-thinking models (e.g. deepseek-reasoner / deepseek-r1) exposing
          a ``message.reasoning_content`` field;
        - new-style ``content`` part lists whose parts have ``type`` of
          ``reasoning`` / ``reasoning_content``.
        """
        try:
            # Normalize SDK objects to a plain dict; tolerate anything else.
            data = response_obj.model_dump() if hasattr(response_obj, "model_dump") else response_obj
        except Exception:
            data = None

        if not isinstance(data, dict):
            return

        reasoning_text: Optional[str] = None

        try:
            choices = data.get("choices") or []
            if choices:
                msg = choices[0].get("message") or {}
                # deepseek-style dedicated field.
                if isinstance(msg, dict) and msg.get("reasoning_content"):
                    reasoning_text = msg.get("reasoning_content")
                # content may be a list of typed parts.
                if reasoning_text is None:
                    msg_content = msg.get("content")
                    if isinstance(msg_content, list):
                        parts: List[str] = []
                        for part in msg_content:
                            if isinstance(part, dict) and part.get("type") in ("reasoning", "reasoning_content"):
                                txt = part.get("text") or part.get("content") or ""
                                if txt:
                                    parts.append(str(txt))
                        if parts:
                            reasoning_text = "\n".join(parts)
        except Exception:
            # Best-effort extraction: never let logging break the main flow.
            pass

        # Only print/record when think-logging is enabled AND provider-native
        # reasoning logging is explicitly allowed.
        if (
            reasoning_text
            and getattr(self.settings, "log_think", True)
            and getattr(self.settings, "log_provider_reasoning", False)
        ):
            # Truncate to the configured character limit (0 = unlimited).
            limit = getattr(self.settings, "think_log_char_limit", 0) or 0
            if limit > 0 and len(reasoning_text) > limit:
                reasoning_text = reasoning_text[:limit] + "..."
            think_wrapped = f"<think>{reasoning_text}</think>"
            # Console echo.
            print(think_wrapped)
            # Per-user log write (only when a thread_id was provided).
            if self.thread_id:
                try:
                    user_logger.info(self.thread_id, think_wrapped)
                except Exception:
                    pass

    # —— 公用“思考摘要”生成 ——
    def _build_think_prompts(
        self,
        purpose: str,
        context: Dict[str, Any],
        style: str = "concise",
    ) -> Dict[str, str]:
        """
        构造用于生成“思考摘要”的提示词，约束输出为一段完整的中文话语，
        直接可落入日志的 desc 字段（不含任何 JSON/列表/标签/引号）。
        """
        # 风格控制仅影响长度提示，不改变“一段话”这一硬性约束
        length_tip = "尽量简短，大概200字。" if style == "concise" else "控制在200字以内。"

        sys = (
            "你是一个简洁的分析助手。请输出‘思考摘要’，用于解释当前步骤的结果或决策。\n"
            "要求：\n"
            "- 只输出一段完整的中文话语；\n"
            "- 不要输出任何 JSON、列表、项目符号、代码块、XML/HTML 标签（如<think>）或引号；\n"
            "- 直接给出当前步骤的结论与关键理由，避免展示推理过程细节；\n"
            f"- {length_tip}"
        )
        usr = (
            "基于以下上下文，生成上述格式的‘思考摘要’：\n" + json.dumps(context, ensure_ascii=False)
            + f"\n\n当前步骤：{purpose}"
        )
        return {"system": sys, "user": usr}

    async def generate_think(
        self,
        purpose: str,
        context: Dict[str, Any],
        *,
        model: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        style: str = "concise",
        char_limit: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Proactively ask the model for a "thinking summary".

        Used so that models without native ``reasoning_content`` can still
        have their reasoning recorded. Current contract: ``desc`` only needs a
        single plain Chinese paragraph (no braces / JSON key names).

        - The prompt asks the model to output one plain sentence directly;
        - For backward compatibility, if the model still returns JSON we try
          to extract ``summary``/``decision`` etc. into one sentence;
        - Returns ``{"parsed": <any parsed structure>, "display": <plain
          one-line text>}`` for the caller to write into ``desc``.
        """
        if not getattr(self.settings, "log_think", True):
            return {}

        prompts = self._build_think_prompts(purpose, context, style)
        model = model or getattr(self.settings, "think_model_name", None) or self.model
        max_tokens = max_tokens or getattr(self.settings, "think_max_tokens", 300)
        temperature = temperature if temperature is not None else getattr(self.settings, "think_temperature", 0.2)

        messages = [
            {"role": "system", "content": prompts["system"]},
            {"role": "user", "content": prompts["user"]},
        ]

        try:
            resp = await self.chat_completion(
                messages=messages,
                model=model,
                max_tokens=max_tokens,
                temperature=temperature,
                stream=False,
            )
            text = resp if isinstance(resp, str) else str(resp)
        except Exception as e:
            return {"error": str(e), "display": f"[think-error] {purpose}: {str(e)}"}

        # Extract plain one-line text (tolerating a possible JSON reply).
        s = (text or "").strip()

        # Strip a possible code fence.
        if s.startswith("```") and s.endswith("```"):
            s = s.strip("`")  # drop the backticks
            # Drop a possible language tag line (e.g. ```json\n{...}).
            nl = s.find("\n")
            if nl != -1:
                s = s[nl + 1 :].strip()

        parsed: Dict[str, Any] = {}
        plain: Optional[str] = None

        # If it looks like JSON, prefer summary/decision/reason-style fields.
        if s.startswith("{") and s.endswith("}"):
            try:
                obj = json.loads(s)
                if isinstance(obj, dict):
                    parsed = obj
                    for key in [
                        "summary",
                        "decision",
                        "reason",
                        "rationale",
                        "conclusion",
                        "desc",
                        "message",
                    ]:
                        val = obj.get(key)
                        if isinstance(val, str) and val.strip():
                            plain = val.strip()
                            break
                    # Fall back to joining key_points into one sentence.
                    if plain is None:
                        kp = obj.get("key_points")
                        if isinstance(kp, list) and kp:
                            join_txt = "；".join([str(x).strip() for x in kp if str(x).strip()][:3])
                            if join_txt:
                                plain = join_txt
            except Exception:
                parsed = {"raw": s}

        # If nothing usable was extracted from JSON, or it was not JSON at
        # all, use the raw text.
        if not plain:
            plain = s

        # Cleanup: strip <think> tags, outer quotes, extra whitespace/newlines.
        if isinstance(plain, str):
            # Remove possible <think> tag wrapping.
            plain = plain.replace("<think>", "").replace("</think>", "").strip()
            # Collapse multiple lines into one.
            plain = " ".join([seg.strip() for seg in plain.splitlines() if seg.strip()])
            # Strip an outer pair of matching quotes.
            if (plain.startswith("\"") and plain.endswith("\"")) or (
                plain.startswith("'") and plain.endswith("'")
            ):
                plain = plain[1:-1].strip()

        # Truncate to the configured character limit.
        limit = char_limit if char_limit is not None else getattr(self.settings, "think_log_char_limit", 0)
        display = plain or purpose
        if limit and len(display) > limit:
            display = display[:limit] + "..."

        # Return plain text for upstream nodes to write into desc.
        return {"parsed": parsed if parsed else {"raw": text}, "display": display}

    def generate_think_sync(
        self,
        purpose: str,
        context: Dict[str, Any],
        *,
        model: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        style: str = "concise",
        char_limit: Optional[int] = None,
    ) -> Dict[str, Any]:
        """同步包装，便于在同步节点里直接调用。"""
        return asyncio.run(
            self.generate_think(
                purpose,
                context,
                model=model,
                max_tokens=max_tokens,
                temperature=temperature,
                style=style,
                char_limit=char_limit,
            )
        )
