from __future__ import annotations

import json
import time
from typing import Any, Dict, Generator, List, Optional

from backend.common.config import config_manager, get_current_model_name
from backend.llm import llm_manager
from backend.logger_setup import get_logger
from backend.rag.retriever import rag_manager
from backend.settings import MAX_NEW_TOKENS, TEMPERATURE, TOP_P, USE_STREAMING
from backend.tools.tool_caller import tool_caller

from .domain import ChatSummary, Message, Role
from .exceptions import ChatError, InvalidRequestError, LLMProviderError, ToolCallError
from .repositories import MessageRepository, SessionRepository

# Module-level logger shared by all chat-service code in this module.
logger = get_logger("chat")


class ChatService:
    """Use case layer coordinating chat flow, persistence, and model calls.

    The service lazily loads the configured LLM provider (falling back to a
    mock model when loading fails), composes prompts from system
    instructions, optional RAG context, tool specifications and session
    history, and keeps a bounded in-memory history cache per session as a
    fallback when the database repositories are unavailable.
    """

    # Upper bound on messages kept per session in the in-memory fallback
    # cache.  Must stay >= the largest ``limit`` ever passed to
    # ``_get_session_history`` (currently 100 in ``get_chat_summary``) so
    # trimming never changes observable history.
    _MEMORY_CACHE_MAX = 200

    def __init__(
        self,
        message_repo: Optional[MessageRepository] = None,
        session_repo: Optional[SessionRepository] = None,
    ):
        """Initialize the service; repositories default to fresh instances."""
        self.llm_provider = None
        self.active_provider: Optional[str] = None
        self.message_repo = message_repo or MessageRepository()
        self.session_repo = session_repo or SessionRepository()
        # Per-session fallback history used when the DB layer fails.
        self._memory_sessions: Dict[str, List[Message]] = {}

    def process_message(
        self,
        session_id: str,
        message: str,
        use_rag: bool = True,
        use_tools: bool = True,
        stream: Optional[bool] = None,
    ) -> str | Generator[str, None, None]:
        """Entry point for chat completion.

        Args:
            session_id: Conversation identifier used for history lookup.
            message: The user's new message.
            use_rag: Include retrieved document context in the prompt.
            use_tools: Advertise available tools in the prompt.
            stream: Overrides the ``USE_STREAMING`` default when not None.

        Returns:
            The full response string, or a generator of text chunks when
            streaming is enabled.
        """
        self._load_model()
        use_streaming = USE_STREAMING if stream is None else stream

        logger.info(
            f"处理消息: session_id={session_id}, message={message[:50]}..., "
            f"use_rag={use_rag}, use_streaming={use_streaming}"
        )

        prompt = self._build_prompt(session_id, message, use_rag, use_tools)

        if use_streaming:
            # The generator caches session history itself once consumed.
            return self._generate_streaming_response(prompt, session_id, message)

        response = self._generate_response(prompt)
        self._cache_session_history(session_id, message, response)
        return response

    def handle_tool_call(self, session_id: str, tool_call_payload: Any) -> Dict[str, Any]:
        """Process a tool call request from the client or the LLM.

        Raises:
            InvalidRequestError: Malformed payload or missing tool name.
            ToolCallError: The tool itself failed during execution.
        """
        payload = self._parse_tool_call(tool_call_payload)
        tool_call = payload.get("tool_call")
        # Also reject non-dict values: previously a payload such as
        # {"tool_call": "x"} escaped as an uncaught AttributeError below.
        if not tool_call or not isinstance(tool_call, dict):
            raise InvalidRequestError("无效的工具调用格式", details={"payload": payload})

        tool_name = tool_call.get("name")
        parameters = tool_call.get("parameters", {})

        if not tool_name:
            raise InvalidRequestError("工具名称不能为空")

        logger.info(f"执行工具调用: session_id={session_id}, tool_name={tool_name}")
        try:
            result = tool_caller.execute_tool(tool_name, parameters)
            self._save_tool_call_history(session_id, tool_name, parameters, result)
            return result
        except Exception as exc:
            logger.error(f"工具调用处理失败: {exc}")
            raise ToolCallError(str(exc)) from exc

    def get_chat_summary(self, session_id: str) -> Dict[str, Any]:
        """Return a summary (message count, last activity) of a chat session."""
        try:
            history = self._get_session_history(session_id, limit=100)
            summary = ChatSummary(
                session_id=session_id,
                message_count=len(history),
                last_message_time=history[-1].created_at if history else None,
                has_history=bool(history),
            )
            return summary.to_dict()
        except Exception as exc:  # pragma: no cover - defensive
            logger.error(f"获取聊天摘要失败: {exc}")
            raise ChatError("获取聊天摘要失败", details=str(exc)) from exc

    # Internal helpers -----------------------------------------------------

    def _load_model(self) -> None:
        """Lazily load the configured LLM provider (idempotent).

        On any failure the service degrades to a mock model rather than
        raising, so chat endpoints stay responsive.
        """
        if self.llm_provider is not None:
            return

        try:
            self.active_provider = config_manager.llm_providers.active_provider
            logger.info(f"开始加载LLM提供商 {self.active_provider}")

            provider_config = getattr(config_manager.llm_providers, self.active_provider)
            self.llm_provider = llm_manager.get_provider(
                self.active_provider, provider_config.model_dump()
            )

            # Reuse the provider's embedding model for RAG when it has one.
            if hasattr(self.llm_provider, "embedding_model"):
                rag_manager.set_embedding_model(self.llm_provider.embedding_model)

            logger.info(f"LLM提供商加载完毕 {self.active_provider}")
        except Exception as exc:  # pragma: no cover - fallback
            logger.error(f"LLM提供商加载失败: {exc}")
            logger.warning("回退到模拟响应模型")
            self.llm_provider = self._create_mock_model()

    def _create_mock_model(self):
        """Build a fallback mock model used when the real provider fails to load."""

        class MockModel:
            def generate(self, text, **kwargs):
                return f"这是对 '{text}' 的响应。由于使用的是模拟模型，此响应是预定义的。"

            def generate_stream(self, text, **kwargs):
                response = f"这是对 '{text}' 的流式响应。由于使用的是模拟模型，此响应是预定义的。"
                # Per-character delay simulates token streaming for the UI.
                for char in response:
                    time.sleep(0.05)
                    yield char

        return MockModel()

    def _build_prompt(self, session_id: str, message: str, use_rag: bool, use_tools: bool) -> str:
        """Compose the full prompt string for the underlying LLM.

        Sections, in order: identity/system prompt, analysis-intent format,
        optional RAG context, optional tool instructions, recent session
        history, and finally the new user message.
        """
        session_history = self._get_session_history(session_id)
        model_name = (get_current_model_name() or "").strip()
        model_name_display = model_name or "unknown"

        system_prompt = f"""
你是一个本地部署的通用 AI 助手。
当前底层模型名称：{model_name_display}。
当用户问“你现在用的是什么模型”“你的模型是什么”“你是哪个模型”等类似问题时：

1. 请直接回答：“我现在使用的模型是 {model_name_display}。”然后可以用一两句话简要介绍该模型的特点，但不要凭空编造不确定的细节。
2. 如果你发现 {model_name_display} 明显是空或无效值，就回答：“我是由后端配置的大模型提供支持，具体模型名称由系统决定。”

除此之外，你像普通聊天助手一样正常回答用户的其他问题。禁止谎称自己是固定的某个模型，要以系统提供的 {model_name_display} 为准。
""".strip()

        analysis_instruction = """
你需要根据用户的问题和提供的信息，给出准确、有用的回答。
# 数据分析意图格式
当你需要进行数据分析时，请以JSON格式输出，格式为：
{
  "tool": "data.fetch_analyze",
  "params": {
    "query": "分析查询内容",
    "chart_type": "图表类型（可选：line, bar, pie, scatter等）",
    "export_excel": true/false（是否需要导出excel）
  }
}

当你需要导出最近一次分析的数据为excel时，请以JSON格式输出，格式为：
{
  "tool": "data.export_last_excel"
}
""".strip()

        messages: List[Dict[str, str]] = [
            {"role": Role.SYSTEM.value, "content": system_prompt},
            {"role": Role.SYSTEM.value, "content": analysis_instruction},
        ]

        if use_rag:
            try:
                context = rag_manager.get_context_for_query(message)
                # The retriever signals "no hits" inside the text itself;
                # skip the section rather than inject a useless notice.
                if context and "没有找到相关文档" not in context:
                    messages.append(
                        {
                            "role": Role.SYSTEM.value,
                            "content": f"以下是与用户问题相关的文档信息，供参考：\n{context}",
                        }
                    )
            except Exception as exc:  # pragma: no cover - retriever errors are non-fatal
                logger.error(f"RAG检索失败: {exc}")

        if use_tools:
            available_tools = tool_caller.get_available_tools()
            if available_tools:
                tools_info = json.dumps(available_tools, ensure_ascii=False)
                tool_instruction = (
                    f"你可以使用以下工具来帮助回答问题（如果需要）：{tools_info}\n"
                    "如果你决定使用工具，请以JSON格式输出工具调用信息，格式为: "
                    '{"tool_call": {"name": "工具名称", "parameters": {工具参数}}}。'
                )
                messages.append({"role": Role.SYSTEM.value, "content": tool_instruction})

        for item in session_history:
            role_value = item.role.value if isinstance(item.role, Role) else str(item.role)
            # Unknown roles are coerced to "user" so the prompt stays valid.
            if role_value not in {Role.USER.value, Role.ASSISTANT.value, Role.SYSTEM.value}:
                role_value = Role.USER.value
            messages.append({"role": role_value, "content": item.content})

        messages.append({"role": Role.USER.value, "content": message})

        role_label = {
            Role.SYSTEM.value: "系统",
            Role.USER.value: "用户",
            Role.ASSISTANT.value: "助手",
        }
        prompt_sections = [f"{role_label.get(msg['role'], '系统')}: {msg['content']}" for msg in messages]
        # Trailing assistant label cues the model to continue as the assistant.
        prompt_sections.append("助手:")
        return "\n\n".join(prompt_sections)

    def _generate_response(self, prompt: str) -> str:
        """Generate the full response text from the LLM.

        Raises:
            LLMProviderError: Any provider failure, with the cause chained.
        """
        try:
            start_time = time.time()
            if hasattr(self.llm_provider, "generate"):
                response = self.llm_provider.generate(
                    prompt,
                    max_new_tokens=MAX_NEW_TOKENS,
                    temperature=TEMPERATURE,
                    top_p=TOP_P,
                )
            else:
                # Provider object without a generate() API: canned reply.
                response = f"这是对 '{prompt[:100]}...' 的响应。当前正在使用模拟模型。"

            generation_time = time.time() - start_time
            logger.info(f"响应生成完成，耗时: {generation_time:.2f}s")
            return response
        except Exception as exc:
            logger.error(f"响应生成失败: {exc}")
            raise LLMProviderError(f"生成响应时出现错误: {exc}") from exc

    def _generate_streaming_response(
        self, prompt: str, session_id: str, user_message: str
    ) -> Generator[str, None, None]:
        """Yield streaming response chunks, caching history on completion.

        Raises:
            LLMProviderError: Any provider failure, with the cause chained.
        """
        start_time = time.time()
        full_response = ""

        try:
            if hasattr(self.llm_provider, "generate_stream"):
                for chunk in self.llm_provider.generate_stream(
                    prompt,
                    max_new_tokens=MAX_NEW_TOKENS,
                    temperature=TEMPERATURE,
                    top_p=TOP_P,
                ):
                    full_response += chunk
                    yield chunk
            else:
                mock_response = f"这是对 '{prompt[:100]}...' 的流式响应。当前正在使用模拟模型。"
                for char in mock_response:
                    time.sleep(0.05)
                    full_response += char
                    yield char

            generation_time = time.time() - start_time
            logger.info(f"流式响应生成完成，耗时: {generation_time:.2f}s")
        except Exception as exc:
            logger.error(f"流式响应生成失败: {exc}")
            raise LLMProviderError(f"生成响应时出现错误: {exc}") from exc
        finally:
            # Cache whatever was produced (partial output on mid-stream
            # failure is still useful context).  Skip caching entirely when
            # nothing was yielded so a failed stream does not record an
            # empty assistant turn in the session history.
            if full_response:
                self._cache_session_history(session_id, user_message, full_response)

    def _parse_tool_call(self, payload: Any) -> Dict[str, Any]:
        """Parse a tool call payload from a JSON string or dict.

        Raises:
            InvalidRequestError: Payload is neither dict nor valid JSON string.
        """
        if isinstance(payload, dict):
            return payload
        if isinstance(payload, str):
            try:
                return json.loads(payload)
            except json.JSONDecodeError as exc:
                raise InvalidRequestError("工具调用JSON格式无效", details=str(exc)) from exc
        raise InvalidRequestError("工具调用格式必须是字符串或字典")

    def _get_session_history(self, session_id: str, limit: int = 5) -> List[Message]:
        """Load up to ``limit`` recent messages, with in-memory fallback."""
        try:
            history = self.message_repo.get_history(session_id, limit)
            if history:
                return history
        except ChatError:
            # Already logged in repository; fall through to memory cache.
            pass

        if session_id in self._memory_sessions:
            return self._memory_sessions[session_id][-limit:]
        return []

    def _cache_session_history(
        self,
        session_id: str,
        user_message: str,
        assistant_message: str,
        timestamp: Optional[int] = None,
    ) -> None:
        """Cache minimal history in memory as a fallback when DB is unavailable."""
        ts = timestamp if timestamp is not None else int(time.time())
        messages = self._memory_sessions.setdefault(session_id, [])
        messages.append(Message(role=Role.USER, content=user_message, created_at=ts))
        messages.append(Message(role=Role.ASSISTANT, content=assistant_message, created_at=ts))
        # Bound the fallback cache so long-lived sessions cannot grow memory
        # without limit; the cap exceeds every read limit used, so trimming
        # is invisible to _get_session_history / get_chat_summary.
        if len(messages) > self._MEMORY_CACHE_MAX:
            del messages[: len(messages) - self._MEMORY_CACHE_MAX]

    def _save_tool_call_history(
        self,
        session_id: str,
        tool_name: str,
        parameters: Dict[str, Any],
        result: Dict[str, Any],
    ) -> None:
        """Persist or log tool call history (placeholder for a future repo)."""
        try:
            logger.info(f"工具调用历史已记录 session_id={session_id}, tool_name={tool_name}")
        except Exception as exc:  # pragma: no cover - logging errors should not break flow
            logger.error(f"保存工具调用历史失败: {exc}")
