import asyncio
import json
import time
import uuid
from typing import AsyncGenerator

from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate

from app.config.logging import logger
from app.core import document_manage
from app.llm.ollama import chat_model
from app.models.KnowledgeQueryCondition import KnowledgeQueryCondition
from app.models.chat_event import ChatEvent
from app.models.chat_payload import ChatPayload
from app.prompt.question_answer_prompt import QuestionAnswerPrompt, prompt_template_by_mcp_result
import chainlit as cl

from app.utils.mcp_fun_utils import find_mcp_name_for_tool, add_mcp_tool_to_model


class ChatEngine:
    """Orchestrates one chat turn: RAG retrieval, LLM streaming, and MCP tool dispatch.

    Results are streamed to the caller as :class:`ChatEvent` objects
    (ready / token / tool_call / tool_result / complete / error).
    """

    def __init__(self, payload: ChatPayload):
        # Request payload; the pipeline reads `payload.prompt` and `payload.session_id`.
        self.payload = payload

    @staticmethod
    def _build_prompt(system_content: str, user_content: str):
        """Format the message list: one system message (task + context) followed
        by the raw user question. Shared by the initial and the post-tool pass."""
        return ChatPromptTemplate.from_messages([
            SystemMessage(content=system_content),
            HumanMessage(content=user_content)
        ]).format_messages()

    async def astream_events(self) -> AsyncGenerator[ChatEvent, None]:
        """
        Run the full conversation pipeline and stream progress events:

        1. Register MCP tools on the model and emit a "ready" event.
        2. Vector-search the knowledge base for context (RAG).
        3. Stream the model's answer as "token" events, collecting any
           tool-call requests it emits along the way.
        4. Execute the requested MCP tools ("tool_call"/"tool_result" events).
        5. If tools ran, re-prompt the model with the tool output and stream
           the final answer.
        6. Emit a terminal "complete" event, or an "error" event on failure.

        :return: AsyncGenerator[ChatEvent, None]
        """
        payload = self.payload
        try:

            # 0. Register the MCP tools on the chat model.
            model_with_tools = add_mcp_tool_to_model(chat_model)

            # Handshake event carrying the request/response message id.
            yield ChatEvent(event_type="ready", data={"request_message_id": payload.session_id, "status": "ok"})

            # 1. Vector search for relevant knowledge-base context.
            rag_result = document_manage.search(condition=KnowledgeQueryCondition(query=payload.prompt))
            context = [item.text for item in rag_result.data]
            logger.info("context: {}".format(context))

            # 2. Format the prompt with the retrieved context.
            question_answer_prompt = QuestionAnswerPrompt(query=payload.prompt, context=context)
            formatted_prompt = self._build_prompt(question_answer_prompt.prompt, payload.prompt)

            logger.info(f"prompt: {formatted_prompt}")

            # 3. Let the LLM respond. It either streams the answer directly, or
            # returns tool-call requests when it decides external tools are needed.
            full_response = []
            mcp_function = []
            tool_results = {}
            # NOTE: astream becomes noticeably slower once tools are bound to the model.
            async for chunk in model_with_tools.astream(formatted_prompt):
                # 3.1 Plain content — no MCP tool needed for this chunk.
                if chunk.content:
                    full_response.append(chunk.content)
                    yield ChatEvent(event_type="token", data={"token": chunk.content})
                # 3.2 Tool-call request detected.
                if hasattr(chunk, 'tool_calls') and chunk.tool_calls:
                    mcp_function.extend(chunk.tool_calls)
                    yield ChatEvent(event_type="tool_call", data={"tools": chunk.tool_calls})

            # 4. Invoke the requested MCP tools and collect their results for re-answering.
            if mcp_function:
                logger.info(f"mcp_function: {mcp_function}")
                for tool_use in mcp_function:
                    tool_name = tool_use.get("name")
                    tool_args = tool_use.get("args")
                    mcp_name = find_mcp_name_for_tool(tool_name)
                    if not mcp_name:
                        raise ValueError(f"未配置工具: {tool_name}")
                    # BUGFIX: .get() returns None for an unknown name; unpacking None
                    # raised TypeError before the ConnectionError below could fire.
                    # Check the entry first, then unpack.
                    session_entry = cl.context.session.mcp_sessions.get(mcp_name)
                    if not session_entry:
                        raise ConnectionError(f"连接不可用: {mcp_name}")
                    mcp_session, _ = session_entry
                    result = await mcp_session.call_tool(tool_name, tool_args)
                    # Assumes the tool result carries at least one text content part — TODO confirm.
                    tool_results[tool_name] = result.content[0].text
                    yield ChatEvent(event_type="tool_result", data={"tool": tool_name, "status": "completed"})

            # 5. Generate the final answer from the tool output, if any tools ran.
            if tool_results:
                # Feed the raw tool results back to the model as the new context.
                context = [str(tool_results)]
                question_answer_prompt = QuestionAnswerPrompt(query=payload.prompt, context=context,
                                                              prompt_template=prompt_template_by_mcp_result)
                # Rebuild the prompt around the tool results.
                formatted_prompt = self._build_prompt(question_answer_prompt.prompt, payload.prompt)

                logger.info(f"mcp tool prompt: {formatted_prompt}")
                async for chunk in chat_model.astream(formatted_prompt):
                    if chunk.content:
                        full_response.append(chunk.content)
                        yield ChatEvent(event_type="token", data={"token": chunk.content})

            # 6. Terminal event with the accumulated response.
            yield ChatEvent(
                event_type="complete",
                data={"full_response": full_response, "session_id": payload.session_id}
            )
        except Exception as e:
            # Boundary handler: surface any failure to the client as an error event.
            logger.error(e)
            yield ChatEvent(
                event_type="error",
                data={"error_msg": str(e), "session_id": payload.session_id}
            )