import uuid
from langchain_community.llms import Ollama
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.callbacks import BaseCallbackHandler
from operator import itemgetter
from typing import Dict, Any, Optional
import re


class DebugCallbackHandler(BaseCallbackHandler):
    """Callback handler that appends chain lifecycle events to a log file.

    Safely handles the case where ``serialized`` is ``None`` — some LangChain
    runnables (e.g. ``RunnableSequence``) invoke ``on_chain_start`` without
    serialized metadata.
    """

    def __init__(self, log_file: str = "chain_log.txt") -> None:
        # Path of the append-only log file shared by all three hooks.
        self.log_file = log_file

    def on_chain_start(
            self,
            serialized: Dict[str, Any],
            inputs: Dict[str, Any],
            **kwargs: Any
    ) -> None:
        """Log the chain name and its inputs when a chain starts."""
        # Resolve the chain name defensively: serialized may be None or a
        # non-dict, and even a dict may lack the 'name' key.
        chain_name = "RunnableSequence"
        if serialized and isinstance(serialized, dict):
            chain_name = serialized.get('name', chain_name)

        # BUGFIX: use "utf-8", not "utf-8-sig" — the -sig incremental encoder
        # emits a BOM on every append-mode open, scattering U+FEFF marks
        # throughout the middle of the log file.
        with open(self.log_file, 'a', encoding="utf-8") as f:
            f.write(f"启动任务链: {chain_name}\n输入: {inputs}\n")

    def on_chain_end(
            self,
            outputs: Dict[str, Any],
            **kwargs: Any
    ) -> None:
        """Log the chain outputs when a chain finishes successfully."""
        with open(self.log_file, 'a', encoding="utf-8") as f:
            f.write(f"输出: {outputs}\n结束任务链\n")

    def on_chain_error(
            self,
            error: Exception,
            **kwargs: Any
    ) -> None:
        """Log the exception raised by a failing chain."""
        with open(self.log_file, 'a', encoding="utf-8") as f:
            f.write(f"任务链发生错误：{str(error)}\n")


class TaskChainModule:
    """Three-stage QA pipeline on a local Ollama model.

    Stage 1 extracts a short theme from the user's query, stage 2 drafts a
    knowledge-base summary for that theme, and stage 3 formats the final
    answer from query + theme + summary.
    """

    def __init__(self, callback_handler: Optional[DebugCallbackHandler] = None) -> None:
        # Local Ollama model; low temperature keeps the theme extraction
        # step close to deterministic.
        self.llm = Ollama(
            model="qwen3:4b",
            temperature=0.3,
            num_ctx=4096
        )
        # Fall back to a default file-logging handler when none is supplied.
        self.debug_handler = callback_handler or DebugCallbackHandler()

    def create_task_chain(self):
        """Build and return the composed runnable.

        Pipeline shape: {"query": ...} -> adds "theme" -> adds "summary"
        -> adds "final_answer"; the result dict carries all four keys.
        """
        # Stage 1: distill the query into a single short theme string.
        question_template = PromptTemplate.from_template(
            "请分析以下问题，并指出问题的主题和需要的详细信息。\n问题：{query}\n输出格式：⚠️要求：只输出一个简短的主题，不要输出任何解释、推理或其他内容。"
        )
        question_chain = question_template | self.llm | StrOutputParser()

        # Stage 2: summarize knowledge-base material for the theme.
        search_template = PromptTemplate.from_template(
            "根据主题'{theme}',从企业知识库中寻找相关信息，并提供简要概述"
        )
        search_chain = search_template | self.llm | StrOutputParser()

        # Stage 3: format the final enterprise-QA answer.
        # NOTE(review): unlike the other stages this one has no
        # StrOutputParser — the raw LLM return is kept as final_answer.
        answer_template = PromptTemplate.from_template(
            "将以下信息格式化为企业知问答的回答：\n问题:{query}\n主题:{theme}\n内容概述：{summary}"
        )
        answer_chain = answer_template | self.llm

        # Seed the dict with both "theme" (stage-1 output) and the original
        # "query", then let assign() accumulate "summary" and "final_answer"
        # so every template sees the variables it needs.
        task_chain = (
                {"theme": question_chain, "query": itemgetter("query")}
                | RunnablePassthrough.assign(summary=search_chain)
                | RunnablePassthrough.assign(final_answer=answer_chain)
        )
        return task_chain

    def execute_task_chain(self, query: str) -> Optional[Dict[str, Any]]:
        """Run the pipeline for *query*; return the result dict or ``None``.

        The input must be a dict ({"query": ...}) because the chain's first
        step extracts the "query" key via itemgetter.
        """
        task_chain = self.create_task_chain()
        try:
            result = task_chain.invoke(
                {"query": query},
                config={"callbacks": [self.debug_handler]}
            )
            print("\n任务执行结果：", result)
            return result
        except Exception as e:
            # Top-level boundary: report the failure and signal it with None
            # rather than crashing the caller.
            print("任务链执行发生错误", e)
            return None


if __name__ == "__main__":
    # Demo entry point: push a single hardware-selection question through
    # the task-chain module and let it print/log the result.
    module = TaskChainModule()
    user_question = "无人机端需要小模型做实时处理用什么板子"
    answer = module.execute_task_chain(user_question)
