from langchain_core.messages import HumanMessage
from customize.get_ollama import GetOllama
from langgraph.prebuilt import create_react_agent
from backup.bochawebsearch import BochaWebSearchTool
from backup.debate_evidence_retriever import DebateEvidenceTool
from backup.bailianapi import chatLLM


class LLMIntegration:
    """Wraps local Ollama models as either a plain chat LLM ("normal") or a
    tool-using ReAct agent ("react") for debate-style question answering.

    Attributes:
        type: Operating mode, "normal" or "react".
        title: Knowledge-base collection name for the evidence-retrieval tool.
        base_llm: Main reasoning model (deepseek-r1:14b).
        tool_llm: Lightweight model used by the tools (qwen2.5:3b).
        agent: Either the raw base LLM (normal) or a compiled ReAct agent.
    """

    def __init__(self, url="127.0.0.1:11434", type="normal", title=""):
        """Initialize the integration.

        Args:
            url: host:port of the Ollama server.
            type: "normal" (direct LLM) or "react" (tool-using agent).
                NOTE: shadows the builtin `type`; kept for caller compatibility.
            title: Collection name passed to the debate-evidence retriever.

        Raises:
            ValueError: If `type` is not "normal" or "react".
        """
        # Validate BEFORE initializing models: the original code raised only
        # after spinning up two Ollama clients for an invalid mode.
        if type not in ("normal", "react"):
            raise ValueError(f"Invalid type: {type}. Must be 'normal' or 'react'")

        self.type: str = type
        self.title: str = title

        # Alternative backend kept for reference:
        # self.base_llm = chatLLM
        self.base_llm = GetOllama(ip=url, model_type=1, model_name="deepseek-r1:14b")()
        self.tool_llm = GetOllama(ip=url, model_type=0, model_name="qwen2.5:3b")()

        if type == "react":
            self.agent = self._create_react_agent(llm=self.base_llm, tool_llm=self.tool_llm, title=title)
        else:  # "normal" — guaranteed by the validation above
            self.agent = self.base_llm

    def generate_response(self, prompt):
        """Generate a response to `prompt` via the configured agent.

        For "normal" mode the model's <think> reasoning block is stripped
        from the reply; "react" mode returns the agent's final message.
        """
        if self.type == "normal":
            return self.remove_think_tags(self._normal_agent_response(prompt))
        elif self.type == "react":
            return self._react_agent_response(prompt)

    def remove_think_tags(self, text):
        """Return `text` with the first <think>...</think> span removed.

        deepseek-r1 emits its chain-of-thought inside these tags. If either
        tag is missing, the text is returned unchanged. The end tag is
        searched only AFTER the start tag (the original code could splice
        garbage when "</think>" preceded "<think>").
        """
        start_index = text.find("<think>")
        if start_index == -1:
            return text
        end_index = text.find("</think>", start_index)
        if end_index == -1:
            return text
        return text[:start_index] + text[end_index + len("</think>"):]

    def _create_react_agent(self, llm, tool_llm, title):
        """Build a ReAct agent wired to the retrieval and web-search tools.

        Args:
            llm: Reasoning model that drives the agent.
            tool_llm: Lighter model used inside the tools.
            title: Collection name for the evidence retriever (falls back to
                "knowledge_base" when empty).
        """
        # Build tools lazily, once per instance. The original constructed
        # de_tool on every call and then discarded it when _tools was
        # already cached — all construction now lives inside the guard.
        if not hasattr(self, '_tools'):
            de_tool = DebateEvidenceTool(
                llm=tool_llm,
                collection_name=title if title else "knowledge_base",
            ).tool
            web_search = BochaWebSearchTool(llm=tool_llm).tool
            self._tools = [de_tool, web_search]

        # System prompt is intentionally in Chinese: the agent debates in Chinese.
        sys_prompt = (
            "你是辩论赛辩手，使用中文表述。"
            "需要实例或数据时优先调用[retrieve_tool]工具获取，也可调用[bocha_search_tool]工具从互联网获取。"
            "不得捏造事实，回答要明确。"
        )

        return create_react_agent(llm, self._tools, prompt=sys_prompt)

    def _normal_agent_response(self, prompt):
        """Invoke the plain LLM and return the message text."""
        return self.agent.invoke(prompt).content

    def _react_agent_response(self, prompt):
        """Stream the ReAct agent and return the final message's text.

        Returns a Chinese fallback string (unchanged runtime behavior) on an
        empty stream or any exception — callers treat this as best-effort.
        """
        try:
            inputs = {"messages": [("user", prompt)]}
            stream = self.agent.stream(inputs, stream_mode="values")
            # Drain the stream; an empty result means no usable response.
            results = list(stream)
            if not results:
                return "未获得有效响应"

            final_output = results[-1]
            message = final_output["messages"][-1]
            # Messages are usually objects with .content, but tolerate raw values.
            return message.content if hasattr(message, 'content') else message

        except Exception as e:
            # Boundary-level catch: log and degrade rather than crash the caller.
            print(f"Agent执行异常: {str(e)}")
            return "系统处理请求时发生错误"

    def print_stream(self, stream):
        """Debug helper: pretty-print each message as the agent streams."""
        for s in stream:
            message = s["messages"][-1]
            if isinstance(message, tuple):
                print(message)
            else:
                message.pretty_print()


if __name__ == "__main__":
    # Manual smoke test against a local Ollama instance.
    integration = LLMIntegration(type="normal", title="knowledge_base")
    answer = integration.generate_response("中学生是否应该带手机进入校园?")
    print(answer)
