from langchain.retrievers import ContextualCompressionRetriever, MultiQueryRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.tools import tool
import sys
import os
user_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'storage')
# Add the storage directory to sys.path so chroma_manager can be imported
sys.path.append(user_dir)
from chroma_manager import ChromaManager
from pydantic import BaseModel, Field

class DebateEvidenceTool:
    """Retrieval tool for debate evidence backed by a Chroma vector store.

    Wraps a Chroma collection behind a multi-query + LLM-compression
    retriever chain and exposes it as a LangChain ``@tool``.
    """

    def __init__(self, collection_name: str = "knowledge_base", llm=None):
        """
        Initialize the debate-evidence retrieval tool.

        :param collection_name: name of the Chroma collection to operate on
        :param llm: language model instance; falls back to ChatOpenAI(temperature=0)
        """
        self.manager = ChromaManager(collection_name=collection_name)
        self.llm = llm or self._default_llm()
        self.retriever = self._create_retriever()
        self.tool = self._create_tool()

    def _default_llm(self):
        """Build the fallback LLM used when none is supplied by the caller."""
        # Imported lazily so the OpenAI dependency is only needed when no llm is given.
        from langchain.chat_models import ChatOpenAI
        return ChatOpenAI(temperature=0)

    def _create_tool(self):
        """Create a LangChain tool with validated arguments around the retriever."""

        # NOTE: the inner docstring below is runtime data — langchain uses it as
        # the tool description shown to the agent LLM — so it stays in Chinese.
        @tool
        def retrieve_tool(claim: str) -> str:
            """用于检索辩论证据的工具，主要包括辩论所用的实例、数据等和事实辩论的论据。
            :param
                claim: 要检索的辩论论点
            :return:
                包含检索到的辩论证据的字符串
            """

            return self._retrieve_impl(claim)

        return retrieve_tool

    def _create_retriever(self):
        """Build the retriever chain: multi-query expansion + LLM compression.

        :return: a ContextualCompressionRetriever over the Chroma collection
        """
        # Similarity search with a score floor; at most 5 candidate documents.
        base_retriever = self.manager.langchain_chroma.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={'score_threshold': 0.5, 'k': 5}
        )

        # Expand the query into LLM-generated variants (keeping the original)
        # to improve recall.
        multi_retriever = MultiQueryRetriever.from_llm(
            retriever=base_retriever,
            llm=self.llm,
            include_original=True,
        )

        # Compress/filter the retrieved passages down to the relevant extracts.
        compressor = LLMChainExtractor.from_llm(self.llm)
        return ContextualCompressionRetriever(
            base_compressor=compressor,
            base_retriever=multi_retriever
        )

    def _retrieve_impl(self, claim: str) -> str:
        """Run the retriever for *claim* and format the hits as a numbered list.

        :param claim: the debate claim to search evidence for
        :return: retrieved evidence concatenated into one string ("" if no hits)
        """
        docs = self.retriever.invoke(claim)
        return "\n".join([f"▲ 证据 {i+1}:\n{doc.page_content}\n"
                        for i, doc in enumerate(docs)])


# Usage example:
# tool = DebateEvidenceTool(collection_name="climate_debate").tool
if __name__ == "__main__":
    # Demo: wire the tool to a local Ollama model and run a sample retrieval.
    # (sys and os are already imported at module top level.)
    user_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'customize')
    # Add the customize directory to sys.path so project-local modules resolve.
    sys.path.append(user_dir)
    from customize.get_ollama import GetOllama

    # temperature=0 for deterministic output from the local Qwen model.
    r1 = GetOllama(model_type=0, model_name="qwen2.5:3b", temperature=0)()
    tool_instance = DebateEvidenceTool(collection_name="knowledge_base", llm=r1)
    print(tool_instance.tool.args)
    print(tool_instance.tool.run({"claim":"中学生使用手机危害"}))