import os
import requests
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document
from langchain.tools import tool
from langchain_community.chat_models import ChatOpenAI
from langchain_community.retrievers import BM25Retriever

class BochaWebSearchTool:
    """LangChain tool wrapper around the Bocha web-search API.

    Fetches web results from Bocha, converts each hit into a LangChain
    ``Document``, runs them through an LLM-based contextual-compression
    retriever against the original query, and returns the surviving
    summaries as a single string.
    """

    def __init__(self, llm=None):
        """
        Initialize the Bocha web-search tool.

        :param llm: language-model instance used for summary compression;
                    defaults to ``ChatOpenAI(temperature=0)``.
        """
        self.llm = llm or self._default_llm()
        # Read the API key from the environment. May be None when unset,
        # in which case the search request will fail with an auth error.
        self.api_key = os.environ.get('BOCHA_KEY')
        self.tool = self._create_tool()

    def _default_llm(self):
        # Deterministic default model (temperature 0) for compression.
        return ChatOpenAI(temperature=0)

    def _bocha_web_search_tool(self, query: str, count: int = 5, llm=None) -> str:
        """
        Search the web via the Bocha web-search API and return compressed
        result summaries as one string.

        :param query: search keywords.
        :param count: number of search results to request from the API.
        :param llm: unused; kept only for backward compatibility
                    (``self.llm`` is always the compressor model).
        :return: compressed summaries joined into one string, or the
                 "not found" fallback message when nothing survives.
        :raises Exception: when the API responds with a non-200 status.
        """
        url = 'https://api.bochaai.com/v1/web-search'
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }
        payload = {
            "query": query,
            # Search time range: "oneday", "oneweek", "onemonth",
            # "oneyear" or "nolimit".
            "freshness": "nolimit",
            # Ask the API to include a long-form summary per hit.
            "summary": True,
            "count": count
        }

        # Timeout guards against hanging forever on an unresponsive host.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        if response.status_code != 200:
            raise Exception(f"API 请求失败，状态码: {response.status_code}, 错误信息: {response.text}")

        body = response.json()
        web_pages = body['data']['webPages']['value']

        # Convert raw API hits into LangChain Documents.
        docs = [Document(
            page_content=page['summary'],
            metadata={"title": page['name'], "url": page['url']}
        ) for page in web_pages]

        # Empty result set: BM25Retriever cannot be built from zero
        # documents, so short-circuit to the same fallback message the
        # join below would produce.
        if not docs:
            return "未找到相关摘要"

        # BM25 keyword retriever over the fetched documents, capped at 3.
        base_retriever = BM25Retriever.from_documents(docs)
        base_retriever.k = min(len(docs), 3)

        # The LLM extractor strips each retrieved document down to the
        # parts relevant to the query.
        compressor = LLMChainExtractor.from_llm(self.llm)
        compression_retriever = ContextualCompressionRetriever(
            base_compressor=compressor,
            base_retriever=base_retriever
        )

        # Run compression-retrieval against the original query.
        compressed_docs = compression_retriever.invoke(query)

        print("~~~调用了搜索引擎~~~")
        # Join non-empty compressed summaries; fall back to the
        # "not found" message when compression removed everything.
        return "\n\n▲ 压缩后摘要 ▲\n".join(
            [f"来源：{doc.metadata['title']}\n{doc.page_content}"
             for doc in compressed_docs if doc.page_content.strip()]
        ) or "未找到相关摘要"

    def _create_tool(self):
        """Wrap the search method as a LangChain ``@tool`` whose schema
        exposes a single ``query`` argument."""
        @tool
        def bocha_search_tool(query: str) -> str:
            """
            Search the web with the Bocha web-search API and return the
            results as a string.

            :param
                query: search keywords
            :return:
                search results as a string
            """
            return self._bocha_web_search_tool(query)
        return bocha_search_tool


if __name__ == "__main__":
    import sys  # `os` is already imported at module level; the duplicate import was removed

    # NOTE(review): this appends the 'customize' directory itself, yet the
    # import below uses the package path 'customize.get_ollama' — that only
    # resolves if the parent directory is already importable; confirm the
    # intended path.
    user_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'customize')
    # Add the user directory to sys.path.
    sys.path.append(user_dir)
    from customize.get_ollama import GetOllama

    # Local Ollama-backed chat model used as the compression LLM.
    r1 = GetOllama(model_type=0, model_name="qwen2.5:3b", temperature=0)()

    # Usage example: inspect the tool's argument schema, then run a search.
    search_tool = BochaWebSearchTool(llm=r1)
    print(search_tool.tool.args)
    print(search_tool.tool.run({"query":"巴黎奥运，中国代表团总结。"}))
