from llama_index.core import get_response_synthesizer, Settings, QueryBundle
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from typing import Any, List, Optional, Sequence, Type

from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks import CallbackManager
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import BaseSynthesizer

from tool.search_engine import GoogleSearchTool


# Custom web-backed query engine.
class WebEngine(BaseQueryEngine):
    """Query engine that answers queries from live web search results.

    Instead of retrieving nodes from a local index, this engine delegates
    to ``GoogleSearchTool.fetch_web_results`` and returns the top results
    joined into a single string.
    """

    def __init__(
        self,
        response_synthesizer: Optional[BaseSynthesizer] = None,
        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
        callback_manager: Optional[CallbackManager] = None,
        top_k: int = 3,
    ) -> None:
        """Initialize the engine.

        Args:
            response_synthesizer: Synthesizer to use; defaults to one built
                from ``Settings.llm`` when not provided.
            node_postprocessors: Optional postprocessors; each receives the
                shared callback manager.
            callback_manager: Callback manager propagated through the
                pipeline; falls back to the synthesizer's manager.
            top_k: Number of top search results to return from ``query``.
                Defaults to 3, preserving the original behavior.
        """
        self._response_synthesizer = response_synthesizer or get_response_synthesizer(
            llm=Settings.llm,
            callback_manager=callback_manager or Settings.callback_manager,
        )
        self._node_postprocessors = node_postprocessors or []
        # Use one callback manager everywhere so events from the whole
        # pipeline are collected in a single place.
        callback_manager = (
            callback_manager or self._response_synthesizer.callback_manager
        )
        for node_postprocessor in self._node_postprocessors:
            node_postprocessor.callback_manager = callback_manager
        self._top_k = top_k
        super().__init__(callback_manager=callback_manager)

    def _get_prompt_modules(self) -> PromptMixinType:
        """Return prompt sub-modules (this engine has none)."""
        # Fix: the PromptMixin contract expects a dict, not the implicit
        # ``None`` the previous bare ``pass`` returned.
        return {}

    def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
        """Handle the base-class query path.

        Fix: previously a bare ``pass`` (silently returned ``None``);
        delegate to ``query`` so ``QueryBundle``-based callers get the
        same answer as direct string callers.
        """
        return self.query(query_bundle.query_str)

    async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
        """Async query path; the underlying search tool is synchronous,
        so simply reuse the synchronous implementation."""
        return self._query(query_bundle)

    def query(self, query_str: str) -> str:
        """Run a web search for *query_str* and return the top results.

        Args:
            query_str: The raw query string to search for.

        Returns:
            The top ``top_k`` search results joined by blank lines.
        """
        results = GoogleSearchTool.fetch_web_results(query_str)
        return "\n\n".join(results[: self._top_k])