"""
基于bing + crawl4ai + trafilatura 的网络检索工具
"""
from crawl4ai import AsyncWebCrawler
from lxml import html
from lxml.etree import HTMLParser as LHTMLParser
import trafilatura
import asyncio
from loguru import logger
import requests
from fake_useragent import UserAgent

from typing import Type, Optional, ClassVar
from pydantic import BaseModel, Field
from langchain_core.tools import BaseTool
from langchain_core.callbacks import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)

from tools.tool_result import ToolResult

class SearchInput(BaseModel):
    # Argument schema for the web-search tool: a query string plus an upper
    # bound on how many results to return (defaults to 5).
    # NOTE: the Field descriptions are runtime data surfaced to the LLM via the
    # tool schema (they are intentionally in Chinese), not comments.
    query: str = Field(description="用于网络信息检索的查询语句")
    max_results: int = Field(default=5, description="要返回的搜索结果数量")

class TextResult(BaseModel):
    # One Bing search hit: anchor title, target URL, and body text (initially
    # the Bing snippet, later replaced by the crawled page's extracted text).
    # All fields default to "" so partially-parsed results are still valid.
    title: str = ""
    href: str = ""
    body: str = ""

class BingSearchTool(BaseTool):
    """LangChain tool that searches cn.bing.com, crawls every hit with
    crawl4ai, and extracts the readable page text with trafilatura.

    Only asynchronous invocation is supported; the synchronous ``_run``
    deliberately raises ``NotImplementedError``.
    """

    name: str = 'web_search'
    description: str = "网络信息检索，可依据用户查询精准筛选和排序结果，帮用户快速获取所需信息。"
    # BUGFIX: LangChain expects the pydantic model *class* here, not the
    # JSON-schema dict that model_json_schema() returns. The original
    # assignment also contradicted the Type[BaseModel] annotation.
    args_schema: Type[BaseModel] = SearchInput
    return_direct: bool = True
    # Shared, stateless lxml parser. ClassVar so pydantic does not treat it
    # as a model field.
    parser: ClassVar[LHTMLParser] = LHTMLParser(
        remove_blank_text=True, remove_comments=True, remove_pis=True, collect_ids=False
    )

    def _run(self, query: str, max_results: int = 5,
        run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> ToolResult:
        """Synchronous execution is intentionally unsupported."""
        raise NotImplementedError("不支持同步调用. ")

    async def _arun(self, query: str, max_results: int = 5,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None
    ) -> ToolResult:
        """Asynchronously search Bing and return the crawled results.

        Args:
            query: free-text search query.
            max_results: maximum number of results to return.
        """
        return await self._search_with_bing(query, max_results)

    async def _search_with_bing(
            self, query: str, max_results: int = 5,
    ) -> ToolResult:
        """Query Bing, crawl each result page, and extract its main text.

        Returns:
            ToolResult whose ``data`` is a list of dicts with keys
            title / href / body (at most ``max_results`` entries), or a
            failure ToolResult carrying the error message.
        """
        try:
            # Double quotes would switch Bing into phrase-match mode and
            # distort the result set, so strip them from the query.
            query = query.replace('"', '')

            params = {
                'q': query, 'pq': query, 'setlang': 'zh-CN', 'qs': 'n',
                'sp': '-1', 'lq': '0', 'sc': '7-10', 'sk': ''
            }
            # Browser-like headers (random UA + zh-CN cookies) so Bing serves
            # a normal result page instead of a bot/consent interstitial.
            headers = {
                'User-Agent': UserAgent().random,
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache',
                'Sec-Fetch-Dest': 'document', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'none',
                'Sec-Fetch-User': '?1', 'Upgrade-Insecure-Requests': '1',
                'Cookie': 'SRCHHPGUSR=SRCHLANG=zh-Hans; _EDGE_S=ui=zh-cn; _EDGE_V=1'
            }
            # BUGFIX: a timeout makes a hung request fail fast instead of
            # blocking the event loop's thread forever; raise_for_status turns
            # HTTP errors into a failure ToolResult via the except below.
            req = requests.get('https://cn.bing.com/search', headers=headers,
                               params=params, timeout=15)
            req.raise_for_status()
            results = self.extract_results(req.text)

            # NOTE: on Windows, crawl4ai (Playwright) launches the browser via
            # asyncio.create_subprocess_exec; without the Proactor event-loop
            # policy this raises NotImplementedError. Configure the policy in
            # the application entry point when running on Windows.
            async with AsyncWebCrawler(verbose=False) as client:
                tasks = [self.crawl4ai_extract(client, item) for item in results]
                # Failed crawls come back as None and are dropped before the
                # list is truncated to max_results.
                fix_results = [item for item in await asyncio.gather(*tasks) if item is not None]
                fix_results = fix_results[:max_results]

            return ToolResult(
                success=True,
                data=fix_results,
                metadata={"provider": "bing"}
            )
        except Exception as e:
            logger.error(e)
            return ToolResult(
                success=False,
                data=None,
                error=f"Bing 检索失败: {str(e)}"
            )

    def extract_results(self, html_text):
        """Parse a Bing result page into a list of TextResult items.

        Each ``li.b_algo`` element yields one result; fragmented text nodes
        are joined with single spaces.
        """
        tree = html.fromstring(html_text, parser=self.parser)
        items_xpath = "//li[contains(@class, 'b_algo')]"
        elements_xpath = {
            "title": ".//h2/a//text()",
            "href": ".//h2/a/@href",
            "body": ".//p//text()",
        }
        results = []
        for item in tree.xpath(items_xpath):
            result = TextResult()
            for key, xpath in elements_xpath.items():
                data = " ".join(x.strip() for x in item.xpath(xpath))
                setattr(result, key, data)
            results.append(result)
        return results

    async def crawl4ai_extract(self, client: AsyncWebCrawler, item: TextResult):
        """Crawl one result URL and replace its snippet body with the page's
        extracted main text.

        Returns the result as a plain dict, or None when the crawl failed
        (the caller filters Nones out).
        """
        link = item.href
        if not link:
            # Nothing to crawl; keep the Bing snippet as the body.
            return item.model_dump(mode='json')

        resp = await client.arun(link)
        if resp.success:
            # BUGFIX: trafilatura.extract may return None (e.g. for pages with
            # no extractable article text). Fall back to the original snippet
            # so body stays a str, as TextResult's schema requires.
            extracted = trafilatura.extract(resp.html)
            if extracted:
                item.body = extracted
            return item.model_dump(mode='json')
        # Crawl failure: fall through and return None implicitly.


if __name__ == '__main__':
    # Ad-hoc manual check: run a single query end-to-end and print the result.
    tool = BingSearchTool()
    payload = {
        'query': '四川大学的校长是谁？',
        'max_results': 10,
    }
    outcome = asyncio.run(tool.ainvoke(payload))
    print(outcome)