import os
import json
import requests
import re
from bs4 import BeautifulSoup  # Used to clean page HTML: strip fixed boilerplate markup and assemble the useful text
import nest_asyncio
from langchain_community.utilities import GoogleSerperAPIWrapper
# serper.dev API key, read once at import time; None when the env var is unset.
SERPER_API_KEY=os.getenv("SERPER_API_KEY")
# Search tool
def search(keywords: list):
    """Search Google via the serper.dev API.

    Args:
        keywords: list of keyword strings; they are joined with spaces
            into a single query.

    Returns:
        The raw JSON response body as a string.

    Raises:
        KeyError: if the SERPER_API_KEY environment variable is unset.
        requests.RequestException: on network failure or timeout.
    """
    payload = json.dumps({
        "q": ' '.join(keywords),
    })
    headers = {
        # Register an account at serper.dev to obtain an API key.
        'X-API-KEY': os.environ["SERPER_API_KEY"],
        'Content-Type': 'application/json'
    }
    # requests.post is the idiomatic shortcut for requests.request("POST", ...);
    # the timeout keeps a dead endpoint from blocking the caller forever.
    response = requests.post(
        "https://google.serper.dev/search",
        headers=headers,
        data=payload,
        timeout=30,
    )
    return response.text
    
# Browse tool
def browse(url: str):
    """Fetch a web page and extract its readable text.

    Headings (h1-h5) are rendered with Markdown '#' prefixes; text from
    p, pre (e.g. GitHub code), and td elements is appended verbatim, as
    is any text inside a <div class="content"> element. Runs of blank
    lines are collapsed to one newline.

    Args:
        url: address of the page to fetch.

    Returns:
        The extracted text, or an error-message string when the page
        cannot be retrieved or parsed (deliberate best-effort: this
        function never raises).
    """
    content = ""
    try:
        request_options = {
            "headers": { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36" },
            # Timeout prevents an unresponsive server from hanging the caller.
            "timeout": 30,
        }
        page = requests.get(
            url,
            **request_options
        )
        soup = BeautifulSoup(page.content, "html.parser")
        # Find text in p, pre (github code), td and heading elements.
        chunks = soup.find_all(["h1", "h2", "h3", "h4", "h5", "p", "pre", "td"])
        for chunk in chunks:
            if chunk.name.startswith("h"):  # heading -> Markdown '#' prefix by level
                content += "#" * int(chunk.name[-1]) + " " + chunk.get_text() + "\n"
            else:  # plain text chunk
                text = chunk.get_text()
                if text:  # truthiness check; `text != ""` was redundant
                    content += text + "\n"
        # Find text in div with class="content".
        divs = soup.find("div", class_="content")
        if divs:
            # `string=True` replaces the deprecated `text=True` argument.
            chunks_with_text = divs.find_all(string=True)
            for chunk in chunks_with_text:
                if isinstance(chunk, str) and chunk.strip():
                    content += chunk.strip() + "\n"
        # Collapse consecutive newlines into one.
        content = re.sub(r"\n+", "\n", content)
        return content
    except Exception as e:
        return f"Can not browse '{ url }'.\tError: { str(e) }"

# Tool descriptions (runtime data consumed elsewhere, e.g. by a prompt
# builder — the `desc` strings are intentionally left in Chinese).
# Each entry: name -> key into tools_mapping; desc -> human-readable usage;
# kwargs -> parameter name mapped to (type, description) tuples.
tools_info = [
    {
        "name": "search",
        "desc": "使用网络搜索工具，搜索{keywords}指定关键词相关信息",
        "kwargs": {
            # NOTE(review): a list of (type, desc) tuples here, but a bare
            # tuple for "url" below — confirm consumers handle both shapes.
            "keywords": [("str", "一个关键词")],
        },
    },
    {
        "name": "browse",
        "desc": "使用浏览工具，浏览{url}指定的页面内容",
        "kwargs": {
            "url": ("str", "可访问的URL地址")
        },
    },
]

# Dispatch table: tool name (as listed in tools_info) -> callable implementation.
tools_mapping = {
    "search": search,
    "browse": browse,
}
# LangChain wrapper around the same serper.dev API, used for the smoke test
# below.  NOTE(review): constructed as a module-level side effect at import
# time — presumably harmless when SERPER_API_KEY is None, but confirm.
search_test = GoogleSerperAPIWrapper(max_results=2,serper_api_key=SERPER_API_KEY)
if __name__ == "__main__" :
    # Manual smoke test; requires SERPER_API_KEY and network access.
    # BUG FIX: search() expects a *list* of keywords.  Passing the bare
    # string "beijing" made ' '.join() iterate over its characters and
    # send the query "b e i j i n g".
    print(search(["beijing"]))
    print(search_test.run("介绍下北京好玩的景点"))