from time import sleep

import requests
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options

from selenium import webdriver
class SearchApi:
    """Scrapers for several public web search engines.

    Every method takes a query string and returns a list of result dicts
    ``[{"href": ..., "title": ..., "abstract": ...}]``, or ``None`` when the
    underlying HTTP request fails.
    """

    @staticmethod
    def bing_search(query, size=10, isSelenium=False, UserAgent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'):
        """Bing search api.

        input: search keyword, size = number of entries (optional),
               isSelenium = render the page with headless Chrome (optional),
               UserAgent (optional)
        output: list of dicts [{"href": href, "title": title, "abstract": abstract}]
                or None on request error
        """
        pageSize = 1          # Bing "first" parameter: 1-based index of the first result on the page
        results = []
        seen_titles = set()   # titles already collected — O(1) duplicate check across pages
        while True:
            try:
                url = f"https://cn.bing.com/search?q={query}&pq={query}&first={pageSize}&FORM=QBRE&PC=U531"
                print(url)
                if not isSelenium:
                    headers = {
                        'User-Agent': UserAgent,
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                        'Accept-Encoding': 'gzip, deflate',
                    }
                    # timeout so a stalled connection cannot hang the caller;
                    # a Timeout is a RequestException and is caught below
                    response = requests.get(url, headers=headers, timeout=10)
                    response.raise_for_status()
                    html = response.text
                else:
                    options = Options()
                    # headless mode: no visible browser window
                    options.add_argument('--headless')
                    driver = webdriver.Chrome(options=options)
                    try:
                        driver.get(url)
                        sleep(2)  # give the page time to render its results
                        html = driver.page_source
                    finally:
                        # always release the browser process, even on error
                        driver.quit()
                soup = BeautifulSoup(html, 'html.parser')
                # HACK: the plain-requests path is unreliable beyond the first
                # page, so take (almost) everything from it and stop — size=-1
                # makes the slice below drop only the last item and makes the
                # size check below always succeed.
                if not isSelenium:
                    size = -1

                count_before = len(results)
                for result in soup.select("#b_results .b_algo")[:size]:
                    link_element = result.select_one("a")
                    href = link_element.get("href")
                    title = link_element.get_text()

                    abstract_element = result.select_one(".b_caption p")
                    abstract = abstract_element.get_text() if abstract_element else ""

                    # keep only the first occurrence of each title
                    if title not in seen_titles:
                        seen_titles.add(title)
                        results.append({
                            "href": href,
                            "title": title,
                            "abstract": abstract
                        })
                print(len(results))
                # stop once enough entries have been collected
                if len(results) >= size:
                    return results
                if len(results) == count_before:
                    # the page yielded nothing new: Bing has no more results,
                    # return what we have instead of looping forever
                    return results
                pageSize += 10  # advance to the next page (10 results per page)

            except requests.exceptions.RequestException as e:
                print(f"An error occurred: {e}")
                return None

    @staticmethod
    def google_search(query, size=10, UserAgent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'):
        """Google search api.

        input: search keyword, size = number of entries, UserAgent (optional)
        output: list of dicts [{"href": href, "title": title, "abstract": abstract}]
                or None on request error
        Note: needs a proxy to be reachable from mainland China.
        """
        try:
            # uule pins the search location; hl/gl force English/US results
            url = f"https://www.google.com.hk/search?q={query}&oq={query}&uule=w+CAIQICIaQXVzdGluLFRleGFzLFVuaXRlZCBTdGF0ZXM&hl=en&gl=us&sourceid=chrome&ie=UTF-8%22#ip=1"
            headers = {
                "User-Agent": UserAgent
            }
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            results = []
            for result in soup.select("#search div.g"):
                link_element = result.find("a")
                href = link_element.get("href")
                # some result cards carry an anchor without an <h3>; skip the
                # crash and record an empty title instead
                title_element = link_element.find("h3")
                title = title_element.get_text() if title_element else ""

                abstract_elements = result.select("div.IsZvec span")
                abstract = ''.join(element.get_text() for element in abstract_elements)

                results.append({
                    "href": href,
                    "title": title,
                    "abstract": abstract
                })
                # stop at the requested number of entries
                if len(results) == size:
                    break

            return results

        except requests.exceptions.RequestException as e:
            print(f"An error occurred: {e}")
            return None

    @staticmethod
    def yahoo_search(query, size=10, UserAgent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'):
        """Yahoo search api.

        input: search keyword, size = number of entries, UserAgent (optional)
        output: list of dicts [{"href": href, "title": title, "abstract": abstract}]
                or None on request error
        Note: needs a proxy to be reachable from mainland China.
        """
        try:
            url = f"https://search.yahoo.com/search?p={query}&ei=UTF-8&fr=fp-tts"
            headers = {
                "User-Agent": UserAgent
            }
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            results = []
            containers = soup.select(".searchCenterMiddle")
            if not containers:
                # layout changed or the request was blocked: no result
                # container at all — return an empty list, not an IndexError
                return results
            for result in containers[0].children:
                # only <li> children are actual result entries
                if result.name == "li":
                    link_element = result.find("a")
                    href = link_element.get("href")
                    # Yahoo puts the readable title in the anchor's aria-label
                    title = link_element.get("aria-label")

                    comp_text_element = result.find(class_="compText")
                    abstract = comp_text_element.get_text() if comp_text_element else ""

                    results.append({
                        "href": href,
                        "title": title,
                        "abstract": abstract
                    })
                    # stop at the requested number of entries
                    if len(results) == size:
                        break

            return results

        except requests.exceptions.RequestException as e:
            print(f"An error occurred: {e}")
            return None

    @staticmethod
    def duckduckgo_search(query, size=10, UserAgent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'):
        """DuckDuckGo search api.

        input: search keyword, size = number of entries, UserAgent (optional)
        output: list of dicts [{"href": href, "title": title, "abstract": abstract}]
                or None on request error
        Note: needs a proxy to be reachable from mainland China.
        """
        try:
            # kl=hk-tzh pins the region/language of the results
            url = f"https://duckduckgo.com/?q={query}&kl=hk-tzh&ia=web"
            headers = {
                "User-Agent": UserAgent
            }
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            results = []
            # slice caps the number of entries at `size`
            for result in soup.select("#react-layout ol li")[:size]:
                link_element = result.select_one("div:nth-child(2) a")
                href = link_element.get("href")
                title = link_element.get_text()

                abstract_element = result.select_one("div:nth-child(3) > div")
                abstract = abstract_element.get_text() if abstract_element else ""

                results.append({
                    "href": href,
                    "title": title,
                    "abstract": abstract
                })

            return results

        except requests.exceptions.RequestException as e:
            print(f"An error occurred: {e}")
            return None