import requests
from bs4 import BeautifulSoup
from da import Ua






def parseBaidu(searchName, head=None):
    """Search Baidu (site-restricted to www.biquwx.la) for *searchName* and
    scrape the result list.

    Args:
        searchName: Book title / keyword to search for.
        head: Optional request headers dict. When None, a default dict with
            a random User-Agent from ``Ua.randomUA()`` is built.

    Returns:
        list[dict]: one dict per result with keys ``"baidu_title"`` and
        ``"baidu_url"`` (the display URL prefixed with ``https://``).
    """
    # Bug fix: the original unconditionally overwrote `head`, silently
    # discarding caller-supplied headers. Build the default only when absent.
    if head is None:
        head = {
            "User-Agent": Ua.randomUA(),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
        }
    url = "https://www.baidu.com/s?wd=site:www.biquwx.la {} 目录&usm=3&rsv_idx=2&rsv_page=1".format(searchName)
    html = requests.get(url, headers=head)
    bs = BeautifulSoup(html.content, "html.parser")
    content = bs.select("div#content_left > div.result.c-container.new-pmd")
    item = []
    for c in content:
        title_tag = c.select_one("h3.t a")
        url_tag = c.select_one("div.f13.c-gap-top-xsmall.se_st_footer.user-avatar > a.c-showurl.c-color-gray")
        # Bug fix: select_one returns None when a result block lacks the
        # expected markup; the original then raised AttributeError on .text.
        if title_tag is None or url_tag is None:
            continue
        print(title_tag.text)
        item.append({"baidu_title": title_tag.text, "baidu_url": "https://" + url_tag.text})
    return item


def parse360(searchName, head=None):
    """Search 360 (so.com, site-restricted to www.biquwx.la) for *searchName*
    and scrape the result list.

    Args:
        searchName: Book title / keyword to search for.
        head: Optional request headers dict. When None, a default dict with
            a random User-Agent from ``Ua.randomUA()`` is built.

    Returns:
        list[dict]: one dict per result with keys ``"baidu_title"`` and
        ``"baidu_url"`` (key names kept for backward compatibility with
        ``parseBaidu`` callers).
    """
    # Bug fix: the original called bare randomUA(), which is undefined in
    # this module (only `Ua` is imported from `da`) and raised NameError.
    # Also build default headers only when the caller supplied none.
    if head is None:
        head = {
            "User-Agent": Ua.randomUA(),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
        }
    url = "https://www.so.com/s?q=site:www.biquwx.la {} 目录".format(searchName)

    html = requests.get(url, headers=head)
    bs = BeautifulSoup(html.content, "html.parser")
    # NOTE(review): these selectors look copy-pasted from the Baidu parser
    # (div#content_left / .c-container are Baidu markup) and likely match
    # nothing on so.com result pages — verify against 360's actual HTML.
    content = bs.select("div#content_left > div.result.c-container.new-pmd")
    item = []
    for c in content:
        title_tag = c.select_one("h3.t a")
        url_tag = c.select_one("div.f13.c-gap-top-xsmall.se_st_footer.user-avatar > a.c-showurl.c-color-gray")
        # Skip malformed result blocks instead of raising AttributeError.
        if title_tag is None or url_tag is None:
            continue
        print(title_tag.text)
        item.append({"baidu_title": title_tag.text, "baidu_url": "https://" + url_tag.text})
    return item


if __name__ == "__main__":
    # Example usage:
    # searchName = "修界最强赘婿"
    # parseBaidu(searchName)
    # Bug fix: bare randomUA() is undefined at module level (NameError);
    # the function lives on the imported `Ua` class.
    print(Ua.randomUA())