import requests
from bs4 import BeautifulSoup
import re


def parse(url):
    """
    Scrape a single OnlyFans listing/search page and extract its entries.

    :param url: absolute URL of the listing page for one creator/search.
    :return: dict with keys ``url``, ``pageTitle``, ``searchNum`` and
             ``sourceList`` — the latter a list of dicts with
             ``sourceTitle``, ``sourceUrl`` and ``topic``
             (``"picture"`` or ``"video"``).
    :raises requests.HTTPError: if the server responds with an error status.
    :raises IndexError: if the URL has no scheme/host prefix or the page
                        lacks the expected ``wrap``/``title`` structure.
    """
    # Scheme + host prefix (e.g. "https://example.com/"), used below to
    # resolve the relative hrefs found in each entry.
    pattern = re.compile(r'^http[s]?://.*?/')
    basePath = re.findall(pattern, url)[0]
    # Impersonate a regular desktop browser: sites commonly reject requests
    # whose User-Agent looks like a bot or scripted client.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    response = requests.get(url=url, headers=headers)
    # Fail loudly on 4xx/5xx instead of silently parsing an error page.
    response.raise_for_status()
    # Force UTF-8 decoding to avoid mojibake — the page contains Chinese text.
    response.encoding = 'utf-8'

    print("\n开始解析网页=》=》=》")
    print("页面地址：  " + url)
    # Build the document tree with the stdlib html.parser backend.
    bs = BeautifulSoup(response.text, "html.parser")
    print("页面标题：  " + bs.title.string)

    pageBox = bs.find_all(class_="wrap")[1]  # second "wrap" div = page body
    # BUGFIX: `recursive` expects a bool — pass True, not the magic int 1.
    pageTitleBox = pageBox.find_all(class_="title", recursive=True)[0]
    pageTitle = pageTitleBox.find("h3").string
    searchNum = pageTitleBox.find("em").string
    print("搜索标题：  " + pageTitle)
    print("搜索数量：  " + searchNum)

    sourceList = []
    for item in pageBox.select("dl"):  # one <dl> per listed resource
        sourceTitle = item.find("h3").string
        # hrefs on the page are relative; prepend the scheme+host prefix.
        sourceUrl = basePath + item.find("a")["href"]
        # Classify the entry: a title containing "图片" ("picture") or a
        # "<digits>P" photo-count marker is an image set; otherwise video.
        if "图片" in sourceTitle or re.findall("[0-9]+P", sourceTitle):
            topic = "picture"
        else:
            topic = "video"
        sourceList.append({"sourceTitle": sourceTitle, "sourceUrl": sourceUrl, "topic": topic})

    resultJson = {"url": url, "pageTitle": pageTitle, "searchNum": searchNum, "sourceList": sourceList}
    print("resultJson" + str(resultJson))

    print("解析结束=》=》=》")
    return resultJson
def search(name):
    """Look up *name* on the OnlyFans listing site and return the parsed page dict."""
    base = 'https://www.a678nh.com/onlyfans/'
    target = base + name
    return parse(target)
# name = "hongkongdoll2"
# search(name)
