# @Author: 唐奇才
# @Time: 2021/5/24 22:45
# @File: spider.py
# @Software: PyCharm

import os

import requests
from bs4 import BeautifulSoup

from SpiderUtils.myUtils import myFakerHeaders

# Search-result URL template; `{}` is filled with the page number.
# Query params presumably select categories/purity bitmasks and the past-year
# toplist sorted descending — verify against the wallhaven search API docs.
# NOTE(review): name is misspelled ("ULR") but is referenced by get_details;
# renaming would break callers, so it is kept as-is.
baseULR = "https://wallhaven.cc/search?categories=111&purity=110&topRange=1y&sorting=toplist&order=desc&page={}"


def get_html(url):
    """Fetch *url* with a randomized User-Agent and proxy.

    Returns the raw ``requests.Response`` object (not ``.text``), because
    some callers need the binary ``.content`` to save image files.

    :param url: absolute URL to fetch
    :raises requests.RequestException: on network failure or timeout
    """
    header = {'User-Agent': myFakerHeaders.getFakeHeaders()}
    p = myFakerHeaders.getFakeProxy()
    # timeout prevents the crawler from hanging forever on a dead proxy;
    # without it requests.get can block indefinitely.
    req = requests.get(url=url, headers=header, proxies=p, timeout=30)
    return req


def get_details(i):
    """Process search-result page *i*: extract every thumbnail URL and hand
    it to ``chage_url`` for conversion and download.

    :param i: 1-based search-result page number
    """
    html = get_html(baseULR.format(i))
    html.encoding = "utf-8"
    bs = BeautifulSoup(html.text, "html.parser")
    lis = bs.select(".thumb-listing-page ul li")
    for li in lis:
        # Some <li> entries (placeholders/ads) carry no <img> tag, and some
        # images lack the lazy-load "data-src" attribute; the original code
        # crashed with AttributeError/KeyError on those — skip them instead.
        if li.img is None:
            continue
        img_src = li.img.get("data-src")
        if img_src:
            chage_url(img_src, i)


def is_girls(href):
    """Return True if the wallpaper detail page at *href* has a tag
    containing "girl" or "women".

    Matching is deliberately a loose substring test ("girl" also matches
    "girls"), preserving the original behavior.

    :param href: wallpaper detail-page URL
    """
    html = get_html(href)
    # BUG FIX: BeautifulSoup takes the parser name as its `features`
    # argument; the previous `parser="html.parser"` keyword was silently
    # ignored, so bs4 guessed a parser (warning + platform-dependent output).
    bs = BeautifulSoup(html.text, "html.parser")
    # Join once instead of quadratic `+=` string concatenation in a loop.
    tags_str = "".join(li.text for li in bs.select("#tags li"))
    return "girl" in tags_str or "women" in tags_str


def get_img(href):
    """Return the full-resolution image URL found on a wallpaper detail page.

    Retries up to 5 times on network errors or non-200 responses.

    :param href: wallpaper detail-page URL
    :return: the ``src`` of the ``#wallpaper`` element, or None if every
        attempt failed.
    """
    attempts = 5
    while attempts > 0:
        # BUG FIX: the counter was only decremented inside `except`, so a
        # persistent non-200 status spun forever. Decrement unconditionally.
        attempts -= 1
        try:
            html = get_html(href)
            print("get_img", html.status_code)
            if html.status_code == 200:
                bs = BeautifulSoup(html.text, "html.parser")  # was wrong `parser=` kwarg
                rc = bs.select("#wallpaper")
                if rc:  # guard: previously an empty result raised IndexError
                    return rc[0]["src"]
        except requests.RequestException:
            # Network/proxy hiccup — retry (get_html picks a fresh proxy).
            # Was a bare `except:`, which also swallowed KeyboardInterrupt.
            pass
    return None


def chage_url(url1, i):
    """Convert a thumbnail URL into its detail-page URL, then download the
    full image (with retries) if the wallpaper is tagged girl/women.

    URL patterns for reference:
        thumb : https://th.wallhaven.cc/small/x8/x88o53.jpg
        full  : https://w.wallhaven.cc/full/x8/wallhaven-x88o53.jpg
        detail: derived below by stripping host prefix, folder and extension

    :param url1: thumbnail URL taken from the listing page's ``data-src``
    :param i: page number, used to prefix the saved filename
    """
    # NOTE: the unused full-URL computation (old_end/new_end/new) from the
    # original was dead code and has been removed.
    folder = url1.split("/")[-2]

    # NOTE(review): this chained-replace derivation is fragile — it assumes
    # "th." appears once in the host and a 4-character extension (".jpg").
    detail = url1.replace("th.", "").replace("small", "w")[:-4].replace(folder + '/', "")
    print(detail, end=" | ")
    if is_girls(detail):
        # Ensure the target directory exists; previously the first download
        # crashed with FileNotFoundError when ./images was missing.
        os.makedirs("./images", exist_ok=True)
        attempts = 5
        while attempts > 0:
            print()
            print(attempts, "-----------------start")
            img_src = get_img(detail)
            if img_src is None:  # identity check; was `== None`
                attempts -= 1
                continue
            html = get_html(img_src)
            print(html.status_code)
            print(detail)
            print(img_src)
            print(attempts, '-----------------end')
            attempts -= 1
            if html.status_code == 200:
                attempts = 0  # success: stop retrying
                with open("./images/{}_{}".format(i, img_src.split("/")[-1]), "wb") as f:
                    f.write(html.content)
                    print(i, "下载成功")


def main():
    """Crawl search-result pages 1 through 3289, downloading every
    matching wallpaper from each page in turn."""
    page = 1
    while page < 3290:
        print("\n开始第{}页下载".format(page))
        get_details(page)
        page += 1


# Entry point: run the crawler only when this file is executed directly,
# not when it is imported as a module.
if __name__ == '__main__':
    main()
