# @Author: 唐奇才
# @Time: 2021/5/25 8:18
# @File: index.py
# @Software: PyCharm

import os

import requests
from bs4 import BeautifulSoup

from SpiderUtils.myUtils import myFakerHeaders

# Search-results URL template; "{}" is filled with the page number.
# Query string presumably encodes category/purity bit flags and a
# "toplist of the last year, descending" sort — TODO confirm against
# the wallhaven search docs. (Name keeps the original "baseULR" typo
# because other functions reference it.)
baseULR = "https://wallhaven.cc/search?categories=111&purity=110&topRange=1y&sorting=toplist&order=desc&page={}"


def get_html(url, timeout=10):
    """Fetch *url* with a randomized User-Agent header and proxy.

    Returns the raw ``requests.Response`` object rather than ``.text``,
    because some callers need the binary ``.content`` (image downloads).

    :param url: absolute URL to fetch.
    :param timeout: seconds before giving up; without it ``requests``
        can block forever on a dead proxy.
    :raises: any ``requests`` exception (connection error, timeout, ...).
    """
    header = {'User-Agent': myFakerHeaders.getFakeHeaders()}
    p = myFakerHeaders.getFakeProxy()
    req = requests.get(url=url, headers=header, proxies=p, timeout=timeout)
    return req

def get_img(href):
    """Return the full-size image URL found on a wallpaper detail page.

    Retries up to 5 times on network errors, non-200 responses, or a
    missing ``#wallpaper`` element; returns None when every attempt fails.

    Fixes two defects in the original: the retry counter only decreased
    inside ``except``, so a persistent non-200 response looped forever,
    and ``BeautifulSoup`` was given a bogus ``parser=`` keyword instead
    of the positional ``features`` argument used elsewhere in this file.
    """
    attempts = 5
    while attempts > 0:
        attempts -= 1  # count every attempt, not just failed ones
        try:
            html = get_html(href)
            print("get_img", html.status_code)
            if html.status_code == 200:
                bs = BeautifulSoup(html.text, "html.parser")
                rc = bs.select("#wallpaper")
                return rc[0]["src"]
        except Exception:
            # network hiccup or page without #wallpaper -> retry
            pass
    return None

def get_details(i):
    """Download every matching wallpaper from search-result page *i*.

    Fetches the listing page (up to 5 attempts). For each thumbnail
    whose detail page satisfies ``is_girls``, resolves the full-size
    image URL and saves it into ``./images/`` as ``{page}_{filename}``.

    The original reused one counter ``a`` for three nested retry loops;
    this version gives each loop its own counter and moves the
    per-image download into a private helper.
    """
    page_tries = 5
    while page_tries > 0:
        html = get_html(baseULR.format(i))
        if html.status_code != 200:
            page_tries -= 1
            continue
        print(baseULR.format(i))
        html.encoding = "utf-8"
        bs = BeautifulSoup(html.text, "html.parser")
        lis = bs.select(".thumb-listing-page ul li")
        for li in lis:
            img_src = li.a["href"]
            if is_girls(img_src):
                _download_wallpaper(i, img_src)
        return


def _download_wallpaper(page, img_src):
    """Resolve *img_src*'s full-size URL and save it (up to 5 attempts)."""
    tries = 5
    while tries > 0:
        print()
        print(tries, "-----------------start")
        img = get_img(img_src)
        if img is None:
            tries -= 1
            continue
        html = get_html(img)
        print(html.status_code)
        print(img_src)
        print(img)
        print(tries, '-----------------end')
        tries -= 1
        if html.status_code == 200:
            # the original crashed with FileNotFoundError when ./images
            # did not exist yet; create it on demand
            os.makedirs("./images", exist_ok=True)
            with open("./images/{}_{}".format(page, img.split("/")[-1]), "wb") as f:
                f.write(html.content)
                print(page, "下载成功")
            return

def is_girls(href):
    """Return True when the wallpaper page at *href* is tagged girl/women.

    Crude substring check: it matches "girl"/"women" anywhere in the
    stringified ``#tags`` element, so e.g. "cowgirl" also matches.
    Fixes the bogus ``parser=`` keyword (``BeautifulSoup`` takes the
    parser name positionally, as line 42 of this file already does).
    """
    html = get_html(href)
    bs = BeautifulSoup(html.text, "html.parser")
    tags_str = str(bs.select("#tags"))
    return "girl" in tags_str or "women" in tags_str

def main():
    """Walk the search-result pages 445..3289, downloading each one."""
    for page in range(445, 3290):
        print("\n开始第{}页下载".format(page))
        get_details(page)

"""
NOTE: If several machines run this at once, the remote end will force
the session offline — only one client may access it at a time, unless
virtual machines are used to simulate multiple computers.
"""

# Script entry point: kick off the full page-by-page download run.
if __name__ == '__main__':
    main()