## TODO: line 16 has a flaw to fix; the logic was also not split into helper functions.


import time
from urllib import parse
import requests
from bs4 import BeautifulSoup
import re
import os

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'}


def get522Screenshot(i):
    """Download every screenshot album linked from listing page *i* of 522gg.com.

    For each album anchor on the listing page, creates a local directory named
    after the album title (with Windows-illegal filename characters stripped)
    and saves every image from the album page into it.

    :param i: 1-based listing page number to scrape.
    :raises requests.RequestException: on network failure.
    :raises AttributeError: if the expected page structure is missing.
    """
    # One session reuses the TCP connection and sends the browser UA on
    # every request (the original only sent headers on the listing request).
    session = requests.Session()
    session.headers.update(headers)

    list_url = f'https://www.522gg.com/screenshot/page_1_{i}.html'
    response = session.get(list_url, timeout=10)
    response.encoding = 'utf-8'  # site serves UTF-8; set explicitly to avoid mojibake
    main_page = BeautifulSoup(response.text, "html.parser")
    anchors = main_page.find("div", attrs={"class": "supplier_photo_main"}).find_all("a")
    for anchor in anchors:
        # Resolve against the listing URL in case the href is relative.
        href = parse.urljoin(list_url, anchor.get('href'))
        directory_name = anchor.find("span").get_text()
        # Strip characters that are illegal in Windows file names.
        img_directory_name = re.sub(r'[\\/:*?"<>|\r\n]+', "", directory_name)
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(img_directory_name, exist_ok=True)
        print(href)
        print(img_directory_name)
        _download_album(session, href, img_directory_name)
    print("all over")


def _download_album(session, album_url, img_directory_name):
    """Fetch one album page and save each of its images into *img_directory_name*."""
    child_page_resp = session.get(album_url, timeout=10)
    child_page_resp.encoding = "utf-8"
    child_page = BeautifulSoup(child_page_resp.text, "html.parser")
    for li in child_page.find("div", attrs={"class": "bd"}).find_all("li"):
        # Resolve the image URL against the album page in case it is relative.
        img_url = parse.urljoin(album_url, li.find("img").get("src"))
        img_resp = session.get(img_url, timeout=10)
        img_name = img_url.split("/")[-1]  # last path segment of the URL
        with open(os.path.join(img_directory_name, img_name), mode="wb") as f:
            f.write(img_resp.content)
        print(img_name + " is over")
        time.sleep(1)  # polite delay between downloads to avoid an IP ban


get522Screenshot(2)

# url_ = 'https://www.vmgirls.com/12945.html'
# response = requests.get(url=url_, headers=headers)
# html = response.text
# dir_name = re.findall('<h1 class="post-title h3">(.*?)</h1>', html)[-1]
# if not os.path.exists(dir_name):
#     os.mkdir(dir_name)
#     print(dir_name)
# urls = re.findall('<a href="(.*?)" alt=".*?" title=".*?"', html)
# for url in urls:
#     time.sleep(1)
#     url = parse.urljoin('http://www.vmgirls.com/', url)
#     print(url)
#     file_name = url.split('/')[-1]
#     response = requests.get(url=url, headers=headers)
#     with open(dir_name + '/' + file_name, mode='wb') as f:
#         f.write(response.content)
#         print('正在保存', file_name)
