import os
import random
import time
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup

"""
采集图片的url
    requests.get(url, headers) 获得html
    BeautifulSoup(html, "lxml") 对象格式化html进行查找
    urls对图片提示名称alt和图片路径href的收集
"""


def collect_url():
    """Scrape list pages 2-7 and collect image name/URL pairs.

    Returns:
        list[str]: entries of the form "<alt text>=<detail-page href>",
        one per image found in an "item-img" element.
    """
    urls = []
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 HBPC/11.0.4.301"
    }
    for num in range(2, 8):
        url = "http://www.shuaia.net/index_%d.html" % num
        # BUG FIX: the second positional argument of requests.get is
        # `params`, not `headers` — pass headers by keyword so the
        # User-Agent is actually sent.
        req = requests.get(url, headers=headers)
        req.encoding = "utf-8"
        bf = BeautifulSoup(req.text, "lxml")
        for each in bf.find_all(class_="item-img"):
            # "<alt>=<href>" pairs; download_img() splits these back apart.
            urls.append(each.img.get("alt") + "=" + each.get("href"))
    print("收集完成,收集图片数", len(urls))
    return urls


"""
下载图片
    思路：找到大图路径，再进行下载
    1、取出收集的图片url和图片的名称
    2、requests.get(target_url, headers)访问图片路径获得url
    3、通过bf.find_all("div", class_="wr-single-content-list")，bf2.div.img.get("src")进行两次查找，找到图片的具体位置
    
"""


def download_img(list_url):
    """Download each image described by a "name=url" entry.

    Args:
        list_url: list of "<name>=<detail-page url>" strings produced
            by collect_url().

    Side effects: creates d:/LOG/images/ if missing and writes one
    <name>.jpg file per entry; errors per image are printed, not raised.
    """
    # Loop invariants hoisted: build the headers and target directory once.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 HBPC/11.0.4.301"
    }
    path = "d:/LOG/images/"
    if not os.path.exists(path):
        os.makedirs(path)

    for each_img in list_url:
        # BUG FIX: split only on the FIRST "=" — detail-page URLs may
        # themselves contain "=" (query strings), which the old
        # split("=") silently truncated.
        name, target_url = each_img.split("=", 1)
        filename = name + ".jpg"
        print('下载：' + filename)
        # BUG FIX: headers must be a keyword argument; positionally it
        # would be interpreted as `params`.
        img_req = requests.get(target_url, headers=headers)
        img_req.encoding = "utf-8"
        bf = BeautifulSoup(img_req.text, "lxml")
        # Two-stage lookup: narrow to the content-list div, then take the
        # first <img> src inside it — that is the full-size image URL.
        img_div = bf.find_all("div", class_="wr-single-content-list")
        bf2 = BeautifulSoup(str(img_div), "lxml")
        img_url = bf2.div.img.get("src")

        try:
            urlretrieve(url=img_url, filename=path + filename)
        except Exception as e:
            # Best-effort: report the failure and continue with the rest.
            print("有异常，异常如下\n %s:" % e)
        # Random short pause (0.15-0.45 s) to avoid hammering the server.
        time.sleep(random.randint(1, 3) * 0.15)


if __name__ == "__main__":
    list_url = collect_url()
    download_img(list_url)
