# -*- coding: utf-8 -*-
# 创建时间：2021/7/19 10:31
from bs4 import BeautifulSoup
import requests
import time, math, random, json
import pymysql
import re
import os
from lxml import etree
from hhy import FileUtil,DateUtil,ToolUtil,HttpUtil

__author__ = 'LuckyHhy'
# Shared HTTP headers for every request in this script.
# NOTE(review): HttpUtil.AgentRandom() presumably returns a random User-Agent
# string — it is evaluated once at import time, so the same UA is reused for
# all requests in a single run; confirm against the hhy helper library.
headers = {
    'User-Agent': HttpUtil.AgentRandom()
}
# Fetch one list page and download its (non-GIF) meme images.
def _make_img_name(alt, suffix):
    """Build a filesystem-safe file name for a downloaded image.

    Uses the image's alt text when it contains no characters that are
    illegal in Windows file names; otherwise (or when alt is empty) falls
    back to a run of random digits.

    :param alt: alt text of the <img> tag, possibly empty.
    :param suffix: file extension including the dot, e.g. '.jpg'.
    :return: a safe file name string ending in ``suffix``.
    """
    # Original code only rejected '|' and '/'; also reject the other
    # characters Windows forbids in file names, plus backslash and newline.
    if alt and not re.search(r'[\\/|:*?"<>\n]', alt):
        return alt + suffix
    # str(random.random()) looks like '0.1234567890...'; slice out ten
    # digits after the '0.' prefix to use as an anonymous name.
    return str(random.random())[2:12] + suffix


def get_data(url, page):
    """Download all non-animated images from one Doutula list page.

    Saves each image under ``D:\\pic\\<page>\\`` and sleeps one second at
    the end to throttle requests.

    :param url: URL of the list page to scrape.
    :param page: 1-based page number; used as the sub-directory name.
    """
    response = requests.get(url, headers=headers)
    html = etree.HTML(response.text)
    imgs = html.xpath("//div[@class='random_picture']//a//img")
    save_path = 'D:\\pic\\{}\\'.format(str(page))
    FileUtil.MakeDir(save_path)
    print('正在下载第{}页图片数据....'.format(page))
    for img in imgs:
        # Skip animated images: the site marks them with a 'gif' CSS class.
        # BUG FIX: the class attribute may hold several space-separated
        # names, so test membership rather than compare the whole string.
        if 'gif' in (img.get('class') or ''):
            continue

        # The real image URL lives in the data-backup attribute; some <img>
        # nodes lack it — guard the empty XPath result (the original code
        # raised IndexError here).
        backup = img.xpath(".//@data-backup")
        if not backup:
            continue
        img_url = backup[0]
        # Second GIF filter, based on the URL's extension.
        if img_url.split('.')[-1] == 'gif':
            continue

        # File extension, including the leading dot.
        suffix = os.path.splitext(img_url)[1]

        # Alt text may also be absent; treat a missing attribute as empty.
        alts = img.xpath(".//@alt")
        img_name = _make_img_name(alts[0] if alts else '', suffix)

        # 'with' closes the file automatically — the explicit f.close()
        # of the original was redundant and has been dropped.
        with open(save_path + img_name, 'wb') as f:
            f.write(requests.get(img_url, timeout=30, headers=headers).content)
            print(img_name + ' 下载完成！')

    print('第{}页图片数据下载完成'.format(page))
    time.sleep(1)


def main():
    """Prompt for a page count and download images for pages 1..N.

    Rejects non-numeric input and counts above 1000 (the site's total),
    then delegates each page to :func:`get_data`.
    """
    print("总计：1000页")
    raw = input("请输入要爬取的页数： ")
    # Robustness: the original crashed with an unhandled ValueError on
    # non-numeric input; parse once and bail out gracefully instead.
    try:
        pages = int(raw)
    except ValueError:
        print("总计：1000页")
        return
    if pages > 1000:
        print("总计：1000页")
        return

    for i in range(1, pages + 1):
        url = "https://www.doutula.com/photo/list/?page={}".format(i)
        get_data(url, i)

    print("爬取完成....")


# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
