import re
import requests
from urllib import error
from bs4 import BeautifulSoup
import os

import time
num = 0  # running count of images saved so far (cumulative across keywords)
numPicture = 0  # download target; raised by `tm` after each keyword finishes
file = ''  # destination directory for the keyword currently being downloaded
sub_url_list = []  # per-page lists of image URLs collected by get_img_url()


def get_img_url(url, A):
    """Scan Baidu image-search result pages and collect image URLs.

    Result pages are fetched at offsets 0, 60, 120, ... (up to 1500) by
    appending the offset to *url*.  Every ``"objURL"`` value found on a page
    is appended, as one list per page, to the module-level ``sub_url_list``.
    Scanning stops early at the first page with no matches.

    Args:
        url: Search URL ending in ``pn=`` so the page offset can be appended.
        A: A ``requests.Session`` (or any object with a compatible ``.get``).

    Returns:
        int: Total number of image URLs discovered.
    """
    global sub_url_list
    offset = 0
    total = 0
    while offset < 1500:
        page_url = url + str(offset)
        try:
            response = A.get(page_url, timeout=20, allow_redirects=False)
        except Exception:
            # Narrowed from BaseException: a failed page is skipped, but
            # KeyboardInterrupt/SystemExit now propagate instead of being
            # silently swallowed.
            offset += 60
            continue
        # Image URLs are embedded as "objURL":"..." in the page payload.
        pic_url = re.findall('"objURL":"(.*?)",', response.text, re.S)
        total += len(pic_url)
        if not pic_url:
            break  # no more results for this keyword
        sub_url_list.append(pic_url)
        offset += 60
    return total

def dowmloadPicture(html, keyword):
    """Download every image referenced by ``"objURL"`` entries in *html*.

    Images are written into the module-level directory ``file`` as
    ``<keyword>_<num>.jpg``.  The global counter ``num`` advances once per
    saved image; the function returns early once ``num`` reaches the global
    target ``numPicture``.  Tiny responses (< 80 KiB) are treated as
    placeholder/error pages and skipped.

    Args:
        html: Raw text of a Baidu image-search result page.
        keyword: Search keyword, used to build the output filename.
    """
    global num
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)

    for each in pic_url:
        print('正在下载第' + str(num + 1) + '张图片，图片地址:' + str(each))
        time.sleep(0.01)  # brief pause to avoid hammering the image hosts
        if each is None:
            continue
        try:
            pic = requests.get(each, timeout=7)
        except Exception:
            # Narrowed from BaseException so Ctrl-C still interrupts the run.
            print('错误，当前图片无法下载')
            continue
        if len(pic.content) < 1024 * 80:
            print("文件太小了，继续")
            continue
        # os.path.join is portable; the previous hard-coded r'\\' separator
        # only worked on Windows.
        path = os.path.join(file, keyword + '_' + str(num) + '.jpg')
        with open(path, 'wb') as fp:  # 'with' closes the handle even on error
            fp.write(pic.content)
        num += 1
        if num >= numPicture:
            return

if __name__ == '__main__':  # script entry point
    # Browser-like headers so Baidu serves the normal result pages.
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Upgrade-Insecure-Requests': '1'
        }

    sess = requests.Session()
    sess.headers = headers

    tm = 500  # images to download per keyword
    numPicture = tm

    # Keywords to search for (clothing / full-body reference photos).
    line_list = ['纯色背景美女全身照', '纯色帅哥全身照', '短裤照片', '长裤照片', '牛仔裤照片',
                 '休闲长裤照片', '连衣裙照片', 'T恤照片', '短衬衫照片', '长衬衫照片', '短裙照片']

    for word in line_list:
        url = 'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + '&pn=?'
        img_totals = get_img_url(url, sess)
        print('经过检测%s类图片共有%d张' % (word, img_totals))

        # Pick (and create) the output directory; fall back to an alternate
        # name if a previous run already created the default one.
        file = word + '文件'
        if os.path.exists(file):
            print('该文件已存在，请重新输入')
            file = word + '文件夹2'
        os.mkdir(file)

        t = 0
        tmp = url
        while t < numPicture:
            try:
                url = tmp + str(t)
                result = sess.get(url, timeout=10, allow_redirects=False)
            except requests.exceptions.RequestException:
                # Bug fix: Session.get raises requests exceptions, never
                # urllib.error.HTTPError, so the original handler could not
                # fire and any network error crashed the script.
                print('网络错误，请调整网络后重试')
                t = t + 60
            else:
                dowmloadPicture(result.text, word)
                t = t + 60
        # Raise the cumulative target so the next keyword downloads tm more
        # (num is a cumulative global and is never reset between keywords).
        numPicture = numPicture + tm
        