import requests
import re
import os
# pip install fake_useragent
from fake_useragent import UserAgent

# Target URL: Baidu image search for "fruit" (word=%E6%B0%B4%E6%9E%9C is
# the URL-encoded Chinese word for "fruit").
url='https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=%E6%B0%B4%E6%9E%9C'
# Request headers: a randomized Chrome User-Agent (fake_useragent) plus
# browser-like Host/Accept headers so the request looks like a normal visit.
form_header = {
    "User-Agent": UserAgent().chrome,
    "Host":"image.baidu.com",
    "Accept-Language":"zh-CN,zh;q=0.9",
    "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
}
def main():
    """Baidu image download crawler.

    Fetches the search-result page for the module-level ``url``, extracts
    every ``objURL`` image link and downloads each one into
    ``./images/case_04/``. Relies on the sibling helpers
    ``get_image_name`` and ``img_download``.
    """
    res = requests.get(url, headers=form_header).text
    # Image links are embedded as "objURL":"..." in the page's JSON blobs.
    image_urls = re.findall('"objURL":"(.*?)"', res, re.S)
    # Create the download directory once, up front. makedirs with
    # exist_ok replaces the original per-iteration exists/mkdir dance
    # and also creates the intermediate ./images/ level.
    download_path = './images/case_04/'
    os.makedirs(download_path, exist_ok=True)
    for image_url in image_urls:
        image_name = get_image_name(image_url)
        # Dots escaped (r'\.jpg') so a name like "photojpg" no longer
        # matches by accident.
        image_end = re.search(r'(\.jpg|\.png|\.jpeg|\.gif)$', image_name)
        if image_end is None:
            # URL does not end in a known extension: truncate at the first
            # recognizable extension inside it (same precedence order as
            # before), or drop the URL entirely.
            for ext in ('.jpg', '.png', '.jpeg', '.gif'):
                pos = image_url.find(ext)
                if pos > 0:
                    image_url = image_url[:pos + len(ext)]
                    break
            else:
                image_url = None
        if image_url is not None:
            # The URL may have been truncated above, so recompute the name.
            image_name = get_image_name(image_url)
            img_download(image_url, image_name, download_path)
            # img_download is best-effort; retry once if no file appeared.
            if not os.path.exists(download_path + image_name):
                img_download(image_url, image_name, download_path)
def get_image_name(image_url):
    """Derive a local file name from an image URL.

    Takes the last path segment of *image_url*, strips any query string,
    and — when the segment contains percent-escapes — keeps only the part
    after the last '%' (preserved quirk of the original). The result is
    reduced to at most one ``stem.ext`` pair.

    Bug fix vs. the original: the query string is now stripped
    unconditionally, not only when a '%' was present, so
    ``img.jpg?x=1`` correctly becomes ``img.jpg``.
    """
    image_name = image_url.split('/')[-1]
    # Keep the tail after the last percent-escape (original behavior).
    if image_name.find('%') > 0:
        image_name = image_name.split('%')[-1]
    # Always drop a trailing query string.
    image_name = image_name.split('?')[0]
    # Collapse multi-dot names ("a.b.c.jpg") to the last stem + extension.
    parts = image_name.split('.')
    if len(parts) > 1:
        image_name = parts[-2] + '.' + parts[-1]
    return image_name

def img_download(image_url, image_name, download_path):
    """Download one image to ``download_path`` + sanitized *image_name*.

    Best-effort by design: any request or filesystem error is swallowed so
    that a single bad URL cannot abort the whole crawl (the caller retries
    once by checking whether the file exists afterwards).
    """
    try:
        # Timeout added so a hung server cannot stall the crawler forever.
        image_res = requests.get(image_url, timeout=10)
        # Keep the original's strict 200 check; `.ok is True` was redundant.
        if image_res.status_code == 200:
            # Drop any trailing '&...' junk still left in the name.
            with open(download_path + image_name.split("&")[0], 'wb') as file:
                file.write(image_res.content)
                # (explicit file.close() removed: `with` already closes)
    except Exception:
        # Deliberate silent skip — see docstring.
        pass

# Script entry point: announce start/finish (messages are in Chinese,
# "start crawling Baidu fruit/vegetable images" / "crawl finished").
if __name__ == '__main__':
    print("开始爬取百度果蔬图片")
    main()
    print("爬取结束")
