import os
import re
from urllib.parse import urljoin, urlsplit

import bs4
import requests

# Landing page whose <img> tags we will harvest.
url = 'https://www.jd.com/?cu=true&utm_source=www.baidu.com&utm_medium=tuiguang&utm_campaign=t_1003608409_&utm_term=cef958b26f87497d89204a6fb88d1103'
HEADER = {  # JD returns HTTP 403 for requests without a browser-like User-Agent
    "USER-AGENT": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"
}
try:
    # Send the same browser headers as the image downloads and bound the wait;
    # raise_for_status turns an HTTP error page into an exception here instead
    # of failing later during parsing.
    htmlfile = requests.get(url, headers=HEADER, timeout=10)
    htmlfile.raise_for_status()
except requests.RequestException as err:
    # Original code printed and fell through, which crashed on the very next
    # line with NameError (htmlfile undefined). Exit cleanly instead.
    raise SystemExit("网页读取失败 %s" % err)

# Parse the fetched page and download every <img> it references into
# img_destdir. Images that are lazy-loaded (no src), malformed, or that fail
# to download are skipped with a message rather than aborting the whole run.
objSoup = bs4.BeautifulSoup(htmlfile.text, 'lxml')
img_list = objSoup.select('img')

img_destdir = 'imglists'
os.makedirs(img_destdir, exist_ok=True)  # idempotent; no exists() pre-check needed

for i in img_list:
    imgUrl = i.get('src')                               # image URL; may be absent
    if not imgUrl:
        continue  # lazy-loaded images carry their URL in data-* attributes, not src
    # src is often protocol-relative ("//img10.360buyimg.com/...") or page-relative;
    # resolve it against the page URL so requests gets a full absolute URL.
    imgUrl = urljoin(url, imgUrl)
    print('%s 图片下载中 ...' % imgUrl)
    try:
        picture = requests.get(imgUrl, headers=HEADER, timeout=10)   # download image
        picture.raise_for_status()                      # verify the download succeeded
    except requests.RequestException as err:
        print('%s 图片下载失败 %s' % (imgUrl, err))      # skip this image, keep going
        continue
    print('%s 图片下载成功' % imgUrl)

    # Derive the on-disk name from the URL *path* only, so query strings
    # ("...jpg?v=123") don't end up in the filename.
    filename = os.path.basename(urlsplit(imgUrl).path)
    if not filename:
        continue  # URL path ends in "/": nothing sensible to name the file
    # Context manager guarantees the file handle is closed even on write errors.
    with open(os.path.join(img_destdir, filename), 'wb') as pictFile:
        for diskStorage in picture.iter_content(10240):
            pictFile.write(diskStorage)


print(type(objSoup), objSoup.title, img_list)