import os
import uuid
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Browser-like request headers captured from a desktop Chromium/Edge session.
# NOTE(review): this dict is defined but never passed to requests.get() below,
# and its Host/Cookie values belong to cn.pornhub.com, not the URL actually
# fetched — it appears to be leftover from an earlier version of the script.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,zh-TW;q=0.5",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Cookie": "ua=771410d30f9309a16dc0d25e4999dec0; platform_cookie_reset=pc; platform=pc; bs=z9zluvh2c22z0w0w18s9u1rpz1rucr5k; ss=322596816450719248; RNLBSERVERID=ded6729; _ga=GA1.2.1719811878.1583068960; _gid=GA1.2.2048297946.1583068960; _gat=1; performance_timing=home",
    "Host": "cn.pornhub.com",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36 Edg/80.0.361.62",
}

# Fetch the listing page and collect every <img> tag on it.
url = 'https://shanghai.mmall.com/'
# BUG FIX: timeout is in *seconds*; the original `timeout=10000` (~2.8 hours)
# was almost certainly a milliseconds value — 10 s is a sane request timeout.
# NOTE(review): the `headers` dict above is deliberately NOT sent here: its
# Host/Cookie values target a different site and would break this request.
html = requests.get(url, timeout=10).content
soup = BeautifulSoup(html, 'html.parser')
# soup(name) is shorthand for soup.find_all(name) — a list of Tag objects.
result = soup('img')

# Download every image found on the page into ./temp/, one random-named file each.
# BUG FIX: the target directory was never created, so the very first open()
# would raise an uncaught FileNotFoundError on a fresh checkout.
os.makedirs('./temp', exist_ok=True)

for r in result:
    if 'src' in r.attrs:
        # BUG FIX: src attributes are frequently relative ("/img/a.png");
        # resolve them against the page URL instead of letting requests
        # raise MissingSchema on every relative link.
        imageUrl = urljoin(url, r['src'])
        print(imageUrl)
        try:
            # BUG FIX: added a timeout (the original could hang forever) and
            # a real chunk_size — iter_content() defaults to 1 byte per chunk.
            image = requests.get(imageUrl, stream=True, timeout=10)
            # NOTE(review): every file is saved as .png regardless of its real
            # format — TODO derive the extension from the URL or Content-Type.
            with open('./temp/' + str(uuid.uuid4()) + '.png', 'wb') as fd:
                for chunk in image.iter_content(chunk_size=8192):
                    fd.write(chunk)
        # BUG FIX: ValueError alone missed connection/timeout failures, which
        # killed the whole loop; RequestException covers them. ValueError is
        # kept so previously-handled cases still behave the same.
        except (ValueError, requests.exceptions.RequestException):
            print("图片文件有误")