import os
import shutil

from bs4 import BeautifulSoup
import requests,re

# Browser-like request headers (currently not passed by craw(); kept for opt-in use).
# Fixed: HTTP header names must not contain spaces — "Accept - Encoding" and
# friends were malformed and would be sent verbatim. The hard-coded
# "Host": "httpbin.org" entry was removed: it was left over from a different
# example and does not match the target site (www.cnu.cc); requests sets the
# correct Host header automatically from the URL.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400",
}

# Landing page to crawl: the CNU "hot" discovery page for the 人像 (portrait) category.
url = "http://www.cnu.cc/discoveryPage/hot-人像"

def craw(url):
    """Fetch *url*, find every <img> inside div.grid containers, and download
    each image into ./downloadImg via download_pic().

    NOTE(review): the custom ``headers`` dict is intentionally not passed here
    (it was already commented out in the original) — confirm whether the site
    requires browser headers before enabling it.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    # Hoisted out of the loop: the target directory never changes, and it must
    # exist before download_pic() tries to open a file inside it (the original
    # crashed on first run with FileNotFoundError). Also avoids shadowing the
    # builtin `dir`.
    save_dir = os.path.abspath("./downloadImg")
    os.makedirs(save_dir, exist_ok=True)
    for grid in soup.find_all('div', class_='grid'):
        for pic in grid.find_all('img'):
            imgurl = pic.get('src')
            if not imgurl:
                # <img> tags without a src (e.g. lazy-loaded) yield None; skip them.
                continue
            # Strip any query string before taking the basename for the filename.
            filename = os.path.basename(imgurl.split("?")[0])
            imgpath = os.path.join(save_dir, filename)
            # Fixed: the original passed two arguments to print(), so the %s
            # placeholder was never substituted.
            print("开始下载 %s" % imgurl)
            download_pic(imgurl, imgpath)


def download_pic(image_url, image_localpath):
    """Stream the image at *image_url* to *image_localpath*.

    Non-200 responses are silently skipped (best-effort crawl, preserved from
    the original behavior).
    """
    response = requests.get(image_url, stream=True)
    try:
        if response.status_code == 200:
            # Fixed typo: the attribute is ``decode_content`` (the original wrote
            # ``deconde_content``, which just set an unused attribute), so
            # gzip/deflate-encoded bodies are now decompressed before saving.
            response.raw.decode_content = True
            with open(image_localpath, 'wb') as f:
                shutil.copyfileobj(response.raw, f)
    finally:
        # Release the underlying connection back to the pool even on error.
        response.close()


# Guarded entry point: run the crawl only when executed as a script, so that
# importing this module for reuse does not trigger network I/O as a side effect.
if __name__ == "__main__":
    craw(url)