from bs4 import BeautifulSoup
import requests
import os
import shutil
import re

# Request headers sent with every page fetch. NOTE(review): the Cookie and
# Referer values look copied from an infoq.com scraper, while the target
# below is cnu.cc — presumably harmless, but verify they are still needed.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Connection": "close",
    "Cookie": "_gauges_unique_hour=1; _gauges_unique_day=1; _gauges_unique_month=1; _gauges_unique_year=1; _gauges_unique=1",
    "Referer": "http://www.infoq.com",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER"
}

# Listing page to scrape ("hot portrait photography" category on cnu.cc).
url = 'http://www.cnu.cc/discoveryPage/hot-人像'


def download_img(img_url, img_localpath, timeout=10):
    """Download a single image and save it to a local file.

    Streams the response body straight to disk so large images are never
    held fully in memory.

    Args:
        img_url: Absolute URL of the image to fetch.
        img_localpath: Destination file path for the image bytes.
        timeout: Seconds to wait for the server before giving up
            (new parameter; default 10 keeps callers working while
            preventing the previous hang-forever behavior).
    """
    # With stream=True the connection stays open until the response is
    # consumed or closed; the `with` block guarantees it is released.
    with requests.get(img_url, stream=True, timeout=timeout) as response:
        if response.status_code == 200:
            # Undo any transfer content-encoding (gzip/deflate) so the
            # raw stream yields the actual image bytes.
            response.raw.decode_content = True
            with open(img_localpath, 'wb') as f:
                shutil.copyfileobj(response.raw, f)


# http://img.cnu.cc/uploads/images/flow/1901/27/40aed5a738603c72bcd70c6bfd40b228.jpg?width=3358&height=1889
def craw_img(url):
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')

    # for pic_href in soup.find_all('div', class_='grid-item work-thumbnail'):
    for pic_href in soup.find_all('img'):
        img_url = pic_href.get('src')
        dir = os.path.abspath('.')
        # 当前路径加上/image
        dir = dir + '/image'
        filename = os.path.basename(img_url)
        # 当前文件名去掉?width=3358&height=1889
        filename = re.sub(r'\?.*$', '', filename)
        #print(filename)
        img_localpath = os.path.join(dir, filename)
        print('downloading..... %s ' % img_url)
        download_img(img_url, img_localpath)


if __name__ == '__main__':
    # Run the crawl only when executed as a script, not when imported.
    craw_img(url)