import os
import re
import time

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# --- Setup: output directory and a headless Chrome session ---
os.makedirs('./img/', exist_ok=True)

# NOTE(review): url is empty — the target gallery URL must be filled in
# before running; driver.get('') will fail otherwise.
url = ''
pages = 'page/'

chrome_options = Options()
chrome_options.add_argument("--headless")
# Selenium 4 removed the deprecated `chrome_options=` keyword; `options=`
# is the supported spelling (and works on Selenium 3.8+ as well).
driver = webdriver.Chrome(options=chrome_options)
driver.get(url)
time.sleep(2)

# Scroll to the bottom so lazily-loaded images are rendered into the DOM
# before we snapshot page_source.
driver.execute_script("var q=document.documentElement.scrollTop=100000")
time.sleep(2)

soup = BeautifulSoup(driver.page_source, features='lxml')
divs = soup.find_all('div', {'class': 'item masonry-brick'})

i = 1  # running save counter across all pages

# --- Page 1: download every image found in the masonry grid ---
# The try is now per item: the original wrapped the whole loop in a bare
# `except: pass`, so the first bad image silently abandoned all the rest.
for imgs in divs:
    try:
        img = imgs.find_all('img')[0]
        picture = 'https:' + img['src']
        # The title comes from the page; strip characters that are
        # illegal in filenames on common filesystems.
        img_name = re.sub(r'[\\/:*?"<>|]', '_', img['title']) + '.jpg'
        # timeout so a stalled server can't hang the script forever;
        # raise_for_status so a 404 page isn't saved as a .jpg.
        pic = requests.get(picture, stream=True, timeout=30)
        pic.raise_for_status()
        with open('./img/%s' % img_name, 'wb') as f:
            for chunk in pic.iter_content(chunk_size=128):
                f.write(chunk)
        print(i, 'Save %s successful.' % img_name)
        i += 1
    except Exception as e:
        # Best-effort scraper: log and continue with the next image.
        print('Skip item:', e)

# --- Pages 2..9: click the "next" (») link, then repeat the download ---
for page in range(2, 10):
    print(page, '页')
    # `find_element_by_link_text` was removed in Selenium 4; the
    # supported locator API is find_element(By.LINK_TEXT, ...).
    driver.find_element(By.LINK_TEXT, u"»").click()
    time.sleep(3)
    soup = BeautifulSoup(driver.page_source, features='lxml')
    divs = soup.find_all('div', {'class': 'item masonry-brick'})
    # Per-item try: the original bare `except: pass` around the whole
    # loop silently dropped the rest of the page on the first failure.
    for imgs in divs:
        try:
            img = imgs.find_all('img')[0]
            picture = 'https:' + img['src']
            # Sanitize the page-supplied title before using it as a filename.
            img_name = re.sub(r'[\\/:*?"<>|]', '_', img['title']) + '.jpg'
            pic = requests.get(picture, stream=True, timeout=30)
            pic.raise_for_status()
            with open('./img/%s' % img_name, 'wb') as file:
                for chunk in pic.iter_content(chunk_size=128):
                    file.write(chunk)
            print(i, 'Save %s successful.' % img_name)
            i += 1
        except Exception as e:
            # Best-effort scraper: log and continue with the next image.
            print('Skip item:', e)

# quit() shuts down every window AND the chromedriver process; close()
# only closes the current window and can leave the driver running.
driver.quit()
