import re
import time

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Scrape the National Geographic China "animals" page with a real browser.
# The page builds its image list with JavaScript, so plain requests +
# BeautifulSoup would miss content; Selenium drives Chrome to render it first.

# To run without a visible browser window, enable headless mode:
#   chrome_options = Options()
#   chrome_options.add_argument("--headless")
#   driver = webdriver.Chrome(options=chrome_options)  # NOTE: `chrome_options=` kwarg is deprecated
driver = webdriver.Chrome()  # opens a Chrome window

try:
    # Load the target page (paste the URL you want to scrape here).
    driver.get("http://www.ngchina.com.cn/animals/")
    # Crude wait for the JS-rendered content to appear; a WebDriverWait on a
    # specific element would be more robust than a fixed sleep.
    time.sleep(6)

    # Scroll to the bottom of the page so lazy-loaded images are fetched
    # before any screenshot / page_source inspection.
    js = "var q=document.documentElement.scrollTop=10000"
    driver.execute_script(js)
    time.sleep(3)
finally:
    # quit() shuts down the entire driver session; close() would only close
    # the window and leave the chromedriver process running.
    driver.quit()
