import os
import time
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from PIL import Image
from selenium import webdriver


def get_soup(url):
	"""Fetch *url* in a Selenium-driven Chrome browser and return the
	rendered page as a BeautifulSoup object.

	Waits a few seconds BEFORE reading ``page_source`` so that
	JavaScript-rendered content (Baidu's image grid) has loaded; the
	original slept after parsing, which defeated the purpose of the wait.

	:param url: page URL to load.
	:return: ``BeautifulSoup`` of the fully rendered page (lxml parser).
	"""
	chrome_driver_path = "/home/nc/download/google/chromedriver"
	# NOTE(review): executable_path= was removed in Selenium 4 (use
	# Service instead); kept as-is to match this file's Selenium 3 usage.
	driver = webdriver.Chrome(executable_path=chrome_driver_path)
	try:
		driver.get(url)
		# Give dynamic content time to render before snapshotting the DOM.
		time.sleep(3)
		soup = BeautifulSoup(driver.page_source, "lxml")
	finally:
		# quit() closes every window AND ends the driver process, so the
		# original's extra close() call was redundant; try/finally makes
		# sure the browser is shut down even if loading/parsing raises.
		driver.quit()
	return soup


def main():
	"""Download wolf thumbnails from Baidu image search.

	Scrapes ``pages`` result pages (20 thumbnails per page) and saves each
	thumbnail as ``../downloadResources/wolf_<n>.jpg``. Items without a
	usable image link are skipped; per-image download/save failures are
	reported and do not abort the run.
	"""
	images_per_page = 20
	pages = 5  # number of result pages to scrape (original comment said 10 but fetched 5)

	# Build one search-result URL per page; "pn" is the result offset and
	# "gsm" is the same offset in hex, mirroring Baidu's own pagination.
	base = "https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=狼"
	urls = [
		base + "&pn=" + str(page * images_per_page) + "&gsm=" + str(hex(page * images_per_page))
		for page in range(pages)
	]

	# Browser-like User-Agent so the image host does not reject requests.
	# Hoisted out of the loop — it is invariant across pages.
	headers = {'User-Agent':
               'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    	}

	# Make sure the output directory exists before the first save.
	out_dir = "../downloadResources"
	os.makedirs(out_dir, exist_ok=True)

	num = 0
	for url in urls:
		soup = get_soup(url)

		items = soup.find_all("li", class_="imgitem")
		print(len(items))

		# Collect thumbnail URLs, skipping items with no <img src=...>
		# (the original indexed child.img["src"] unconditionally and
		# crashed on such items).
		image_links = [
			item.img["src"]
			for item in items
			if item.img is not None and item.img.has_attr("src")
		]

		for image_link in image_links:
			try:
				r = requests.get(image_link, headers=headers, timeout=10)
				r.raise_for_status()
				image = Image.open(BytesIO(r.content))
				image.save(os.path.join(out_dir, "wolf_" + str(num) + ".jpg"))
			except (requests.RequestException, OSError) as e:
				# OSError covers PIL decode/save failures (truncated
				# data, disk errors); the original bare except also
				# swallowed KeyboardInterrupt and hid the cause.
				print("save Error:", e)
			# Keep numbering monotonic even when a download fails, as the
			# original did (num was incremented outside the try).
			num += 1


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
	main()

