import requests
import re
from bs4 import BeautifulSoup
import os 

# Fetch a page and return its body text.
def getPage(url):
	"""Return the response body of *url* as text.

	Keeps the original contract: on any request failure the Chinese
	message '页面信息不存在！' is returned instead of raising, so callers
	that only iterate over the result keep working.
	"""
	try:
		# Original called r.status_code as a bare expression (a no-op);
		# raise_for_status() actually surfaces 4xx/5xx as errors.
		r = requests.get(url, timeout=10)
		r.raise_for_status()
		return r.text
	except requests.RequestException:
		# Narrowed from a bare except: only network/HTTP failures are
		# treated as "page missing"; programming errors now propagate.
		return '页面信息不存在！'
# Parse page text and extract image URLs.
def parsePage(content):
	"""Yield ``{'image': url}`` dicts for every ``middleURL`` in *content*.

	*content* is the JSON-ish text of a Baidu image-search result page.

	NOTE: the original wrapped this in a bare try/except whose
	``return 'error'`` sat inside a generator — that value becomes
	``StopIteration.value`` and never reaches the caller, so the handler
	was dead code and has been removed.
	"""
	pat = '"middleURL":"(.*?)"'
	for picurl in re.findall(pat, content):
		yield {
			"image": picurl
		}
# Download and store one image on disk.
def saveImage(item, root='C://Users//wqh//Desktop//image//'):
	"""Download ``item['image']`` into *root*, skipping existing files.

	*root* defaults to the original hard-coded directory but is now a
	keyword parameter so callers can choose the destination
	(backward-compatible: existing ``saveImage(item)`` calls work as before).
	Errors are reported, not raised, matching the original behavior.
	"""
	imagename = item['image'].split('/')[-1]
	path = os.path.join(root, imagename)
	try:
		# makedirs creates intermediate directories too; plain mkdir
		# failed when the parent did not exist.
		os.makedirs(root, exist_ok=True)
		if os.path.exists(path):
			print("already exists!!!!")
			return
		r = requests.get(item['image'], timeout=10)
		# Without this, a 404/error page would be written out as an image.
		r.raise_for_status()
		# The original called f.close() inside the with-block — redundant;
		# the context manager already closes the file.
		with open(path, 'wb') as f:
			f.write(r.content)
		print('save successful!!!!')
	except Exception as e:
		print(e)
		print("error!!!")


def main(p):
	"""Crawl result page *p* of the Baidu image search and save every image.

	Each result page holds 30 images, so the ``pn`` query parameter is
	``p * 30``. The query word is hard-coded (URL-encoded 街拍, "street
	photography") inside the URL.
	"""
	page_url = "https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E8%A1%97%E6%8B%8D&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=%E8%A1%97%E6%8B%8D&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn=" + str(p * 30)
	for record in parsePage(getPage(page_url)):
		print(record)
		saveImage(record)


# Script entry point: crawl result pages 1 through 5.
if __name__ == "__main__":
	for page in range(1, 6):
		main(page)