import json,re,requests,os,time
import urllib
from urllib.request import urlretrieve as saveImg #这边坑呐！！！！ 不是 urllib.urlretrieve
from urllib.parse import quote,urlparse
from requests.exceptions import RequestException

def getData(keyword,curPage=1,pageSize=30):
	"""Fetch one page of Baidu image-search results as parsed JSON.

	Args:
		keyword: search term; URL-quoted before being put in the request.
		curPage: 1-based page number (page 1 starts at result offset 0).
		pageSize: number of results per page.

	Returns:
		The parsed JSON dict from Baidu's acjson endpoint. On a non-200
		response returns {'data': []} so callers that index ['data']
		do not crash on a None result.

	Raises:
		requests.exceptions.RequestException: on network failure/timeout.
	"""
	pageNum = (curPage - 1)* pageSize
	headers = {
		# Referer must look like a real image-search page or Baidu rejects the request.
		'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&word=\"'+quote(keyword)+'\"',
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'
	}
	url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=\''+quote(keyword)+'\'&ie=utf-8&oe=utf-8&word=\''+quote(keyword)+'\'&pn='+str(pageNum)+'&rn='+str(pageSize)
	# timeout so a stalled connection cannot hang the whole scrape
	res =  requests.get(url,headers=headers,timeout=10)
	if res.status_code == 200:
		# Baidu occasionally emits invalid byte sequences; ignore decode errors.
		return json.loads(res.content.decode('utf8','ignore'))
	# Explicit fallback: the original fell through and returned None, which
	# made the caller's getData(...)['data'] raise TypeError.
	return {'data': []}

def saveImg(imgurl,addr):
	"""Download the image at imgurl and write its bytes to path addr.

	Note: this definition deliberately shadows the
	`urllib.request.urlretrieve as saveImg` alias imported at the top of
	the file; that import is effectively dead code.

	Args:
		imgurl: direct URL of the image.
		addr: destination file path.

	Raises:
		requests.exceptions.RequestException: on network failure/timeout.
	"""
	# timeout so one dead thumbnail URL cannot stall the whole run
	res =  requests.get(imgurl,timeout=10)
	# Only save on success; otherwise we would write an HTML error page
	# to disk with an image extension.
	if res.status_code == 200:
		# 'wb' (not 'wb+'): we only write, never read back
		with open(addr,'wb') as f:
			f.write(res.content)

if __name__ == '__main__':
	print()
	keyword = str(input('请输入图片关键字：'))
	totalPage = int(input('抓取的页数：'))
	try:
		for page in range(totalPage):
			print('第',page+1,'页',end='')
			# getData expects a 1-based page number; the original passed the
			# 0-based loop index, so the first request asked for offset -30.
			payload = getData(keyword,page+1,30) or {}
			result = payload.get('data',[])
			# Baidu pads the list with a trailing empty element, so >1 means
			# there is at least one real hit.
			if len(result) >1:
				# race-free replacement for exists()+mkdir()
				os.makedirs('./'+keyword,exist_ok=True)
				# len-1: skip the trailing empty element
				for i in range(len(result)-1):
					print('\r{0}/30'.format(i + 1),end='',flush=True)
					# .get() avoids KeyError when 'thumbURL' is absent;
					# the original indexed the key unconditionally.
					thumb = result[i].get('thumbURL') if result[i] else None
					if thumb:
						# file extension = text after the last '.' in the URL
						sffix = '.'+thumb.split('.')[-1]
						saveImg(thumb,'./'+keyword+'/'+str(time.time())+sffix)
					else:
						print('第',(i+1),'张照片未获取到')
			else:
				print('*'*20,'\n')
				print('暂未搜索到相关图片','\n')
				print('*'*20,'\n')
		print('\n获取',keyword,'系列照片结束，请查看文件夹：./',keyword,'内照片')
	except RequestException as e:
		# RequestException has no .reason attribute (the original handler
		# itself raised AttributeError); str(e) is always safe.
		print('error: '+str(e))

