import requests,time,os


#Fetch one page of JSON data from the image-search API.
def open_url(url):
	"""Request *url* and return the decoded JSON payload, or None on failure.

	Args:
		url: full API URL for one page of search results.

	Returns:
		dict: decoded JSON body on success; None when the request fails,
		the server does not answer 200, or the body is not valid JSON.
	"""
	headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
	}
	try:
		# Timeout keeps the crawler from hanging forever on a dead connection.
		res = requests.get(url, headers=headers, timeout=10)
	except requests.RequestException:
		# Network-level failure: treat it like any other fetch failure.
		return None
	if res.status_code != 200:
		return None
	try:
		return res.json()
	except ValueError:
		# Server returned 200 but the body was not JSON.
		return None

#Extract image URLs from the API response.
def parse_data(data):
	"""Yield the hover image URL of every item in *data*.

	Args:
		data: decoded JSON dict from open_url, or None when the fetch failed.

	Yields:
		str: each item's 'hoverURL'. Items without one are skipped (Baidu's
		response often ends with an empty placeholder item), and a None or
		malformed payload yields nothing instead of raising.
	"""
	if not data:
		return
	for item in data.get('data', []):
		img_url = item.get('hoverURL')
		# Skip entries with a missing/empty URL rather than yielding None.
		if img_url:
			yield img_url

#Download a single image and save it under ./百度街拍.
def save_img(img_url,filename):
	"""Download *img_url* and write it to ./百度街拍/<filename>.png.

	Args:
		img_url: direct URL of the image to download.
		filename: integer sequence number used as the file name.

	Failures are reported and swallowed so one bad image does not stop
	the whole crawl.
	"""
	# exist_ok avoids the check-then-create race of the original exists()/makedirs pair.
	os.makedirs('./百度街拍', exist_ok=True)
	headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.104 Safari/537.36',
	}
	try:
		res = requests.get(img_url, headers=headers, timeout=10)
		# Without this, a 404/500 error page would be saved as image bytes.
		res.raise_for_status()
		with open('./百度街拍/'+str(filename)+'.png','wb') as f:
			print('正在保存第%d条图片...' % (filename))
			f.write(res.content)
	except Exception as err:
		# Best effort: report the failure and let the caller continue.
		print('第%d条图片无法保存原因是:%s' % (filename,err))


#Crawl one result page: fetch, parse, and save every image on it.
def main(url,filename):
	"""Process one API page and return the next unused sequence number.

	Args:
		url: API URL for one page of results.
		filename: starting sequence number for saved files.

	Returns:
		int: the sequence number to use for the next page's first image.
	"""
	data = open_url(url)
	if data is None:
		# Fetch failed: skip this page, keep the numbering unchanged.
		return filename
	for img_url in parse_data(data):
		save_img(img_url,filename)
		filename += 1
	return filename

if __name__ == '__main__':
	# One sequence number shared across all pages so file names never collide.
	next_number = 1
	page_count = int(input('请输入需要爬取的页数(每页30张图片):'))
	for page_index in range(page_count):
		# Baidu's acjson endpoint pages by offset: pn = page * 30, rn = page size.
		api_url = (
			'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E8%A1%97%E6%8B%8D&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=%E8%A1%97%E6%8B%8D&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn='
			+ str(page_index * 30)
			+ '&rn=30'
		)
		next_number = main(api_url, next_number)
