'''
第八周作业第三题,抓取百度图片街拍
时间:20180918
地址:"https://images.baidu.com/search/acjson?"+urlencode(params)
'''

# 导入数据爬取所需模块
import os,time,json
import requests
from urllib.parse import urlencode
from urllib.request import urlretrieve

# 定义数据爬取方法
def getPicList(offset):
	'''
	Fetch one page of Baidu image-search results for "街拍" (street snaps).

	@param int offset: pagination offset (the `pn` parameter), e.g. 0, 30, 60...
	@return dict: parsed JSON response, or None on network/HTTP failure
	'''

	# Query parameters for Baidu's image-search AJAX endpoint.
	params = {
		'tn':'resultjson_com',
		'ipn':'rj',
		'ct':'201326592',
		'queryWord':'街拍',
		'ie':'utf-8',
		'oe':'utf-8',
		'word':'街拍',
		'pn':str(offset),
		'rn':30
	}

	# Build the request URL
	_url = "https://images.baidu.com/search/acjson?"+urlencode(params)
	try:
		# Timeout so a stalled connection cannot hang the crawler forever.
		response = requests.get(_url, timeout=10)
		if response.status_code == 200:
			return json.loads(response.content.decode('utf-8'))
		# Explicit None on non-200 instead of silently falling off the end.
		return None
	except requests.RequestException:
		# Broader than ConnectionError: also covers timeouts and invalid URLs,
		# all of which should be treated as "no data for this page".
		return None
# Parse the response data
def getImages(json):
	'''
	Parse a Baidu image-search JSON response into image records.

	@param dict json: parsed response from getPicList (may be None or empty)
	@yield dict: {'image': thumbnail URL, 'title': directory-safe page title}
	'''
	# getPicList returns None on failure; treat that the same as "no data".
	if not json:
		return
	data = json.get('data')
	if not data:
		return
	for item in data:
		if not item:
			# The trailing element of 'data' is typically an empty placeholder.
			continue
		image_url = item.get('thumbURL')
		if not image_url:
			# No downloadable URL -> skip; yielding None here would crash saveImage.
			continue
		# fromPageTitleEnc may be absent/None; fall back to a fixed title.
		image_title = item.get('fromPageTitleEnc') or 'untitled'
		# '/' in a title would be read as a path separator when used as a dir name.
		image_title = image_title.replace('/','_')
		yield {
			'image': image_url,
			'title': image_title
		}
def saveImage(item):
	'''
	Download one image into a directory named after its page title.

	@param dict item: {'image': <image url>, 'title': <sanitized title>}
	'''
	# Build the per-title storage directory.
	path = os.path.join("./mypic/", item.get('title'))
	# makedirs(exist_ok=True) also creates the ./mypic parent directory and
	# does not raise when the directory already exists — a bare os.mkdir
	# fails on both counts.
	os.makedirs(path, exist_ok=True)

	# Name the file after the last URL path segment, keeping the .jpg suffix.
	local_image_url = item.get('image')
	file_name = local_image_url.split("/").pop() + ".jpg"
	save_pic = os.path.join(path, file_name)

	# urlretrieve downloads the URL straight to disk.
	urlretrieve(local_image_url, save_pic)

# Main driver function
def main(offset):
	'''
	Fetch one result page at the given offset and save every image in it.

	@param int offset: pagination offset forwarded to getPicList
	'''
	# Avoid shadowing the builtin `json` module with the response dict.
	result = getPicList(offset)
	if result is None:
		# Network failure or non-200 response: nothing to save for this page.
		return
	for item in getImages(result):
		print(item)
		saveImage(item)

# Script entry point: crawl the first five result pages (30 items each).
if __name__ == '__main__':
	for offset in range(0, 150, 30):
		main(offset)
