import requests
from pyquery import PyQuery as pq 
from urllib.request import urlretrieve 
import os,time,re,json
from urllib import request
from urllib.parse import urlencode

#获取网页信息
#获取网页信息
def getPage(offset):
	"""Fetch one page (30 results) of Baidu image-search JSON for '街拍'.

	offset -- zero-based page index; mapped to the 'pn' query parameter
	          (pn = offset * 30, i.e. 30 results per page).
	Returns the decoded JSON payload as a dict, or None on a non-200
	response, a network error, or a JSON decode error.
	"""
	data = {
		'tn': 'resultjson_com',
		'ipn': 'rj',
		'queryWord': "街拍",
		'word': "街拍",
		'pn': offset * 30
	}

	url = 'http://image.baidu.com/search/acjson?' + urlencode(data)

	# Browser-like headers: the endpoint rejects obviously scripted clients.
	headers = {
		'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
		'Accept-Encoding': 'gzip, deflate',
		'Host': 'image.baidu.com',
		'Referer': 'https://www.baidu.com/s?ie=utf-8&f=3&rsv_bp=1&tn=request_2_pg&wd=%E8%A1%97%E6%8B%8D&oq=%25E8%25A1%2597%25E6%258B%258D&rsv_pq=9f3a2562000142ed&rsv_t=5551AxLt%2FAgSsqTiKBgNUHWfrg5G5gztHglfmjlL6ia7cDG%2BANBsKEiS2xsnhALZowHv&rqlang=cn&rsv_enter=0&prefixsug=%25E8%25A1%2597%25E6%258B%258D&rsp=0&rsv_sug=1',
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',

	}
	try:
		# timeout so a stalled connection cannot hang the crawl forever
		res = requests.get(url, headers=headers, timeout=10)
		if res.status_code == 200:
			# decode with errors ignored: the payload can contain bytes
			# that are not valid UTF-8
			con = res.content.decode("utf-8", "ignore")
			return json.loads(con)
		return None  # explicit, instead of falling off the end on non-200
	except requests.ConnectionError as err:
		print(err)
		return None
	except Exception as err:
		# best-effort crawler: log and skip this page rather than crash
		print(err)
		return None


#获取并封装返回url及图片
#获取并封装返回url及图片
def parsePage(payload):
	"""Yield {'img_url', 'title'} dicts from a Baidu image-search response.

	payload -- decoded JSON dict as returned by getPage.
	           (Parameter renamed from 'json': it shadowed the stdlib
	           json module imported at the top of the file.)
	Yields one dict per usable entry; entries that are null/empty in the
	'data' list are skipped, and a missing/empty 'data' yields nothing.
	"""
	data = payload.get('data')

	if data:
		for item in data:
			if item:  # 'data' can contain null entries — skip them
				yield {
					'img_url': item.get("thumbURL"),
					'title': item.get("fromPageTitleEnc")
				}



#保存图片
#保存图片
def savePic(item):
	"""Download one image described by *item* into ./pic/<title>.jpg.

	item -- dict with 'img_url' (thumbnail URL) and 'title' (used, after
	        sanitising, as the file name).
	Items missing either field are skipped: previously a missing title
	made re.sub raise TypeError and killed the whole crawl.
	"""
	image_url = item.get('img_url')
	title = item.get('title')
	# Guard: Baidu entries sometimes lack thumbURL or fromPageTitleEnc.
	if not image_url or not title:
		return
	# Strip punctuation/whitespace that is illegal or awkward in file names.
	t = re.sub(r",|\"|:|\.|！|\?|\s|\(|\)|\\|\||\/|\-|\-+", "", title)

	# Create the output folder; exist_ok avoids the exists()/mkdir() race.
	os.makedirs("./pic", exist_ok=True)
	path = os.path.join("./pic/", t)

	# urlretrieve is refused with HTTP 403 unless it sends browser-like
	# headers, so install a global opener that carries them.
	opener = request.build_opener()
	opener.addheaders = [
		('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
		('Cache-Control', 'max-age=0'),
		('Connection', 'keep-alive'),
		('Referer', 'http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=%E8%A1%97%E6%8B%8D'),
		('Upgrade-Insecure-Requests', '1'),
		('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2969.0 Safari/537.36'),
	]
	request.install_opener(opener)

	# Store the image directly with urllib's urlretrieve.
	try:
		urlretrieve(image_url, path + '.jpg')
	except Exception as err:
		# Best-effort: one failed download should not abort the crawl.
		print(err)



#主函数,调度爬虫处理
#主函数,调度爬虫处理
def main(offset):
	"""Crawl one results page: fetch, parse, and save every image on it."""
	page = getPage(offset)
	if not page:
		return  # fetch failed — nothing to do for this page
	for entry in parsePage(page):
		savePic(entry)





if __name__ == '__main__':
	# Crawl the first two result pages, pausing 1s between requests
	# to stay polite to the server.
	for page_index in range(2):
		main(page_index)
		time.sleep(1)



