#爬取百度图片网站的图片
import os,time,re
import requests
from urllib.parse import urlencode

def getPage(offset):
	'''Fetch one page (30 results) of Baidu image-search JSON.

	Args:
		offset: result offset passed as the ``pn`` query parameter
			(Baidu pages in steps of 30).

	Returns:
		The response body decoded as UTF-8 on HTTP 200, otherwise None.
		Network errors (timeout, connection failure) also return None.

	Note: reads the module-level global ``image_key`` as the search term.
	'''
	# Query parameters for Baidu's image-search AJAX endpoint.
	params = {
		'tn': 'resultjson_com',
		'ipn': 'rj',
		'ct': '201326592',
		'is': '',
		'fp': 'result',
		'queryWord': image_key,
		'cl': '2',
		'lm': '-1',
		'ie': 'utf-8',
		'oe': 'utf-8',
		'adpicid':'', 
		'st': '-1',
		'z': '',
		'ic': '0',
		'word': image_key,
		's': '',
		'se': '',
		'tab': '',
		'width': '',
		'height': '',
		'face': '0',
		'istype': '2',
		'qc': '',
		'nc': '1',
		'fr': '',
		'expermode':'', 
		'pn': offset,
		'rn': '30',
		'gsm': '1e',
		'1539681915698':'', 
	}
	# Build the request URL.
	url = "https://image.baidu.com/search/acjson?"+urlencode(params)

	try:
		# BUG FIX: the request itself was outside the try block, so a
		# connection error or timeout crashed the whole script. A timeout
		# is added so a stalled server can't hang the scraper forever.
		res = requests.get(url, timeout=10)
		if res.status_code == 200:
			print("爬取网页成功")
			return res.content.decode('utf-8')
		return None
	except requests.RequestException:
		# Narrowed from a bare `except:` — only swallow network errors.
		return None


def getImage(html):
	'''Extract thumbnail URLs from the search-result JSON text and
	download each image into ./mypic/<image_key>/.

	Args:
		html: raw JSON text returned by getPage(), or None/empty.

	Returns:
		None. When ``html`` is falsy the function does nothing.

	Note: reads the module-level global ``image_key`` for the folder name.
	'''
	if not html:
		return None
	# Pull every "thumbURL":"..." value out of the JSON with a regex.
	pat = '"thumbURL":"(.*?)"'
	dlist = re.findall(pat, html, re.S)

	# Target directory for this keyword.
	path = os.path.join("./mypic/", image_key)
	# BUG FIX: os.mkdir() raises FileNotFoundError when the parent
	# ./mypic/ directory does not exist yet (i.e. on the very first run).
	# makedirs creates intermediate directories; exist_ok avoids a race.
	os.makedirs(path, exist_ok=True)

	# Download each thumbnail, streaming to disk in chunks.
	for url_img in dlist:
		# Use the last URL path segment as the local file name.
		s_pic = os.path.join(path, url_img.split("/").pop())
		try:
			with requests.get(url_img, stream=True, timeout=10) as ir:
				with open(s_pic, "wb") as f:
					# Explicit chunk size instead of iterating the
					# response directly (which yields 128-byte chunks).
					for chunk in ir.iter_content(chunk_size=8192):
						f.write(chunk)
		except requests.RequestException:
			# One dead/slow image URL should not abort the whole batch.
			continue
	return print("成功获取图片")

def main(offset):
	'''Fetch the result page at the given offset and save its images.'''
	getImage(getPage(offset))


if __name__ == "__main__":
	# Search keyword and page count are read interactively; image_key is
	# intentionally a module-level global read by getPage()/getImage().
	image_key = input("请输入想要爬取的图片：")
	image_num = int(input("请输入想要爬取的页数(每页30张):"))
	for i in range(image_num):
		# BUG FIX: the offset previously started at 30 (30 + i*30),
		# which skipped the first result page entirely. Start at pn=0
		# so exactly image_num pages of 30 images are fetched.
		main(offset=i * 30)
		# Polite delay between page requests to avoid being throttled.
		time.sleep(2)