import requests
import os
from lxml import html
from time import sleep
path = "D:/animation_data/"  # destination directory for downloaded images — assumed to exist; TODO confirm it is created beforehand
def get_maxpage():
	"""Return the total number of listing pages on the site.

	Fetches the first listing page and reads the pagination widget;
	the second-to-last anchor text in the pagination div appears to be
	the last page number.

	Returns:
		int: the maximum page number.

	Raises:
		requests.HTTPError: if the listing page request fails.
	"""
	url = "https://konachan.net/post?tags="
	# Timeout and status check added: the original could hang forever
	# and would silently try to parse an HTTP error page.
	resp = requests.get(url, timeout=30)
	resp.raise_for_status()
	tree = html.fromstring(resp.text)
	pages = tree.xpath("//div[@class='pagination']/a/text()")
	sleep(.8)  # be polite to the server between requests
	return int(pages[-2])
def get_links_and_download_images():
	"""Crawl every listing page, collect image links, and download them.

	For each page reported by get_maxpage(), extracts the first anchor
	href of every post-list item, maps the links to local file paths
	(skipping ones already on disk), and downloads the remainder.
	"""
	maxpage = get_maxpage()
	for page in range(1, maxpage + 1):
		url = "https://konachan.net/post?page={a}&tags=".format(a=page)
		resp = requests.get(url, timeout=30)
		tree = html.fromstring(resp.text)
		# One XPath query for all hrefs.  The original re-queried the
		# whole tree once per <li> (O(n^2) in the number of posts) just
		# to pick each item's first anchor href.
		links = tree.xpath("//ul[@id='post-list-posts']/li/a[1]/@href")
		sleep(.8)  # throttle between page fetches
		links_dict = generate_filepath_and_check_repeat_file(links)
		download(links_dict)
def generate_filepath_and_check_repeat_file(links, base_path=None):
	"""Map image links to local file paths, skipping files already on disk.

	Each usable link embeds a 32-character hash right after an
	"image/" or "jpeg/" marker; that hash (plus ".jpg") becomes the
	local filename.  Links with a truncated hash or no marker are
	printed for inspection and skipped, as are links whose target file
	already exists.

	Args:
		links: iterable of image URL strings.
		base_path: directory prefix for generated file paths; defaults
			to the module-level ``path`` (backward compatible).

	Returns:
		dict: {filepath: link} for files still to be downloaded.
	"""
	if base_path is None:
		base_path = path
	HASH_LEN = 32
	links_dict = {}
	for link in links:
		# Both URL variants embed the hash immediately after a marker;
		# the original duplicated this whole branch per marker.
		for marker in ("image/", "jpeg/"):
			pos = link.find(marker)
			if pos == -1:
				continue
			start = pos + len(marker)
			digest = link[start:start + HASH_LEN]
			if len(digest) == HASH_LEN:
				filepath = base_path + digest + ".jpg"
				if not os.path.exists(filepath):  # only queue files not yet downloaded
					links_dict[filepath] = link
			else:
				print(link)  # hash truncated — flag the odd link
			break
		else:
			print(link)  # neither marker present — flag the odd link
	return links_dict
def download(links_dict):
	"""Download every image in links_dict ({filepath: url}) to disk.

	Non-200 responses are skipped silently, matching the best-effort
	behaviour of the rest of the script.

	Args:
		links_dict: mapping of local file path -> image URL.
	"""
	# Iterate the dict directly instead of two parallel key/value lists.
	for filepath, link in links_dict.items():
		r = requests.get(link, stream=True, timeout=60)
		if r.status_code == 200:
			# "with" guarantees the handle is closed even if the write
			# fails — the original leaked the file object.
			with open(filepath, "wb") as f:
				f.write(r.content)
		sleep(.8)  # throttle between downloads
if __name__=="__main__":
	# Entry point: crawl all listing pages and download any new images.
	get_links_and_download_images()