import re
import asyncio
import requests
import random
from lxml import etree
import aiohttp

# Pool of desktop browser User-Agent strings; one is picked at random per
# session to look less like a bot.
# Fix: the original list was missing a comma after the Chrome/19.0.1055.1
# entry, so Python's implicit string concatenation fused two user agents
# into one malformed entry. Also stripped a stray trailing space from the
# last entry.
USER_AGENT_LIST = [
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
	"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
	"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
	"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
	"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
	"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR "
	"2.0.50727; SE 2.X MetaSr 1.0)",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
	"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
	"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
	"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
	"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
	"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4496.0 "
	"Safari/537.36",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 "
	"Safari/605.1.15",
	"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 "
	"Safari/605.1.15"
]

# Headers for the synchronous (requests) calls. Note: the User-Agent is
# chosen once at import time, so every request in a single run shares the
# same identity.
headers = {
	'User-Agent': random.choice(USER_AGENT_LIST),
}
# Page links live on the site root; images are served from a separate CDN host.
base_url = "http://www.jj20.com"
base_image_url = "http://img.jj20.com"
# Example image URL: http://img.jj20.com/up/allimg/1114/0610210RQ1/2106100RQ1-1.jpg
# Local download directory (machine-specific; must already exist).
local_path = "/Users/farben/Pictures/bizhi"

# Headers for the asynchronous (aiohttp) image downloads.
header_aio = {
	'Accept': 'image/webp,image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5',
	'User-Agent': random.choice(USER_AGENT_LIST),
}


def request_url(url):
	"""GET *url* with the shared browser headers, retrying up to 3 times.

	Returns the Response on success (HTTP status < 400), or None when every
	attempt fails or raises.

	Fix: the original wrapped the whole retry loop in one try/except, so the
	first network exception aborted all remaining retries; it also had no
	timeout, letting a stalled server hang the crawler forever.
	"""
	resp = None
	for _attempt in range(3):
		try:
			resp = requests.get(url, headers=headers, timeout=10)
			# The site serves GB2312-encoded pages; requests cannot auto-detect it.
			resp.encoding = 'GB2312'
			if resp:  # truthy Response == status code < 400
				return resp
		except Exception as e:
			# Transient network error: log and fall through to the next attempt.
			print("页面请求失败" + str(e))
	return resp


# aiohttp协程异步请求
async def get_content(link_url):
	async with aiohttp.ClientSession() as session:
		resp = await session.get(link_url, headers=header_aio, ssl=False)
		# 获取二进制格式数据
		content = await resp.read()
		# 获取字符串格式数据
		# content = await resp.text()
		return content


# 协程实现异步写入文件:info是包含(url,file_name)的元组
async def write_to_file(info):
	content = await get_content(info[0])
	with open(f'{local_path}/{info[1]}', 'wb') as f:
		f.write(content)


# 1.确定并爬取切入点网址
def first_step(url):
	resp = request_url(url)
	assert resp is not None, "请求失败,返回为None"
	html = etree.HTML(resp.text)
	totel = html.xpath('//span[@class="pageinfo"]/strong/text()')[0]
	print(totel, type(int(totel)))
	for i in range(1, int(totel) + 1):
		page_url = url.replace('.html', f'_{i}.html')
		print(page_url)
		second_step(page_url)


# 示例: http://www.jj20.com/bz/nxxz/list_7_cc_12_2.html  最后的2控制页数


# 2.根据切入点网页信息,爬取某一页面内容
def second_step(page_url):
	resp = request_url(page_url)
	assert resp is not None, "请求失败,返回为None"
	html = etree.HTML(resp.text)
	pages = html.xpath('//ul[@class="picbz"]/li')
	for page in pages:
		temp_url = page.xpath('.//a/@href')[0]
		detail_url = base_url + temp_url
		print(detail_url)
		third_step(detail_url)
		pass


# 3.根据从页面获取得详情页url,抓取详情页信息
def third_step(detail_url):
	resp = request_url(detail_url)
	assert resp is not None, "请求失败,返回为None"
	html = etree.HTML(resp.text)
	# 获取图片个数
	images = html.xpath('//ul[@id="showImg"]/li')
	image_num = len(images)
	# 生成所有图片链接
	image_url = re.findall(r"<script>var id='(.*?)';</script>", resp.text, re.S).pop()
	number = re.findall(r'-(.*?)\.', str(image_url), re.S)[0]
	index = int(number)
	total_num = index + image_num
	print(image_url)
	image_urls = []
	for i in range(index, total_num):
		temp_url = str(image_url).replace(f'-{index}.', f'-{i}.')
		link_url = base_image_url + temp_url
		image_urls.append(link_url)
	# print(link_url)
	if index == 0:
		format_download_image_info(image_urls)
	# format_download_image_info(image_urls)


# 4.处理图片链接生成需要下载的数据
def format_download_image_info(image_urls):
	infos = []
	for image_url in image_urls:
		info = [image_url]
		file_name = str(image_url).split('/')[-1]
		info.append(file_name)
		file_path = str(image_url).split('/')[-2]
		info.append(file_path)
		infos.append(info)
	download_images(infos)


# 5.开始下载图片数据
def download_images(links):
	tasks = [asyncio.ensure_future(write_to_file(link)) for link in links]
	loop = asyncio.get_event_loop()
	loop.run_until_complete(asyncio.wait(tasks))


if __name__ == '__main__':
	# Fix: removed leftover scratch/debug code (a list .pop() print, a
	# dict .get() print, and a trailing `pass`) that had nothing to do
	# with the crawl.
	# Entry point: first list page of the wallpaper category; first_step()
	# discovers the remaining pages from its pagination widget.
	url = 'http://www.jj20.com/bz/nxxz/list_7_cc_12.html'
	first_step(url)
