import requests
from lxml import etree
import aiohttp
import asyncio

# --- Legacy synchronous implementation, kept for reference only ---
# --- (superseded by the async version below) ---
# url = 'https://www.gushiwen.cn/mingjus/'

# res = requests.get(url)

# tree = etree.HTML(res.text)
# datas = tree.xpath('//div[@id="type2"][1]/div[@class="sright"][1]/a')
# for data in datas:
# 	autor = data.xpath('.//text()')[0]
# 	href = data.xpath('.//@href')[0]
# 	autor_url = href.split('=')[1]
# 	page = 1
# 	while True:
# 		total_href = f'https://www.gushiwen.cn/mingjus/default.aspx?page={page}&tstr=&astr={autor_url}&cstr=&xstr='
# 		gushiwen_url = total_href
# 		res = requests.get(gushiwen_url)
# 		tree = etree.HTML(res.text)
# 		if tree.xpath('//form[@id="FromPage"][1]/div[@class="pagesright"][1]/a[@class="amore"]/@href'):
# 			datas = tree.xpath('//div[@class="left"][1]/div[@class="sons"][1]/div[@class="cont"]')
# 			for data in datas:
# 				content = data.xpath('.//a[1]/text()')
# 				title = data.xpath('.//a[2]/text()')
# 				print({"page": page})
# 				print({'title': title, 'content': content})
# 			page += 1
# 		else:
# 			datas = tree.xpath('//div[@class="left"][1]/div[@class="sons"][1]/div[@class="cont"]')
# 			for data in datas:
# 				content = data.xpath('.//a[1]/text()')
# 				title = data.xpath('.//a[2]/text()')
# 				print({"page": page})
# 				print({'title': title, 'content': content})
# 			break
# 	break

async def get_data(session, url, xpath_query):
	"""Fetch *url* with the given aiohttp *session* and return the list of
	nodes matching *xpath_query* in the parsed HTML document."""
	async with session.get(url) as response:
		body = await response.text()
	document = etree.HTML(body)
	return document.xpath(xpath_query)


async def main():
	"""Scrape famous quotes (mingju) per author from gushiwen.cn.

	For every author link found on the index page, walk that author's
	result pages until the "next page" (a.amore) link disappears,
	collecting the quote/source nodes from each page as it is fetched,
	then print them grouped by page number.

	NOTE: the original version downloaded every page twice — once in the
	counting loop (as '//body', just to look for the next-page link) and
	again via asyncio.gather to scrape the content. Each page is now
	fetched exactly once and both the content nodes and the next-page
	marker are read from the same document; the printed output is
	unchanged.
	"""
	async with aiohttp.ClientSession() as session:
		# Author links from the index page's category sidebar.
		authors = await get_data(session, 'https://www.gushiwen.cn/mingjus/',
		                         '//div[@id="type2"][1]/div[@class="sright"][1]/a')

		for author in authors:
			href = author.xpath('.//@href')[0]
			# The author id is the value after '=' in e.g. '?astr=XXX'.
			author_id = href.split('=')[1]

			page = 1
			pages = []  # list of cont-node lists, one entry per page, in order
			while True:
				url = (f'https://www.gushiwen.cn/mingjus/default.aspx'
				       f'?page={page}&tstr=&astr={author_id}&cstr=&xstr=')
				# Fetch the whole document once; extract both the quote
				# nodes and the "more pages" marker from it.
				body = await get_data(session, url, '//body')
				if body:
					more_pages = body[0].xpath(
						'//form[@id="FromPage"][1]/div[@class="pagesright"][1]'
						'/a[@class="amore"]/@href')
					pages.append(body[0].xpath(
						'//div[@class="left"][1]/div[@class="sons"][1]'
						'/div[@class="cont"]'))
				else:
					# Unparseable response: treat as an empty last page.
					more_pages = []
					pages.append([])

				if not more_pages:
					break
				page += 1

			for page_num, conts in enumerate(pages, 1):
				for cont in conts:
					content = cont.xpath('.//a[1]/text()')
					title = cont.xpath('.//a[2]/text()')
					print({"page": page_num})
					print({'title': title, 'content': content})
		# break  # only process the first author; remove to handle all authors


# Only launch the scraper when executed as a script, not when imported.
if __name__ == '__main__':
	asyncio.run(main())