import requests
from lxml import etree
import asyncio
import aiohttp


# NOTE(review): dead code — a first, synchronous draft of a gushiwen.cn
# famous-quotes scraper, superseded by the asyncio draft below. It is kept
# commented out; prefer deleting it outright (version control preserves
# history). Concrete defects visible in this draft, should it ever be revived:
#   - `page` is initialized once BEFORE the per-author `for` loop and only
#     incremented inside the `while`, so it is never reset between authors:
#     every author after the first starts paginating from the wrong page.
#   - `next` shadows the builtin of the same name.
#   - Unguarded `[0]` indexing on the xpath result raises IndexError if the
#     page layout changes or the request is blocked.
#   - No timeout/error handling on `requests.get`.
# url = 'https://www.gushiwen.cn/mingjus/'
# response = requests.get(url)
# tree = etree.HTML(response.text)
# items = tree.xpath('//div[@id="type2"]/div[@class="sright"]/a/text()')
# total = []
# page = 1
# for item in items:
#     shis = []
#     while True:
#         detailurl = f'https://www.gushiwen.cn/mingjus/default.aspx?page={page}&tstr=&astr={item}'
#         response = requests.get(detailurl)
#         tree = etree.HTML(response.text)
#         datas = tree.xpath('//div[@class="sons"]')[0]
#         contents = datas.xpath('.//div[@class="cont"]')
#         next = tree.xpath('//div[@class="pagesright"]/a[@class="amore"]/@href')
#         for content in contents:
#             texts = content.xpath('.//a[@style="float: left;"]/text()')
#             for i in range(0, len(texts), 2):
#                 shi = texts[i]
#                 author_book = texts[i + 1]
#                 shis.append({
#                     'shi': shi,
#                     'author_book': author_book,
#                 })
#         page += 1
#         if not next:
#             break
#     total.append({
#         'author': item,
#         'shis': shis,
#     })
# print(total)


# NOTE(review): dead code — the newer aiohttp/asyncio draft of the scraper,
# also entirely commented out, so this module currently does nothing at
# runtime except import its dependencies. Reviewer notes for whoever
# reactivates it:
#   - Pagination is implemented via recursion (`await get_author_shi(...)`
#     on itself for the next page); a long result set can hit the recursion
#     limit — a `while` loop over `next_url` is safer.
#   - `next` shadows the builtin of the same name.
#   - Results are only `print`ed, never collected or returned, so callers
#     get nothing back from `get_author_shi`.
#   - `asyncio.run(main())` runs at module level if uncommented; it should
#     live under an `if __name__ == "__main__":` guard.
#   - Unguarded `[0]` indexing on the xpath results raises IndexError when
#     a page has an unexpected layout.
# async def get_author_shi(session, url):
#     async with session.get(url) as response:
#         html_str = await response.text()
#         tree = etree.HTML(html_str)
#         datas = tree.xpath('//div[@class="sons"]')[0]
#         contents = datas.xpath('.//div[@class="cont"]')
#         next = tree.xpath('//div[@class="pagesright"]/a[@class="amore"]/@href')
#         for content in contents:
#             shi = content.xpath('.//a[1]/text()')[0]
#             author_book = content.xpath('.//a[2]/text()')
#             if author_book:
#                 author_book = author_book[0]
#             else:
#                 author_book = '没有标明出处'
#             print(shi, author_book)
#         if not next:
#             return '没有下一页'
#         else:
#             next_url = f'https://www.gushiwen.cn{next[0]}'
#             await get_author_shi(session, next_url)
#
#
# async def main():
#     async with aiohttp.ClientSession() as session:
#         async with session.get('https://www.gushiwen.cn/mingjus/') as response:
#             html_str = await response.text()
#             tree = etree.HTML(html_str)
#             items = tree.xpath('//div[@id="type2"]/div[@class="sright"]/a/text()')
#             await asyncio.gather(
#                 *[get_author_shi(session, f'https://www.gushiwen.cn/mingjus/default.aspx?astr={item}') for item in
#                   items])
#
#
# asyncio.run(main())
