# import requests
#
# from bs4 import BeautifulSoup
#
# url = "https://www.gushiwen.cn/mingjus/default.aspx?page=1&tstr=&astr=&cstr=&xstr="
#
# response = requests.get(url)
# # print(response.text)
# soup = BeautifulSoup(response.text, "lxml")
# items = soup.find_all("div", class_="sright")
# # print(items)
# for item in items:
#     author = item.find("a")
#
#     # print(item)







import aiohttp
import asyncio
from lxml import etree
# //div[@id="type2"]//div[@class="sright"]/a/text()

# from urllib import parse
#
# print(parse.unquote("%e6%9d%8e%e7%99%bd"))


async def fetch(session, url):
    """Crawl quote pages starting at *url*, printing each quote/author pair.

    Follows the "next page" (``a.amore``) link iteratively until no link is
    found.  The original implementation recursed once per page, which can
    exhaust Python's recursion limit on a long pagination chain; a ``while``
    loop has no such bound.

    Args:
        session: an open ``aiohttp.ClientSession`` used for all requests.
        url: absolute URL of the first page to scrape.
    """
    while url:
        async with session.get(url) as response:
            html_str = await response.text()
        tree = etree.HTML(html_str)
        items = tree.xpath('//div[@class="left"]//div[@class="sons"]/div[@class="cont"]')
        for item in items:
            content = item.xpath('./a[1]/text()')
            author = item.xpath('./a[2]/text()')
            # Guard against malformed entries: an empty xpath result would
            # otherwise raise IndexError and abort the whole crawl.
            if not content or not author:
                continue
            line = {
                'content': content[0],
                'author': author[0],
            }
            print(line)

        next_url = tree.xpath('//a[@class="amore"]/@href')
        if next_url:
            # Relative href -> absolute URL; loop continues with the next page.
            url = f'https://www.gushiwen.cn{next_url[0]}'
            print(url)
        else:
            print('没有下一页了')
            url = None



async def get_authors():
    """Fetch the author index page and concurrently crawl each author's quotes.

    Bug fixed: the original built the author URL with an f-string that nested
    single quotes inside single quotes (``f'...{a.xpath('./@href')[0]}'``),
    which is a SyntaxError on every Python version before 3.12 (PEP 701).
    The href is now extracted into a local variable first, which is valid on
    all supported versions and also lets us skip anchors with no href instead
    of raising IndexError.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url='https://www.gushiwen.cn/mingjus/') as response:
            authors_html = await response.text()
        tree = etree.HTML(authors_html)
        # NOTE(review): [:1] restricts the crawl to the first author only —
        # presumably left over from debugging; widen the slice to crawl all.
        authors = tree.xpath("//div[@id='type2']//div[@class='sright']/a")[:1]

        author_tasks = []
        for a in authors:
            href = a.xpath('./@href')
            if not href:  # skip anchors without an href attribute
                continue
            author_tasks.append(fetch(session, f'https://www.gushiwen.cn{href[0]}'))
        await asyncio.gather(*author_tasks)
    # Brief pause before the loop shuts down so transports close cleanly.
    await asyncio.sleep(1)

# Run the crawler only when executed as a script; importing this module
# should not fire off network requests.
if __name__ == "__main__":
    asyncio.run(get_authors())

















