import requests
from lxml import etree
import json
import aiohttp
import asyncio

# url = 'https://www.gushiwen.cn/mingjus/'
# res = requests.get(url)
# # print(res.text)
# tree = etree.HTML(res.text)
# # div[@class='titletype'][1]/div[@id='type2'][1]/div[@class='sright'][1]/a[1]
# authors = tree.xpath('//div[@id="type2"]/div[@class="sright"]/a/text()')
# # print(authors)
#
# for author in authors:
#     url = f'https://www.gushiwen.cn/mingjus/default.aspx?astr={author}'
#     res = requests.get(url)
#     tree = etree.HTML(res.text)

    # items = tree.xpath('//div[@class="left"]/div[@class="sons"]/div')

    # print(len(items))

    # contents = tree.xpath('//div[@class="sons"]/div[@class="cont"]/a[1]/text()')
    # titles = tree.xpath('//div[@class="sons"]/div[@class="cont"]/a[2]/text()')
    # print(res.text)

    # print(items)
    # for item in items:
    #     print(item)
    # break

async def fetch(session, url):
    """Crawl one author's quote listing, printing every quote and following
    "next page" links until pagination is exhausted.

    Args:
        session: an open aiohttp.ClientSession used for all requests.
        url: absolute URL of the first listing page for this author.

    Side effects: prints each quote dict and pagination markers to stdout.
    Returns None.
    """
    # Iterative pagination instead of self-recursion: a long chain of pages
    # would otherwise build an equally long chain of awaiting coroutines.
    while url:
        async with session.get(url) as response:
            html_str = await response.text()
        tree = etree.HTML(html_str)
        items = tree.xpath('//div[@class="left"]/div[@class="sons"]/div[@class="cont"]')

        print('开始-----------')
        for item in items:
            # Evaluate each xpath once; either anchor may be absent/empty,
            # so guard both (the original guarded only a[2] and could
            # IndexError on a[1]).
            contents = item.xpath('./a[1]/text()')
            sources = item.xpath('./a[2]/text()')
            line = {
                'content': contents[0] if contents else None,
                'source': sources[0] if sources else None,
            }
            print(line)
        print('结束-----------')

        # Relative href of the "more" link, e.g. /mingjus/default.aspx?page=2...
        next_url = tree.xpath('//a[@class="amore"]/@href')
        print(next_url)

        if next_url:
            url = f'https://www.gushiwen.cn{next_url[0]}'
        else:
            print('没有下一页了')
            url = None


async def main():
    """Entry coroutine: fetch the author index page, then crawl every
    author's quote listing concurrently via fetch().

    Side effects: network requests and stdout printing only; returns None.
    """
    async with aiohttp.ClientSession() as session:
        # Close the index response before launching the long-running crawl
        # so its connection returns to the pool immediately.
        async with session.get('https://www.gushiwen.cn/mingjus/') as response:
            res = await response.text()

        tree = etree.HTML(res)
        authors = tree.xpath('//div[@id="type2"]/div[@class="sright"]/a')

        tasks = []
        for item in authors:
            hrefs = item.xpath('./@href')
            # Skip anchors without an href — indexing [0] unconditionally
            # would raise IndexError and kill the whole gather.
            if hrefs:
                tasks.append(fetch(session, f'https://www.gushiwen.cn{hrefs[0]}'))
        await asyncio.gather(*tasks)

        # Brief grace period before the session closes.
        await asyncio.sleep(1)



if __name__ == '__main__':
    # Guard the entry point so importing this module doesn't start the crawl.
    asyncio.run(main())