import json

import aiohttp
import asyncio
from lxml import etree
from util.logger import get_logger

# Module-level logger; presumably get_logger configures a file handler writing
# to '古诗词.log' and names the logger after this file — TODO confirm in util.logger.
logger = get_logger('古诗词.log', __file__)


async def fetch(session, url):
    """Crawl one listing page of quotes and follow "next page" links.

    For each content block on the page, log its first quote line as JSON,
    then move on to the page linked by the "amore" (load more) anchor until
    no such link exists.

    Args:
        session: an open aiohttp.ClientSession used for all requests.
        url: absolute URL of the first listing page to crawl.
    """
    # Iterate instead of recursing: the original self-recursive version grew
    # the call stack by one frame per page, which fails on long paginations.
    while url:
        async with session.get(url) as response:
            print(f'开始爬取{url}')
            html_text = await response.text()
        tree = etree.HTML(html_text)
        items = tree.xpath('//div[@class="left"]//div[@class= "sons"]/div[@class="cont"]')
        for item in items:
            # Guard the [0] index: a block without an <a> text node would
            # otherwise raise IndexError and abort the whole crawl.
            texts = item.xpath('./a[1]/text()')
            if not texts:
                continue
            info = {
                'content': texts[0],
            }
            # ensure_ascii=False keeps the Chinese text readable in the log.
            logger.info(json.dumps(info, ensure_ascii=False))
        print(f'爬取{url}结束')
        next_urls = tree.xpath('//a[@class="amore"]/@href')
        if next_urls:
            # Relative href from the page; prefix the site origin.
            url = f'https://www.gushiwen.cn{next_urls[0]}'
            print(url)
        else:
            print(f"没有下一页")
            url = None

async def get_author():
    """Fetch the quote index page and crawl every category link concurrently.

    Opens one shared aiohttp session, collects the category anchors from the
    index page, and fans out a fetch() task per category URL.
    """
    async with aiohttp.ClientSession() as session:
        # Grab the index page listing all quote categories.
        async with session.get(url='https://www.gushiwen.cn/mingjus/') as response:
            page_html = await response.text()
            doc = etree.HTML(page_html)
            category_links = doc.xpath('//*[@id="type2"]//div[@class="sright"]/a')

        # One crawl task per category; hrefs are site-relative.
        tasks = []
        for link in category_links:
            href = link.xpath("./@href")[0]
            tasks.append(fetch(session, f'https://www.gushiwen.cn{href}'))
        await asyncio.gather(*tasks)


# Guard the entry point so importing this module does not kick off a crawl.
if __name__ == '__main__':
    asyncio.run(get_author())

