import aiohttp

from lxml import etree
import asyncio



from util.get_logger import get_looger
import json
logger = get_looger('古诗文.log',__file__)

async def fetch(session, url):
    """Scrape quote entries from a gushiwen.cn listing page, following pagination.

    Args:
        session: shared ``aiohttp.ClientSession`` used for every request.
        url: absolute URL of the first listing page to crawl.

    Side effects:
        Logs each scraped quote as a JSON line via the module-level logger.

    Fix vs. original: pagination used tail recursion (``await fetch(...)``),
    which grows the call stack one frame per page; rewritten as an iterative
    loop. Also guards the ``xpath(...)[0]`` lookup, which raised IndexError on
    cards without an anchor text node.
    """
    next_url = url
    while next_url:
        async with session.get(next_url) as response:
            print(f'开始爬取{next_url}')
            html_str = await response.text()
        tree = etree.HTML(html_str)
        items = tree.xpath('//div[@class="left"]//div[@class="sons"]/div[@class="cont"]')
        for item in items:
            texts = item.xpath('./a[1]/text()')
            if not texts:
                # Some cards carry no first-anchor text; skip instead of crashing.
                continue
            data = {
                '诗句': texts[0]
            }
            logger.info(json.dumps(data, ensure_ascii=False))
        # Follow the "more" link if present; otherwise stop paginating.
        next_urls = tree.xpath('//a[@class="amore"]/@href')
        if next_urls:
            next_url = f'https://www.gushiwen.cn{next_urls[0]}'
        else:
            print('没有下一页了')
            next_url = None

async def get_authors():
    """Download the famous-quotes index page and crawl every category link.

    Opens one shared client session, extracts the category anchors from the
    index, and scrapes all of them concurrently via :func:`fetch`.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get('https://www.gushiwen.cn/mingjus/') as response:
            index_html = await response.text()
            doc = etree.HTML(index_html)
            anchors = doc.xpath('//div[@id="type2"]//div[@class="sright"]/a')

            # One crawl coroutine per category anchor, all run concurrently.
            crawl_tasks = []
            for anchor in anchors:
                href = anchor.xpath('./@href')[0]
                crawl_tasks.append(fetch(session, f'https://www.gushiwen.cn{href}'))
            await asyncio.gather(*crawl_tasks)

    # Brief pause after the session closes, before the event loop exits.
    await asyncio.sleep(1)

asyncio.run(get_authors())