import json

import aiohttp
import asyncio
from lxml import etree
from util.logger import get_looger

# Module-level logger writing to "古诗文.log" ("classical Chinese poetry" log),
# tagged with this file's path.
# NOTE(review): the helper is spelled `get_looger` (sic) as imported from
# util.logger — fix the typo at its definition before renaming it here.
logger = get_looger("古诗文.log", __file__)



async def fetch(session, url):
    """Scrape quote lines from *url* and follow every "load more" page.

    Parameters
    ----------
    session : aiohttp.ClientSession
        Shared HTTP session, owned by the caller.
    url : str
        Absolute URL of the first listing page.

    Each quote's text is logged as one JSON object per line. Pagination is
    followed iteratively; the original recursed once per page, which could
    hit the recursion limit on long listings.
    """
    while True:
        async with session.get(url) as response:
            print(f"开始爬取 {url}")
            html_str = await response.text()

        tree = etree.HTML(html_str)
        items = tree.xpath('//div[@class="left"]//div[@class="sons"]/div[@class="cont"]')
        for item in items:
            texts = item.xpath('./a[1]/text()')
            if not texts:
                # Malformed entry: skip it instead of crashing the whole
                # page on an IndexError.
                continue
            line = {
                'content': texts[0],
                # 'source': item.xpath('./a[2]/text()')[0]
            }
            logger.info(json.dumps(line, ensure_ascii=False))
        print(f"爬取 {url} 结束")

        # "amore" is the site's load-more anchor; absence means last page.
        next_urls = tree.xpath('//a[@class="amore"]/@href')
        if not next_urls:
            print(f"没有下一页")
            break
        url = f'https://www.gushiwen.cn{next_urls[0]}'
        print(url)



async def get_authors():
    """Fetch the famous-quotes index and crawl every category concurrently.

    Opens one shared ClientSession, extracts each category link from the
    "type2" sidebar of https://www.gushiwen.cn/mingjus/, and runs fetch()
    over all of them with asyncio.gather.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get('https://www.gushiwen.cn/mingjus/') as response:
            authors_html = await response.text()

        # Response is fully read; build and run the crawl tasks outside the
        # `session.get` block so the index response isn't held open for the
        # duration of the whole crawl (the original gathered inside it).
        tree = etree.HTML(authors_html)
        authors = tree.xpath('//div[@id="type2"]/div[@class="sright"]/a')

        tasks = []
        for a in authors:
            hrefs = a.xpath('./@href')
            if not hrefs:
                # Anchor without an href: nothing to crawl.
                continue
            # Extracting the href first avoids nesting single quotes inside
            # a single-quoted f-string, which is a SyntaxError before
            # Python 3.12 (PEP 701).
            tasks.append(fetch(session, f'https://www.gushiwen.cn{hrefs[0]}'))
        await asyncio.gather(*tasks)

    # Brief grace period after the session closes so pending connection
    # teardown can complete before the event loop shuts down.
    await asyncio.sleep(1)


if __name__ == "__main__":
    # Guard the entry point so importing this module does not start the
    # crawler as a side effect.
    asyncio.run(get_authors())
