import urllib.parse
import asyncio
import aiohttp
from lxml import etree


async def fetch_page(session, url):
    """Fetch *url* asynchronously and return the decoded response body.

    Args:
        session: An open ``aiohttp.ClientSession`` used to issue the request.
        url: Absolute URL to fetch.

    Returns:
        The response body decoded as text.

    Raises:
        aiohttp.ClientResponseError: If the server answers with a 4xx/5xx
            status (the original silently returned the error page's HTML,
            which the callers would then parse as real content).
    """
    async with session.get(url) as response:
        # Fail loudly on HTTP errors instead of handing an error page
        # to the lxml parsers downstream.
        response.raise_for_status()
        return await response.text()


async def crawl_author_poems(session, author_name, author_url):
    """Crawl every quote page for one author, following pagination links.

    Args:
        session: An open ``aiohttp.ClientSession`` shared across crawlers.
        author_name: Display name of the author (used only in log output).
        author_url: URL of the author's first listing page.
    """
    current_url = author_url
    page_num = 1

    print(f"正在爬取作者: {author_name}")

    while current_url:
        try:
            # Fetch and parse the current listing page.
            html_content = await fetch_page(session, current_url)
            poem_tree = etree.HTML(html_content)

            # Each .cont node holds one quote: <a>line</a><a>source title</a>.
            poem_data = poem_tree.xpath("//*[@class='left']/*[@class='sons']/*[@class='cont']")
            print(f"第{page_num}页诗句数量: {len(poem_data)}")

            for i, poem in enumerate(poem_data, 1):
                line = poem.xpath('./a[1]/text()')
                title = poem.xpath('./a[2]/text()')

                if line and title:
                    print(f"{author_name}-第{page_num}页-第{i}首：诗句：{line[0]}--标题：{title[0]}")
                else:
                    print(f"{author_name}-第{page_num}页-第{i}首：无标题")

            # Look for a "next page" (下一页) link in the pager.
            next_page_links = poem_tree.xpath("//div[@class='pagesright']/a[contains(text(), '下一页')]")

            # .get('href') may legitimately return None; guard against it
            # instead of concatenating "None" into the next URL.
            next_page_href = next_page_links[0].get('href') if next_page_links else None
            if next_page_href:
                # urljoin handles relative AND absolute hrefs correctly,
                # unlike string concatenation against a hard-coded host.
                current_url = urllib.parse.urljoin(current_url, next_page_href)
                page_num += 1
                # Throttle between page requests to be polite to the site.
                await asyncio.sleep(1)
            else:
                current_url = None

        except Exception as e:
            # Broad catch is deliberate: abandon this author on any
            # network/parse error rather than crashing the whole crawl.
            print(f"爬取 {current_url} 时出错: {e}")
            break


async def main():
    """Entry point: list authors from the index page and crawl the first few."""
    async with aiohttp.ClientSession() as session:
        # Fetch the famous-quotes index page and extract the author links.
        html_content = await fetch_page(session, "https://www.gushiwen.cn/mingjus/")
        tree = etree.HTML(html_content)

        authors = tree.xpath('//*[@id="type2"]/*[@class="sright"]/a')
        print(f"找到 {len(authors)} 位作者")

        async def staggered_crawl(delay, name, url):
            # Sleep INSIDE the task: the original awaited asyncio.sleep(1)
            # while building the task list, which only delayed creating the
            # coroutine objects — every first request still fired at the
            # same instant when gather() started them. Delaying here
            # actually spaces the crawlers out while they run concurrently.
            await asyncio.sleep(delay)
            await crawl_author_poems(session, name, url)

        tasks = []
        # Limit to the first 3 authors to keep load on the site small.
        for i, data in enumerate(authors[:3]):
            text_content = data.text
            encoded = urllib.parse.quote(text_content)
            author_url = f"https://www.gushiwen.cn/mingjus/default.aspx?astr={encoded}"

            # Stagger task i by i seconds (0s, 1s, 2s, ...).
            tasks.append(staggered_crawl(i, text_content, author_url))

        # Run all author crawlers concurrently.
        await asyncio.gather(*tasks)


# Run the async program (asyncio.run creates and closes the event loop).
if __name__ == "__main__":
    asyncio.run(main())
