import asyncio
import aiohttp
from lxml import etree

# Shared accumulator for scraped quote dicts; mutated in place by fetch_page().
datas = []
async def fetch_page(session, url, datas):
    """Scrape quote entries starting at *url* and follow "next" links to the end.

    Each quote is appended to *datas* as a dict with keys:
      'content' — quote text with the surrounding quote characters stripped,
      'author'  — author name,
      'tag'     — list of all tag strings for the quote (may be empty).

    Args:
        session: an open aiohttp.ClientSession used for all requests.
        url: absolute URL of the first page to scrape.
        datas: list mutated in place with the scraped dicts.

    Raises:
        aiohttp.ClientResponseError: if any page returns an HTTP error status.
    """
    # Iterate pages with a loop instead of recursion: the original recursed once
    # per page, growing a coroutine frame per page for no benefit.
    while url:
        async with session.get(url) as response:
            # Fail fast on HTTP errors rather than parsing an error page.
            response.raise_for_status()
            html = await response.text()
        tree = etree.HTML(html)

        for item in tree.xpath('//div[@class="quote"]'):
            content = item.xpath('.//span[@class="text"]/text()')[0]
            author = item.xpath('.//small/text()')[0]
            # Collect every tag — a quote may carry several.
            tag = item.xpath('.//div[@class="tags"]//span[@class="tag"]/text()')
            datas.append({
                'content': content[1:-1],  # drop the leading/trailing quote marks
                'author': author,
                'tag': tag,
            })

        # Follow the "next" pagination link; stop when there is none.
        next_button = tree.xpath('//li[@class="next"]/a/@href')
        url = f'http://quotes.toscrape.com{next_button[0]}' if next_button else None


async def main():
    """Crawl quotes.toscrape.com from page 1 and return the collected quotes."""
    start_url = 'http://quotes.toscrape.com/page/1'
    # The session is shared across all page fetches and closed automatically.
    async with aiohttp.ClientSession() as session:
        await fetch_page(session, start_url, datas)
    return datas

# Guard the entry point so importing this module does not trigger a network crawl.
if __name__ == "__main__":
    quotes = asyncio.run(main())
    print(f"共爬取 {len(quotes)} 条数据")