import aiohttp
import requests
import asyncio
from lxml import etree



async def fetch(session, url):
    """Fetch one quotes page and print each quote's text, author, and tags.

    Args:
        session: shared aiohttp.ClientSession used for the GET request.
        url: absolute URL of the quotes page to scrape.

    Raises:
        aiohttp.ClientResponseError: if the server answers with a 4xx/5xx status.
    """
    async with session.get(url) as response:
        # Fail fast on an HTTP error instead of silently parsing an error page.
        response.raise_for_status()
        text = await response.text()
        tree = etree.HTML(text)
        items = tree.xpath('//div[@class="quote"]')
        for item in items:
            # Each xpath() call returns a list; guard the [0] index so a
            # malformed quote block does not raise IndexError.
            content_nodes = item.xpath('./span')
            content = content_nodes[0].text if content_nodes else ''
            # Take the author string itself, not the one-element list —
            # the original printed e.g. "['Albert Einstein']".
            author_texts = item.xpath('./span/small/text()')
            author = author_texts[0] if author_texts else ''
            tags = item.xpath('./div[@class="tags"]/a/text()')
            print(f'文本内容：{content}，作者：{author},标签：{tags}')
        print(f'本页爬取结束')



async def main():
    """Crawl pages 1-10 of quotes.toscrape.com concurrently and print every quote."""
    async with aiohttp.ClientSession() as session:
        # Site pages are 1-indexed: the original range(10) requested the
        # nonexistent page 0 and skipped page 10.
        urls = [f'https://quotes.toscrape.com/page/{page}' for page in range(1, 11)]
        await asyncio.gather(*(fetch(session, url) for url in urls))




if __name__ == '__main__':
    # Script entry point: run the async crawl to completion.
    asyncio.run(main())





