# -*- coding: utf-8 -*-
"""
@Time    : 2024/6/20 15:28 
@Author  : ZhangShenao 
@File    : crawl_aio.py 
@Desc    : 使用异步AIO实现爬虫
"""
import asyncio
import time

import aiohttp


async def download_page(url):
    """Download a single page asynchronously and report its size.

    Fix vs. original: the old version only printed ``resp.content_length``
    without ever reading the body — so nothing was actually downloaded, and
    ``content_length`` is ``None`` when the server uses chunked transfer
    encoding.  We now read the full body and report the real byte count.

    :param url: the page URL to fetch.
    """
    # NOTE(review): creating one ClientSession per URL works but is
    # inefficient; aiohttp recommends sharing a single session across
    # requests. Left unchanged to keep this function self-contained.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # Actually consume the response body; len() is always accurate,
            # unlike the Content-Length header.
            content = await resp.read()
            print(f'read {len(content)} from {url}')


async def download_all_pages(urls):
    """Fetch every URL concurrently and block until all downloads finish.

    :param urls: iterable of page URLs to crawl in parallel.
    """
    # gather() schedules each coroutine as a Task under the hood, so there
    # is no need to call asyncio.create_task() explicitly beforehand.
    await asyncio.gather(*(download_page(site) for site in urls))


if __name__ == '__main__':
    # Wikipedia portal/article pages used as the crawl workload.
    sites = ['https://en.wikipedia.org/wiki/Portal:Arts', 'https://en.wikipedia.org/wiki/Portal:History',
             'https://en.wikipedia.org/wiki/Portal:Society', 'https://en.wikipedia.org/wiki/Portal:Biography',
             'https://en.wikipedia.org/wiki/Portal:Mathematics', 'https://en.wikipedia.org/wiki/Portal:Technology',
             'https://en.wikipedia.org/wiki/Portal:Geography', 'https://en.wikipedia.org/wiki/Portal:Science',
             'https://en.wikipedia.org/wiki/Computer_science',
             'https://en.wikipedia.org/wiki/Python_(programming_language)',
             'https://en.wikipedia.org/wiki/Java_(programming_language)', 'https://en.wikipedia.org/wiki/PHP',
             'https://en.wikipedia.org/wiki/Node.js', 'https://en.wikipedia.org/wiki/The_C_Programming_Language',
             'https://en.wikipedia.org/wiki/Go_(programming_language)']

    # Time the full concurrent crawl with a monotonic high-resolution clock.
    t0 = time.perf_counter()
    asyncio.run(download_all_pages(sites))
    elapsed = time.perf_counter() - t0
    print(f'Crawl {len(sites)} pages in {elapsed} seconds')
