# A crawler implemented with Python's async/await concurrency mechanism
import asyncio  # asyncio库中包含了使用协程所需要的大部分工具
import time


# 异步爬虫函数
# 使用async修饰词声明异步函数
# Declare an asynchronous function with the `async` keyword.
async def crawl_page(url):
    """Simulate crawling a single page.

    The trailing ``_<n>`` segment of *url* is interpreted as the number of
    seconds to sleep, standing in for real network latency.
    """
    print(f'crawling page:  {url}')
    # The part after the last underscore encodes the simulated fetch time.
    delay = int(url.split('_')[-1])
    # `await` suspends this coroutine, letting the event loop run others.
    await asyncio.sleep(delay)
    print(f'crawl page {url} done')


async def crawl(urls):
    """Crawl all *urls* concurrently and wait for every page to finish.

    Each URL is wrapped in a Task, which the event loop starts scheduling
    almost immediately after creation.
    """
    pending = []
    for u in urls:
        pending.append(asyncio.create_task(crawl_page(u)))
    # Block this coroutine until every task has completed.
    await asyncio.gather(*pending)


if __name__ == '__main__':
    # Four demo pages; the numeric suffix doubles as the per-page sleep time.
    urls = [f'http://example.com/page_{n}' for n in range(1, 5)]
    start = time.perf_counter()
    # Calling an async function only builds a coroutine object — nothing
    # runs until an event loop drives it.
    coro = crawl(urls)
    print(coro)
    asyncio.run(coro)
    end = time.perf_counter()
    print(f'Total time: {end - start} seconds')
