'''
aiohttp，异步库的使用
'''
# 爬取崔庆才的博客
# import asyncio
# import aiohttp
# aiohttp的客户端用法
# async def fetch(session, url):
#     async with session.get(url) as response:
#         return await response.text(), response.status
#
# async def main():
#     async with aiohttp.ClientSession() as session:
#         html, status = await fetch(session, 'https://cuiqingcai.com')
#         print(f'html: {html[:100]}...')
#         print(f'status: {status}')
#
# if __name__ == '__main__':
#     # loop = asyncio.get_event_loop()
#     # loop.run_until_complete(main())
#     # python3.7及以后的版本，可以按以下写法，不需要显式声明事件循环
#     asyncio.run(main())


# 超时设置的用法
# async def main():
#     # 设置超时时间为1秒
#     timeout = aiohttp.ClientTimeout(total=1)
#     async with aiohttp.ClientSession(timeout=timeout) as session:
#         async with session.get('https://www.httpbin.org/get') as response:
#             print('status:', response.status)
#
# if __name__ == '__main__':
#     asyncio.run(main())


# 并发限制
# CONCURRENCY = 5
# URL = 'https://www.baidu.com'
# semaphore = asyncio.Semaphore(CONCURRENCY)
# session = None
#
# async def scrape_api():
#     async with semaphore:
#         print('scraping', URL)
#         async with session.get(URL) as response:
#             await asyncio.sleep(1)
#             return await response.text()
#
# async def main():
#     global session
#     session = aiohttp.ClientSession()
#     scrape_index_tasks = [asyncio.ensure_future(scrape_api()) for _ in range(10000)]
#     await asyncio.gather(*scrape_index_tasks)
#
# if __name__ == '__main__':
#     asyncio.run(main())