# Async image crawler (aiohttp + asyncio)
# Date: 2021/9/13
# Purpose of this chapter: download images concurrently with aiohttp.
import asyncio
import os

import aiohttp

# Image URLs to download concurrently.
# NOTE(review): the name is presumably a typo for "urls"; renaming it also
# requires updating the reference in main(), so it is kept as-is here.
uels = [
    'https://up.enterdesk.com/edpic/68/25/6c/68256cd943caf195804466f1c9247810.jpg',
    'https://up.enterdesk.com/edpic/c1/fa/b7/c1fab77af236ae20a6e725e7c30c8cf8.jpg',
    'https://up.enterdesk.com/edpic/e3/ce/24/e3ce242438d08fe0193b5565b8f40d5e.jpg'

]

async def aiodownload(url):
    """Download one image from *url* and save it into the img/ directory.

    The file name is taken from the last path segment of the URL.

    Raises:
        aiohttp.ClientResponseError: if the server returns an HTTP error
            status (avoids silently saving an error page as an image).
    """
    name = url.rsplit("/", 1)[1]  # last URL segment -> local file name
    # Create the target directory up front; the original crashed with
    # FileNotFoundError when img/ did not already exist.
    os.makedirs("img", exist_ok=True)
    # aiohttp sessions and responses are async context managers; `async with`
    # guarantees the connection is released even on error.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            resp.raise_for_status()  # fail loudly on 4xx/5xx responses
            data = await resp.read()  # reading the body is asynchronous
            with open("img/" + name, mode="wb") as f:
                f.write(data)
    print(name, "搞定")

async def main():
    """Download every image in `uels` concurrently and wait for all of them.

    Uses asyncio.gather instead of asyncio.wait: passing bare coroutines to
    asyncio.wait() was deprecated in Python 3.8 and raises TypeError on
    Python 3.12+; gather also schedules them as tasks automatically.
    """
    await asyncio.gather(*(aiodownload(url) for url in uels))

if __name__ == '__main__':
    # Script entry point: create an event loop and run the download pipeline.
    asyncio.run(main())