# Download the homepages of three websites using asynchronous I/O.

import asyncio
import aiohttp
import sys,os,time


# Map of short site keys to the homepage URLs to download.
WEB_URLS = {
    "baidu":'https://www.baidu.com/',
    "qq":'http://www.qq.com/',
    "y163":'http://www.163.com/'
}

# Directory next to this script where the downloaded HTML files are written.
ROOT = os.path.join(os.path.split(__file__)[0],'downloads')


def save_web(key, html):
    """Write *html* (bytes) to ROOT/<key>.html and return the file path.

    Args:
        key: site key; lower-cased to build the file name.
        html: page body as bytes (file is opened in binary mode).

    Returns:
        The path of the file that was written.
    """
    # Create the download directory on first use; without this, open()
    # raises FileNotFoundError when 'downloads' does not exist yet.
    os.makedirs(ROOT, exist_ok=True)
    path = os.path.join(ROOT, key.lower() + '.html')
    with open(path, 'wb') as fb:
        fb.write(html)
    return path

async def get_web(key):
    """Fetch the homepage for *key* (a WEB_URLS key) and return the raw body bytes.

    Modernized from the deprecated ``@asyncio.coroutine`` / ``yield from``
    style (removed in Python 3.11) to native async/await, and from the
    removed session-less ``aiohttp.request()`` call to an explicit
    ``ClientSession`` used as an async context manager so the connection
    is always released.
    """
    async with aiohttp.ClientSession() as session:
        # Reading the response body is also asynchronous.
        async with session.get(WEB_URLS[key]) as resp:
            return await resp.read()


async def download_one(key):
    """Download one site's homepage and save it under ROOT.

    Modernized to native async/await — the ``@asyncio.coroutine``
    decorator and ``yield from`` delegation were removed in Python 3.11.

    Args:
        key: site key present in WEB_URLS.
    """
    html = await get_web(key)
    path = save_web(key, html)
    print(path, " is saved ..")

def download_many(key_list):
    """Download every site in *key_list* concurrently; return how many finished.

    Modernized from the deprecated ``get_event_loop()`` /
    ``run_until_complete()`` pattern to ``asyncio.run()``, which creates
    and closes the loop itself. The coroutines are wrapped in Tasks
    because passing bare coroutines to ``asyncio.wait()`` raises a
    TypeError on Python 3.11+.

    Args:
        key_list: iterable of WEB_URLS keys to download.

    Returns:
        The number of completed downloads.
    """
    async def _run_all():
        # Wrap each coroutine in a Task (required by modern asyncio.wait).
        tasks = [asyncio.ensure_future(download_one(key)) for key in key_list]
        # wait() returns (done, pending); everything is done by default.
        done, _pending = await asyncio.wait(tasks)
        return done

    done = asyncio.run(_run_all())
    print("res==", done, list(done))  # elements are finished Task objects
    return len(done)

def main(fn):
    """Run *fn* over the fixed site list and report the elapsed time.

    Args:
        fn: callable taking a list of site keys and returning a count
            of downloaded files.
    """
    start = time.time()
    sites = ['baidu', 'qq', 'y163']

    count = fn(sites)
    elapsed = time.time() - start
    print("下载{}个html文件耗时{t}秒".format(count, t=elapsed))

if __name__ == "__main__":
    main(download_many)