import concurrent.futures as futures
from blog_spider import urls, crawl, parse

# Fetch all pages concurrently. Executor.map returns results in the
# same order as the input `urls`, regardless of which finishes first.
with futures.ThreadPoolExecutor(max_workers=5) as executor:
    pages = executor.map(crawl, urls)
    # Pair each url with its fetched html; `results` feeds the parse stage below.
    results = list(zip(urls, pages))
    for page_url, _page_html in results:
        print('爬了：', page_url)


# Parse each fetched page concurrently.
# NOTE: unlike Executor.map, submit() + as_completed() yields futures in
# COMPLETION order, not submission order — the original comment here
# claimed the opposite.
with futures.ThreadPoolExecutor(max_workers=5) as pool:
    # Map each future back to the url it was built from, so the result
    # can be labeled when it completes.
    future_to_url = {pool.submit(parse, html): url for url, html in results}

    for future in futures.as_completed(future_to_url):
        url = future_to_url[future]
        print(f'解析了 {url} : 结果是', future.result())  # printed in completion order
