import concurrent.futures
import blog_spider

# craw
# Thread pool via map(): pass the prepared URL list; results come back
# in the same order as the inputs.
with concurrent.futures.ThreadPoolExecutor() as pool:
    htmls = pool.map(blog_spider.craw, blog_spider.urls)
    # Pair each URL with the HTML that was fetched for it.
    htmls = list(zip(blog_spider.urls, htmls))
    for url, html in htmls:
        print(url, len(html))

# Fix: message previously read 'carw over' (typo); the stage is 'craw'.
print('craw over')

# parse
# Thread pool via submit(): more flexible — two ways to iterate results.
with concurrent.futures.ThreadPoolExecutor() as pool:
    # Map each submitted Future back to the URL it belongs to.
    future_to_url = {
        pool.submit(blog_spider.parse, html): url
        for url, html in htmls
    }

    # Collect in submission order; result() blocks until each is done.
    for fut, url in future_to_url.items():
        print(url, fut.result())

    # Alternative: yield whichever future finishes first.
    # for fut in concurrent.futures.as_completed(future_to_url):
    #     print(future_to_url[fut], fut.result())