# 引入线程池包
from concurrent.futures import ThreadPoolExecutor
import time

def crawl(url, delay=3):
    """Simulate crawling *url*: sleep for *delay* seconds, then return a status message.

    Args:
        url: The address to "request".
        delay: Seconds to sleep, simulating network latency (default 3,
            matching the original hard-coded value).

    Returns:
        A formatted message containing the requested URL.
    """
    msg = f'请求地址为：{url}'
    time.sleep(delay)  # stand-in for real network I/O
    return msg


if __name__ == '__main__':
    base_url = 'http://www.baidu.com?page={}'
    # Pool of 3 worker threads; the `with` context manager shuts the
    # pool down (joining all workers) automatically on exit.
    with ThreadPoolExecutor(3) as pool:
        # Submit ALL tasks first so the workers actually run concurrently.
        # Calling future.result() right after submit() inside the loop would
        # block each iteration until that task finished, serializing the
        # work and defeating the purpose of the pool.
        futures = [pool.submit(crawl, base_url.format(i)) for i in range(5)]
        # Collect results in submission order; result() blocks until the
        # corresponding task has completed.
        for future in futures:
            print(future.result())
