# 引入依赖
from concurrent.futures import ProcessPoolExecutor
import time, os
import requests


def task(name):
    """Demo worker: print the given label together with this process's pid."""
    print(f'{name} {os.getpid()} is running')


if __name__ == '__main__':
    pool = ProcessPoolExecutor(4)  # pool capped at 4 worker processes

    # Synchronous-call demo: each submit() is immediately followed by
    # result(), so the tasks effectively run one after another even
    # though a pool is available.
    for _ in range(10):
        future = pool.submit(task, "进程pid：")  # submit(fn, *args) — positional or keyword args
        future.result()  # block until this single task finishes
    pool.shutdown(wait=True)  # close the pool and wait for all workers
    print("主")


################
################
# Second demo: submit every task first, THEN collect the results —
# so the downloads overlap instead of running one at a time.
def get(url):
    """Fetch *url* and return the response body, or a failure marker string.

    Sleeps 3 seconds first to make the concurrency speed-up visible.
    """
    print(f'{os.getpid()} GET {url}')
    time.sleep(3)
    response = requests.get(url)
    # Return the page text on HTTP 200, otherwise the failure marker.
    return response.text if response.status_code == 200 else "下载失败"


def parse(res):
    """Report (via print) the length of a downloaded payload *res*."""
    time.sleep(1)
    print(f"{os.getpid()} 解析结果为{len(res)}")


if __name__ == "__main__":
    urls = [
        'https://www.baidu.com',
        'https://www.sina.com.cn',
        'https://www.tmall.com',
        'https://www.jd.com',
        'https://www.python.org',
        'https://www.openstack.org',
        'https://www.baidu.com',
        'https://www.baidu.com',
        'https://www.baidu.com',
    ]
    executor = ProcessPoolExecutor(9)  # one worker per URL
    futures = []
    start = time.time()
    # Fan out: submit() returns immediately, so all downloads are queued
    # up front and run concurrently across the worker processes.
    for url in urls:
        futures.append(executor.submit(get, url))

    # Close the pool and wait for every download to finish.
    executor.shutdown(wait=True)
    for future in futures:
        parse(future.result())
    print('完成时间:', time.time() - start)
    # 完成时间: 13.209137678146362