# import multiprocessing
#
# def process(index):
#     print(f'Process:{index}')
#
# if __name__ == '__main__':
#     for i in range(5):
#         p = multiprocessing.Process(target = process,args = (i,))
#         p.start()

# import multiprocessing
#
# def process(index):
#     print(f'Process{index}')
# if __name__ == '__main__':
#     for i in range(1,10):
#         pro = multiprocessing.Process(target = process,args = (i,))
#         pro.start()

# import multiprocessing
# def process(index):
#     print(f'process{index}')
# if __name__ == '__main__':
#     for i in range(1,50):
#         pro = multiprocessing.Process(target = process,args = (i,))
#         pro.start()

# import multiprocessing
# import time
#
#
# def process(index):
#     # print(multiprocessing.cpu_count())
#     print(f'process{index}')
# if __name__ == '__main__':
#     start = time.time()
#     for i in range(1,100):
#
#         pro = multiprocessing.Process(target = process,args = (i,))
#         pro.start()
#     print(time.time() - start)


#  Process pool (进程池) demo: distribute URL scraping across worker processes.

from multiprocessing import Pool
import requests
def scrape(url, timeout=10):
    """Fetch *url* and report success or failure to stdout.

    Best-effort: any request failure is caught and printed rather than
    raised, so one bad URL cannot kill a pool worker.

    :param url: the URL to fetch.
    :param timeout: seconds before the request is aborted. Without a
        timeout, ``requests.get`` can block a worker process forever on
        a stalled server.
    """
    try:
        # Explicit timeout so a pool worker can never hang indefinitely.
        requests.get(url, timeout=timeout)
        print(f'URL{url}scraped')
    except requests.RequestException:
        # RequestException is the base class of ConnectionError, Timeout,
        # HTTPError, etc. — the original ConnectionError case is still
        # covered, and timeouts no longer crash the worker.
        print(f'URL{url} not scraped',)

if __name__ == '__main__':
    # Pool of 3 worker processes; map() distributes the URLs across the
    # workers and blocks until every scrape() call has completed.
    pool = Pool(processes = 3)
    urls = [
        'http://www.baidu.com',
        'http://www.meituan.com',
        'http://www.blog.csdn.net',
        'http://sfsdfsdf.net'
    ]

    pool.map(scrape,urls)
    pool.close()  # no further tasks will be submitted
    # Wait for the worker processes to exit before the script ends;
    # close() alone does not block, so without join() the parent could
    # terminate while workers are still shutting down.
    pool.join()


