# -*- coding: utf-8 -*-
"""
@Time    : 2024/6/20 14:19 
@Author  : ZhangShenao 
@File    : crawl_concurrency.py 
@Desc    : 多线程爬虫
"""
import concurrent.futures
import time

import requests


# 从指定页面url中下载数据
def download_page(url):
    response = requests.get(url)
    print(f'download {len(response.content)} from {url}')


# Download data from all pages concurrently
def download_all_pages(urls):
    """Download every URL in *urls* using a thread pool.

    :param urls: iterable of page URLs; may be empty
    :raises Exception: re-raises the first exception raised by any worker
    """
    urls = list(urls)
    # Guard the empty case: ThreadPoolExecutor(max_workers=0) raises ValueError.
    if not urls:
        return
    # Cap the pool size so a very long URL list does not spawn unbounded threads.
    with concurrent.futures.ThreadPoolExecutor(max_workers=min(32, len(urls))) as executor:
        # Consume the map iterator: otherwise exceptions raised inside
        # download_page are silently discarded when the pool shuts down.
        for _ in executor.map(download_page, urls):
            pass


if __name__ == '__main__':
    # Target pages: Wikipedia portals plus a few programming-language articles.
    pages = [
        'https://en.wikipedia.org/wiki/Portal:Arts',
        'https://en.wikipedia.org/wiki/Portal:History',
        'https://en.wikipedia.org/wiki/Portal:Society',
        'https://en.wikipedia.org/wiki/Portal:Biography',
        'https://en.wikipedia.org/wiki/Portal:Mathematics',
        'https://en.wikipedia.org/wiki/Portal:Technology',
        'https://en.wikipedia.org/wiki/Portal:Geography',
        'https://en.wikipedia.org/wiki/Portal:Science',
        'https://en.wikipedia.org/wiki/Computer_science',
        'https://en.wikipedia.org/wiki/Python_(programming_language)',
        'https://en.wikipedia.org/wiki/Java_(programming_language)',
        'https://en.wikipedia.org/wiki/PHP',
        'https://en.wikipedia.org/wiki/Node.js',
        'https://en.wikipedia.org/wiki/The_C_Programming_Language',
        'https://en.wikipedia.org/wiki/Go_(programming_language)',
    ]

    # Time the concurrent crawl with a monotonic high-resolution clock.
    begin = time.perf_counter()
    download_all_pages(pages)
    elapsed = time.perf_counter() - begin
    print(f'Crawl {len(pages)} pages in {elapsed} seconds')
