"""
    @Author  ：思念 
    @File    ：10.使用线程池方式爬取豆瓣250.py
    @Date    ：2025/1/5 19:14 
"""
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from lxml import etree

# Page template for Douban Top 250: "start" is the 0-based offset of the first
# movie on the page (25 movies per page, 10 pages in total).
url = 'https://movie.douban.com/top250?start={}&filter='

# Douban blocks requests without a browser-like User-Agent, so spoof Chrome.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}


def get_html(page):
    """Fetch one page of the Douban Top 250 list and return its movie titles.

    Args:
        page: 0-based page index; ``page * 25`` becomes the "start" offset
            in the request URL.

    Returns:
        list[str]: the primary title of each movie entry on that page.

    Raises:
        requests.HTTPError: if Douban answers with a non-2xx status
            (e.g. 403/418 when the scraper is blocked).
        requests.exceptions.Timeout: if the request exceeds the timeout.
    """
    # Without a timeout a stalled response would hang a pool worker forever.
    response = requests.get(url.format(page * 25), headers=headers, timeout=10)
    # Fail loudly on an error page instead of silently parsing it (which
    # would return an empty title list).
    response.raise_for_status()
    tree = etree.HTML(response.text)
    # Each entry's primary (Chinese) title is the first <span> under the
    # <a> inside <div class="hd">.
    result = tree.xpath("//div[@class='hd']/a/span[1]/text()")
    return result


if __name__ == '__main__':
    # Fan out one download task per page: 10 pages * 25 movies = Top 250.
    with ThreadPoolExecutor(max_workers=5) as executor:
        pending = [executor.submit(get_html, n) for n in range(10)]
        # as_completed yields each future as soon as its worker finishes,
        # so pages print in completion order rather than submission order.
        for done in as_completed(pending):
            print(done.result())
        print("程序结束")