# 1. Prepare a worker function
# 2. Create the threads
# 3. Start the threads
# 4. Wait for them to finish
import requests
import threading

urls = [f"https://www.cnblogs.com/#p{page}" for page in range(1, 50+1)]

def craw(url, timeout=10):
    """Fetch *url* and print the length of the response body.

    Args:
        url: Page URL to fetch.
        timeout: Seconds before ``requests.get`` gives up. Without a
            timeout a stalled connection would hang its worker thread
            forever, which in turn blocks ``join()`` in multi_thread().
    """
    r = requests.get(url, timeout=timeout)
    print(len(r.text))

def single_thread():
    """Fetch every URL one after another on the calling thread."""
    for target in urls:
        craw(target)

def multi_thread():
    """Fetch all URLs concurrently, spawning one thread per URL.

    Starts every worker first, then joins them all so the function
    returns only after every fetch has completed.
    """
    workers = [
        threading.Thread(target=craw, args=(u,))
        for u in urls
    ]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()



