import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
import os

def crawl(url):
    r = requests.get(url)
    results = parser(r.text)  # parse the page and extract the fields we need
    for result in results:
        print("Current process ID:", os.getpid())  # print the ID of the worker process
        print(result)


def parser(html):
    soup = BeautifulSoup(html, 'html.parser')
    links = soup.find_all('a', class_='post-item-title')  # <a> tags with class "post-item-title"
    return [(link['href'], link.get_text()) for link in links]

if __name__ == '__main__':
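    # The __main__ guard matters here: with spawn-based start methods (the
    # default on Windows and macOS), each worker process re-imports this
    # module, and code outside the guard would run again in every child.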
    urls = [
        f"https://www.cnblogs.com/#p{page}"
        for page in range(1, 10 + 1)
    ]
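    # Note: requests never sends the "#p{page}" fragment to the server
    # (fragments are client-side only), so every URL above returns the same
    # front-page HTML; real pagination would need a server-side query
    # parameter or endpoint.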

    with Pool(processes=4) as pool:  # create a pool of 4 worker processes
        pool.map(crawl, urls)  # distribute the URLs across the pool and fetch them concurrently

