import requests
import re
import os
import threading


# Sanitize a name so it can be used as a file name on Windows.
def clean_filename(filename):
    """Return *filename* with the characters /:*?"<>| removed."""
    return filename.translate(str.maketrans("", "", '/:*?"<>|'))


# Download one image and save it to disk.
def download_image(image_link, image_name, folder_path):
    """Fetch a single image from pic.netbian.com and write it as a .jpg.

    Args:
        image_link: URL path of the image (e.g. "/uploads/...jpg"),
            joined onto the site's base URL.
        image_name: already-sanitized file name, without extension.
        folder_path: directory (created by the caller) to save into.

    Intended to run in a worker thread; all failures are reported via
    print rather than raised, so one bad image never kills the crawl.
    """
    try:
        # timeout= keeps a worker thread from hanging forever if the
        # server stops responding (the original had no timeout).
        response = requests.get(
            "https://pic.netbian.com" + image_link,
            headers=headers,
            timeout=10,
        )
        if response.status_code == 200:
            with open(os.path.join(folder_path, "{}.jpg".format(image_name)), "wb") as img:
                img.write(response.content)
            print("{}.jpg 获取成功".format(image_name))
        else:
            # Previously non-200 responses were dropped silently.
            print("下载图片失败: HTTP {}".format(response.status_code))
    except (requests.RequestException, OSError) as e:
        # RequestException covers network errors; OSError covers the
        # file write. Narrower than the original bare Exception.
        print("下载图片失败:", e)


# Scrape one listing page and spawn a download thread per image found.
def crawl_page(page_number):
    """Fetch listing page *page_number*, extract image links, and start
    one download_image thread per image.

    Args:
        page_number: 1-based page index; page 1 uses a different URL
            pattern than later pages.
    """
    if page_number == 1:
        url = "https://pic.netbian.com/index.html"
    else:
        url = "https://pic.netbian.com/index_{}.html".format(page_number)
    # timeout= so a dead page cannot hang this crawler thread forever.
    response = requests.get(url=url, headers=headers, timeout=10)
    # The site is GBK-encoded; let requests detect it so Chinese alt
    # text decodes correctly.
    response.encoding = response.apparent_encoding
    # \s+ replaces the original lone "." between the attributes: the
    # separator in the markup is whitespace, and "." (any one char)
    # would break on multiple spaces between src and alt.
    parr = re.compile(r'src="(/u.*?)"\s+alt="(.*?)"')
    images = re.findall(parr, response.text)
    folder = "彼岸图网图片获取-{}".format(page_number)

    # makedirs with exist_ok avoids the check-then-create race of the
    # original isdir()/mkdir() pair.
    os.makedirs(folder, exist_ok=True)

    # One worker thread per image; names are sanitized before use as
    # file names.
    for link, raw_name in images:
        name = clean_filename(raw_name)
        threading.Thread(target=download_image, args=(link, name, folder)).start()


# Shared request headers: a desktop-browser User-Agent so the site
# serves pages to the scraper instead of rejecting it as a bot.
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"
}

# Entry point: crawl pages 1-9 concurrently, one thread per page (each
# page then spawns one thread per image inside crawl_page).
if __name__ == "__main__":
    # The guard prevents importing this module from firing off network
    # threads as a side effect.
    for page in range(1, 10):
        threading.Thread(target=crawl_page, args=(page,)).start()