import os  
import requests  
from bs4 import BeautifulSoup  
from urllib.parse import urljoin  
  
def fetch_image_urls(url):
    """Fetch the page at *url* and return a list of absolute image URLs.

    Parameters:
        url: The page URL to scrape for ``<img>`` tags.

    Returns:
        A list of absolute image URL strings, in document order.

    Raises:
        requests.HTTPError: If the page request returns an error status.
    """
    # Fetch the page content; fail fast on HTTP errors.
    response = requests.get(url)
    response.raise_for_status()

    # Parse the HTML with the stdlib-backed parser.
    soup = BeautifulSoup(response.text, 'html.parser')

    image_urls = []
    for img in soup.find_all('img'):
        img_url = img.get('src')
        # Skip <img> tags without a usable src attribute — the original
        # code crashed with AttributeError when src was missing (None).
        if not img_url:
            continue
        # urljoin resolves relative paths against the page URL and leaves
        # absolute URLs untouched, so no startswith() check is needed.
        image_urls.append(urljoin(url, img_url))

    return image_urls
  
def download_images(image_urls, download_folder):
    """Download every URL in *image_urls* into *download_folder*.

    Files are named ``image_<n><ext>`` where ``<ext>`` is taken from the
    URL path; URLs with no extension fall back to ``.jpg`` (the original
    behavior).

    Parameters:
        image_urls: Iterable of image URL strings to download.
        download_folder: Destination directory (created if missing).

    Raises:
        requests.HTTPError: If any image request returns an error status.
    """
    # exist_ok avoids the check-then-create race the original
    # os.path.exists() + makedirs() pair had.
    os.makedirs(download_folder, exist_ok=True)

    for idx, img_url in enumerate(image_urls, start=1):
        # Stream the body in chunks and close the connection
        # deterministically via the context manager (the original
        # leaked the streamed response).
        with requests.get(img_url, stream=True) as response:
            response.raise_for_status()

            # Derive the extension from the URL path, stripping any
            # fragment/query first; the original hardcoded .jpg even
            # for PNG/GIF/WebP images.
            path = img_url.split('#', 1)[0].split('?', 1)[0]
            ext = os.path.splitext(path)[1] or '.jpg'
            file_name = os.path.join(download_folder, f'image_{idx}{ext}')

            # Write the image to disk chunk by chunk.
            with open(file_name, 'wb') as file:
                for chunk in response.iter_content(1024):
                    file.write(chunk)

        print(f'Downloaded {file_name}')
  
def main():
    """Scrape image URLs from the target page and save them to disk."""
    target_page = 'https://pic.netbian.com/new/index_2.html'  # page to scrape
    output_dir = 'C:/Users/south/Desktop/test'

    # Collect the image URLs, then fetch each one locally.
    found_urls = fetch_image_urls(target_page)
    download_images(found_urls, output_dir)

if __name__ == '__main__':
    main()