import os
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

def create_directory(dir_name):
    """Create *dir_name* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a separate existence check, which
    avoids the race between checking and creating (TOCTOU).
    """
    os.makedirs(dir_name, exist_ok=True)

def filename_from_url(url):
    """Derive a local filename from *url*, ignoring any query string.

    Splitting the raw URL on '/' keeps query strings (``img.jpg?v=2``)
    in the name; parsing the URL path first avoids that. Falls back to
    ``"image"`` when the path has no final component.
    """
    name = os.path.basename(urlparse(url).path)
    return name or "image"

def download_image(url, folder_path, timeout=10):
    """Download the image at *url* into *folder_path*.

    Prints a success or failure message; never raises on network
    errors, so one bad URL does not abort a whole scraping run.

    :param url: absolute URL of the image to fetch.
    :param folder_path: existing directory to write the file into.
    :param timeout: seconds to wait for the HTTP response (new,
        backward-compatible parameter; previously the request could
        hang indefinitely).
    """
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        # Connection/timeout errors previously propagated and killed
        # the whole run; treat them like a failed status code instead.
        print(f"Failed to retrieve {url}")
        return
    if response.status_code == 200:
        file_path = os.path.join(folder_path, filename_from_url(url))
        with open(file_path, 'wb') as f:
            f.write(response.content)
        print(f"Downloaded {url}")
    else:
        print(f"Failed to retrieve {url}")

def get_images_from_page(url, timeout=10):
    """Return the absolute URLs of all ``<img>`` tags on the page at *url*.

    Relative ``src`` values are resolved against *url* with ``urljoin``;
    ``<img>`` tags without a ``src`` attribute are skipped.

    :param url: page to fetch and parse.
    :param timeout: seconds to wait for the HTTP response (new,
        backward-compatible parameter; previously the request could
        hang indefinitely).
    :returns: list of absolute image URLs.
    """
    response = requests.get(url, timeout=timeout)
    soup = BeautifulSoup(response.text, 'html.parser')
    img_tags = soup.find_all('img')
    img_urls = [urljoin(url, img['src']) for img in img_tags if 'src' in img.attrs]
    return img_urls

def main(base_url, folder_name='downloaded_images'):
    """Scrape every image from *base_url* and save it under *folder_name*.

    Ensures the target directory exists, collects all image URLs from
    the page, then downloads each one in turn.
    """
    create_directory(folder_name)
    for image_url in get_images_from_page(base_url):
        download_image(image_url, folder_name)

if __name__ == "__main__":
    # Prompt (Chinese): "Enter the URL of the page to scrape images from".
    base_url = input("请输入要爬取图片的网页URL: ")
    main(base_url)