import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def is_valid_url(url):
    """Return True if *url* is an absolute URL with both a scheme and a host."""
    parts = urlparse(url)
    # Both the scheme (http/https/...) and the network location must be present.
    return all((parts.scheme, parts.netloc))

def get_all_images(url):
    """
    Collect all image URLs found on a web page.

    Scans every <img> tag for a ``src`` (or lazy-load ``data-src``)
    attribute, resolves relative paths against *url*, and keeps only
    results that parse as absolute URLs.

    Args:
        url: Address of the page to scan.

    Returns:
        list[str]: Absolute image URLs; empty list if the page cannot
        be fetched.
    """
    # Browser-like User-Agent so sites that block default clients still respond.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0 Safari/537.36'
    }

    try:
        # Context manager closes the session (the original leaked it);
        # timeout prevents the request from hanging forever.
        with requests.Session() as session:
            response = session.get(url, headers=headers, timeout=15)
            response.raise_for_status()  # surface HTTP 4xx/5xx as errors
    except Exception as e:
        print(f"无法访问页面 {url}，错误：{e}")
        return []

    soup = BeautifulSoup(response.text, "html.parser")

    img_urls = []
    for img in soup.find_all("img"):
        # Lazy-loaded images often carry the real URL in data-src.
        img_url = img.attrs.get("src") or img.attrs.get("data-src")
        if not img_url:
            continue

        # Resolve relative paths against the page URL.
        img_url = urljoin(url, img_url)

        if is_valid_url(img_url):
            img_urls.append(img_url)

    return img_urls

def download_images(img_urls, save_dir="downloaded_images"):
    """
    Download every image URL to a local directory.

    Files are named sequentially (``image_001.<ext>``, ...), keeping the
    extension found in the URL path and falling back to ``.jpg`` when the
    URL has none. Failures are reported and skipped, never raised.

    Args:
        img_urls: Iterable of image URLs to download.
        save_dir: Target directory; created if it does not exist.
    """
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(save_dir, exist_ok=True)

    for i, url in enumerate(img_urls):
        try:
            print(f"正在下载第 {i+1} 张图片: {url}")
            headers = {'User-Agent': 'Mozilla/5.0'}
            # Context manager releases the streamed connection (the
            # original never closed it); timeout prevents a stalled
            # server from hanging the loop.
            with requests.get(url, headers=headers, stream=True, timeout=15) as response:
                response.raise_for_status()

                # Keep the real extension (.png/.gif/...) from the URL
                # path; the original hard-coded .jpg for every file.
                ext = os.path.splitext(urlparse(url).path)[1] or ".jpg"
                filename = f"image_{i+1:03d}{ext}"
                save_path = os.path.join(save_dir, filename)

                with open(save_path, "wb") as f:
                    for chunk in response.iter_content(1024):
                        f.write(chunk)

            print(f"图片已保存到: {save_path}")

        except Exception as e:
            # Best-effort: report and continue with the next image.
            print(f"下载失败: {url}，错误：{e}")

if __name__ == "__main__":
    # 🔧 Replace this with the URL of the page you want to scrape.
    page_url = "https://www.cnblogs.com/abc1069/p/16296050.html"
    save_dir = "E:\\files\\download\\images"

    if page_url.startswith(('http://', 'https://')):
        print(f"开始抓取页面: {page_url}")
        images = get_all_images(page_url)
        print(f"共找到 {len(images)} 张图片")

        if images:
            download_images(images, save_dir)
        else:
            print("未找到任何图片")
    else:
        print("请输入有效的 URL（以 http:// 或 https:// 开头）")