import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
import logging
import hashlib
import re

# Configure root logging: INFO level with a timestamped "time - level - message" format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def sanitize_filename(filename):
    """Return *filename* with characters illegal in common filesystems removed.

    Drops every occurrence of \\ / * ? : " < > | and keeps all other
    characters unchanged.
    """
    # str.translate deletes each mapped character in a single C-level pass.
    illegal = str.maketrans('', '', '\\/*?:"<>|')
    return filename.translate(illegal)

def download_image(url, folder_path, headers, timeout=10):
    """Download one image URL and save it under *folder_path*.

    The file name is the MD5 hash of the full URL plus an extension derived
    from the response Content-Type, so distinct URLs never overwrite each
    other even when their basenames match.

    Args:
        url: Absolute URL of the image to fetch.
        folder_path: Directory the image is written into (must already exist).
        headers: HTTP headers to send with the request.
        timeout: Seconds to wait for the server before aborting (new, with a
            default, so existing callers are unaffected).
    """
    file_path = None  # bound up front so the IOError handler can always log it
    try:
        # A timeout prevents a stalled server from hanging the worker thread.
        response = requests.get(url, headers=headers, timeout=timeout)
        if response.status_code == 200:
            # Verify the response really is an image before writing anything.
            content_type = response.headers.get('Content-Type', '')
            if 'image' not in content_type:
                logging.warning(f"跳过非图片内容: {url}, 内容类型: {content_type}")
                return

            # Use only the media type for the extension; strip parameters such
            # as "; charset=..." which would otherwise corrupt the file name.
            media_type = content_type.split(';')[0].strip()
            ext = '.' + media_type.split('/')[1] if '/' in media_type else ''

            # Hash the FULL url (not just the basename): different images often
            # share a basename like "thumb.jpg" and would otherwise collide.
            unique_name = f"{hashlib.md5(url.encode()).hexdigest()}{ext}"
            file_path = Path(folder_path) / sanitize_filename(unique_name)

            with open(file_path, 'wb') as file:
                file.write(response.content)
            logging.info(f"图片已保存: {file_path}")
        else:
            logging.error(f"无法下载图片: {url}, 状态码: {response.status_code}")
    except requests.exceptions.RequestException as e:
        logging.error(f"请求失败: {url}, 错误: {e}")
    except IOError as e:
        logging.error(f"文件写入失败: {file_path}, 错误: {e}")

def get_next_page_url(soup, base_url):
    """Return the absolute URL of the "next page" link, or None when absent.

    Looks for an <a class="sb_pagN"> element and resolves its href against
    *base_url*.
    """
    anchor = soup.find('a', {'class': 'sb_pagN'})
    # Guard clauses: no pagination anchor, or an anchor without an href.
    if not anchor:
        return None
    if 'href' not in anchor.attrs:
        return None
    return urljoin(base_url, anchor['href'])

def crawl_images(base_url, folder_path, headers, num_pages=1):
    """Crawl up to *num_pages* pages starting at *base_url*, downloading every image.

    Pages are walked via the "next page" link; the images on each page are
    fetched concurrently by a thread pool sized from the CPU count.

    Args:
        base_url: URL of the first page to crawl.
        folder_path: Directory images are saved into (created if missing).
        headers: HTTP headers sent with every request.
        num_pages: Maximum number of pages to visit.
    """
    # Create the output folder (and any parents) if it does not exist yet.
    Path(folder_path).mkdir(parents=True, exist_ok=True)

    current_url = base_url
    for page in range(num_pages):
        try:
            # A timeout keeps a stalled server from hanging the whole crawl.
            response = requests.get(current_url, headers=headers, timeout=10)
            if response.status_code != 200:
                # Parsing an error page would only produce garbage links.
                logging.error(f"无法下载图片: {current_url}, 状态码: {response.status_code}")
                break
            soup = BeautifulSoup(response.text, 'html.parser')

            # Collect unique image URLs; data-src covers lazy-loaded images.
            img_urls = set()
            for img in soup.find_all('img'):
                src = img.get('src') or img.get('data-src')
                if src:
                    # Resolve relative paths against the page they appear on.
                    img_urls.add(urljoin(current_url, src))

            # Size the pool from the CPU count; os.cpu_count() may return None.
            max_workers = min(32, (os.cpu_count() or 1) + 4)
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = [executor.submit(download_image, img_url, folder_path, headers)
                           for img_url in img_urls]
                for future in as_completed(futures):
                    future.result()  # surface any unexpected worker exception

            # Join the next-page link against the CURRENT page's URL, not the
            # original base_url — relative hrefs on later pages resolve there.
            current_url = get_next_page_url(soup, current_url)
            if not current_url:
                logging.info("没有更多页面可爬取")
                break

        except requests.exceptions.RequestException as e:
            logging.error(f"请求失败: {current_url}, 错误: {e}")
            # Retrying the identical URL for the remaining iterations would
            # almost certainly fail the same way; stop the crawl instead.
            break

# Example usage: constants stay at module level so importers can reuse them.
base_url = 'https://www.vcg.com/creative-image/shijuezhongguo/'  # page to start crawling from
folder_path = './downloaded_images'  # directory where images are saved

# Browser-like request headers to avoid trivial bot blocking.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
}

if __name__ == '__main__':
    # Guard the crawl so that importing this module does not trigger network I/O.
    # Crawl the first 5 pages of images.
    crawl_images(base_url, folder_path, headers, num_pages=5)
