import os
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor, as_completed
from PIL import Image
import time

# Folder where downloaded images are stored; created on first run.
save_folder = "D:/dog"
# exist_ok=True avoids the check-then-create race of an explicit
# `if not os.path.exists(...)` guard.
os.makedirs(save_folder, exist_ok=True)

# Browser-like request headers so the site does not reject us as a bot.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0'
}

# Every downloaded image is resized to this (width, height).
target_size = (800, 600)  # adjust as needed

# Network errors are retried this many times per image.
max_retries = 3

# Stop after submitting this many download tasks.
max_images_to_download = 4000

def download_image(img_src, img_name):
    """Download one image into save_folder, resize it to target_size, and
    overwrite the file in place.

    Network errors are retried up to max_retries times with a 2-second
    backoff; any other failure (e.g. the payload is not a valid image)
    aborts immediately.

    Args:
        img_src: Absolute URL of the image.
        img_name: File name to save under inside save_folder.

    Returns:
        True on success, False after exhausting retries or on a
        non-network error.
    """
    img_path = os.path.join(save_folder, img_name)
    for attempt in range(1, max_retries + 1):
        try:
            response = requests.get(url=img_src, headers=header, timeout=10)
            # Fail fast on HTTP errors (404/500); the original saved the
            # error page to disk and let PIL crash on it afterwards.
            response.raise_for_status()

            with open(img_path, 'wb') as fp:
                fp.write(response.content)

            # Re-open, resize, then overwrite. The context manager releases
            # the file handle before save() rewrites the same path
            # (important on Windows, where open handles block writes).
            with Image.open(img_path) as img:
                resized = img.resize(target_size, Image.Resampling.LANCZOS)
            resized.save(img_path)

            print(f"{img_name} 下载成功")
            return True
        except requests.exceptions.RequestException:
            print(f"{img_name} 下载失败，重试中... ({attempt}/{max_retries})")
            time.sleep(2)  # brief backoff before retrying
        except Exception as e:
            # Non-network failure (corrupt image, disk error): do not retry.
            print(f"{img_name} 下载失败: 错误原因: {str(e)}")
            return False
    return False

def fetch_images_from_page(page_url):
    """Fetch one gallery page and extract the image URLs from it.

    Args:
        page_url: URL of a paginated gallery page.

    Returns:
        A list of absolute image URLs ('https:' + data-src), or an empty
        list if the request or the parse fails.
    """
    try:
        # Bug fix: the original call had no timeout, so one stalled server
        # response could hang the whole crawl forever.
        response = requests.get(url=page_url, headers=header, timeout=10)
        response.raise_for_status()
        response.encoding = 'utf-8'
        tree = etree.HTML(response.text)
        if tree is None:  # etree.HTML returns None on unparseable input
            return []

        image_urls = []
        # Each <figure> in the gallery holds a lazily loaded image whose
        # real URL lives in the <img> tag's data-src attribute.
        for figure in tree.xpath('//div[@class="gallery_inner"]/figure'):
            try:
                img_src = figure.xpath('./a/img/@data-src')[0]
                # data-src is protocol-relative (//host/...): prefix scheme.
                image_urls.append('https:' + img_src)
            except IndexError:
                print('未成功匹配到字段')
            except Exception as e:
                print(f"解析失败: 错误原因: {str(e)}")

        return image_urls
    except Exception as e:
        print(f"页面 {page_url} 请求失败: {str(e)}")
        return []

def main():
    """Crawl paginated search results and download images concurrently.

    Walks pages of `base_url` until either `max_images_to_download`
    download tasks have been submitted or a page yields no images, fanning
    downloads out over a 10-worker thread pool.
    """
    base_url = 'https://www.vcg.com/creative-image/gou/'  # search URL for "dog"
    # NOTE: counts submitted tasks, not confirmed successes — failed
    # downloads still consume the quota.
    downloaded_count = 0

    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = []
        page = 1

        while downloaded_count < max_images_to_download:
            # Build the paginated URL for the current page.
            page_url = f"{base_url}?page={page}"
            print(f"正在爬取第 {page} 页: {page_url}")

            image_urls = fetch_images_from_page(page_url)
            if not image_urls:
                # An empty page means we ran past the last page (or the
                # request failed) — stop paginating either way.
                print(f"第 {page} 页没有图片，爬取结束。")
                break

            for img_src in image_urls:
                if downloaded_count >= max_images_to_download:
                    break

                # Bug fix: strip any query string so the name is a valid
                # path component ('photo.jpg?x=1' -> 'photo.jpg').
                img_name = img_src.split('/')[-1].split('?')[0]
                futures.append(executor.submit(download_image, img_src, img_name))
                downloaded_count += 1

            page += 1

        # Drain the pool so failures surface before we report completion.
        for future in as_completed(futures):
            try:
                future.result()
            except Exception as e:
                print(f"任务执行失败: {str(e)}")

    print("所有图片下载尝试完毕！")

if __name__ == '__main__':
    main()