import os
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from PIL import Image

# Base URL of the target site's search results; the page number is appended.
base_url = "https://www.hippopx.com/en/query?q=cat&page="  # base URL

# Browser-like User-Agent so the site does not reject the requests.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
}

# Directory where downloaded images are stored.
# exist_ok=True replaces the racy exists()-then-makedirs pattern.
os.makedirs('downloaded_images', exist_ok=True)


# 下载图片并修改尺寸的函数
def download_and_resize_image(i, url):
    """Download one image, resize it to 600x480, and save it as a JPEG.

    Args:
        i: zero-based running index used to build the output filename
           (``downloaded_images/cat_{i+1}.jpg``), so names never collide
           across pages.
        url: absolute URL of the image to fetch.

    All exceptions are caught and logged so one bad image does not abort
    the whole batch (this runs inside a thread pool).
    """
    try:
        print(f"正在下载图片: {url}")
        # timeout keeps a stalled connection from hanging the worker thread forever
        img_response = requests.get(url, headers=headers, timeout=15)

        if img_response.status_code == 200:
            img_name = f"downloaded_images/cat_{i + 1}.jpg"

            img = Image.open(BytesIO(img_response.content))
            # JPEG cannot store an alpha channel or a palette; PNG sources are
            # often RGBA/P mode, and saving those as .jpg raises OSError.
            if img.mode != "RGB":
                img = img.convert("RGB")

            img_resized = img.resize((600, 480))  # normalize size to 600x480

            img_resized.save(img_name)
            print(f"图片下载并修改成功：{img_name}")
        else:
            print(f"下载失败，状态码：{img_response.status_code}")
    except Exception as e:
        print(f"下载图片时出错：{e}")


# 爬取指定页数
def scrape_page(page_number, start_index):
    URL = f"{base_url}{page_number}"
    response = requests.get(URL, headers=headers)
    print(f"状态码: {response.status_code}")

    if response.status_code == 200:
        print(f"请求成功，正在爬取第 {page_number} 页！")
        soup = BeautifulSoup(response.text, 'html.parser')
        image_tags = soup.find_all('img')  # 查找所有 <img> 标签

        # 提取图片URL
        image_urls = []
        for img in image_tags:
            img_url = img.get('src')
            if img_url:
                # 如果图片链接是相对路径，进行拼接
                if img_url.startswith('/'):
                    img_url = URL + img_url
                image_urls.append(img_url)

        # 使用多线程加速图片下载和修改尺寸
        with ThreadPoolExecutor(max_workers=5) as executor:
            # 为每个图片下载任务提交线程，并使用 `start_index` 来确保图片文件名的连续性
            for i, url in enumerate(image_urls, start=start_index):
                executor.submit(download_and_resize_image, i, url)

        # 返回下一页的起始索引
        return start_index + len(image_urls)

    else:
        print(f"请求失败，状态码：{response.status_code}")
        return start_index


# Running counter so output filenames stay unique across all pages.
image_counter = 0

# Crawl results pages 2 through 60; each call returns the next start index,
# so numbering continues where the previous page left off.
for page_number in range(2, 61):
    image_counter = scrape_page(page_number, image_counter)
