import os
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from PIL import Image
from io import BytesIO
from urllib.parse import urljoin
import logging

# Logging: timestamped INFO-level messages (shared by all download threads).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Base URL of the target site's search results; the page number is appended.
base_url = "https://www.hippopx.com/en/query?q=horse&page="
# Browser-like User-Agent so the site does not reject the scraper outright.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
}

# Output directory for the images; exist_ok=True makes this idempotent and
# avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs('downloaded_images', exist_ok=True)


def download_and_resize_image(i, url):
    """Download the image at *url*, resize it to 224x224 and save it as JPEG.

    Args:
        i: Sequential index used to build the output file name
           (``downloaded_images/horse_<i>.jpg``).
        url: Absolute URL of the image to fetch.

    All failures (network errors, bad status codes, undecodable image data)
    are logged and swallowed so one bad image never aborts the whole crawl.
    """
    img_name = f"downloaded_images/horse_{i}.jpg"
    # Skip BEFORE downloading: checking after the HTTP request (as before)
    # wastes exactly the bandwidth the skip is supposed to save.
    if os.path.exists(img_name):
        logging.info("图片已存在，跳过下载: %s", img_name)
        return
    try:
        logging.info("正在下载图片: %s", url)
        # Context manager releases the connection on every exit path
        # (the old explicit close() was skipped on early return/exception).
        with requests.get(url, headers=headers, timeout=10) as img_response:
            if img_response.status_code != 200:
                logging.warning("下载失败，状态码：%d", img_response.status_code)
                return
            img = Image.open(BytesIO(img_response.content))
            # JPEG cannot store alpha/palette modes (RGBA, P, LA, ...);
            # without this conversion Image.save raises OSError for such files.
            if img.mode != 'RGB':
                img = img.convert('RGB')
            img_resized = img.resize((224, 224))  # normalize size to 224x224
            img_resized.save(img_name, format='JPEG', quality=95)
            logging.info("图片下载并修改成功： %s", img_name)
    except Exception as e:
        logging.error("下载图片时出错：%s", str(e))


def scrape_page(page_number, start_index):
    """Scrape one search-results page and download every image found on it.

    Args:
        page_number: Page number appended to ``base_url``.
        start_index: First numeric index to use for the downloaded file names.

    Returns:
        The next free index (``start_index`` + number of image URLs queued),
        or ``start_index`` unchanged if the page request failed.
    """
    page_url = f"{base_url}{page_number}"
    try:
        response = requests.get(page_url, headers=headers, timeout=10)
        response.raise_for_status()
        logging.info("状态码: %d", response.status_code)

        soup = BeautifulSoup(response.text, 'html.parser')

        # Collect absolute, fetchable image URLs. Lazy-loaded <img> tags often
        # carry `data:` URI placeholders in src; urljoin passes those through
        # unchanged and requests.get() would then raise InvalidSchema per
        # image, so keep only http(s) URLs.
        image_urls = []
        for img in soup.find_all('img'):
            src = img.get('src')
            if not src:
                continue
            absolute_url = urljoin(base_url, src)
            if absolute_url.startswith(('http://', 'https://')):
                image_urls.append(absolute_url)

        # Fan the downloads out to worker threads; leaving the with-block
        # waits for all submitted downloads to finish.
        with ThreadPoolExecutor(max_workers=20) as executor:
            for i, image_url in enumerate(image_urls, start=start_index):
                executor.submit(download_and_resize_image, i, image_url)

        return start_index + len(image_urls)
    except requests.exceptions.RequestException as e:
        logging.error("请求失败：%s", str(e))
        return start_index


# Running counter used to number the downloaded image files across pages.
image_counter = 0

# Scrape pages 2 through 60; each call returns the next free index so file
# names keep incrementing from one page to the next.
# NOTE(review): an earlier comment claimed pages 61-135, but range(2, 61)
# actually covers pages 2-60 — confirm which range was intended.
for page in range(2, 61):
    image_counter = scrape_page(page, image_counter)