import os
from os import mkdir
import threading
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from PIL import Image
from io import BytesIO
from urllib.parse import urljoin
import logging

# Log with timestamp and severity on every line.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Browser-like User-Agent sent with every request — presumably set because
# the site rejects the default python-requests UA (TODO confirm).
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
}

# Download a single image, resize it to 224x224 and save it as JPEG.
def download_and_resize_image(i, url, aninaim):
    """Fetch the image at *url*, resize it to 224x224 and store it as
    ``downloaded_images_1/<aninaim>/<aninaim>_<i>.jpg``.

    Args:
        i: Sequential index used in the output file name.
        url: Absolute URL of the image to download.
        aninaim: Category name; used as sub-directory and file-name prefix.

    All errors are logged and swallowed so one bad image never kills the
    worker thread (matches the original best-effort behavior).
    """
    img_name = f"downloaded_images_1/{aninaim}/{aninaim}_{i}.jpg"
    # Check for an existing file BEFORE issuing the HTTP request so we do
    # not waste bandwidth re-downloading images we already have (the
    # original only checked after the download completed).
    if os.path.exists(img_name):
        logging.info("图片已存在，跳过下载: %s", img_name)
        return
    try:
        logging.info("正在下载图片: %s", url)
        # The context manager releases the connection even when decoding or
        # saving raises (the original leaked the response on error paths).
        with requests.get(url, headers=headers, timeout=10) as img_response:
            if img_response.status_code != 200:
                logging.warning("下载失败，状态码：%d", img_response.status_code)
                return
            # Ensure the per-category directory exists; no-op when present.
            os.makedirs(os.path.dirname(img_name), exist_ok=True)
            img = Image.open(BytesIO(img_response.content))
            # JPEG cannot carry an alpha channel or palette: convert to RGB
            # so .save() does not raise on RGBA/P sources (e.g. PNGs).
            img_resized = img.resize((224, 224)).convert('RGB')
            img_resized.save(img_name, format='JPEG', quality=95)
            logging.info("图片下载并修改成功： %s", img_name)
    except Exception as e:
        logging.error("下载图片时出错：%s", str(e))



def scrape_page(page_number, start_index, aninaim):
    """Scrape one hippopx search-result page and download every image on it.

    Args:
        page_number: Page number of the search results to fetch.
        start_index: First sequential index assigned to this page's images.
        aninaim: Search keyword / category name.

    Returns:
        The next free image index (``start_index`` plus the number of image
        URLs found), or ``start_index`` unchanged when the page request fails.
    """
    base_url = f"https://www.hippopx.com/en/query?q={aninaim}&page="
    URL = f"{base_url}{page_number}"
    try:
        # Use the response as a context manager so the connection is
        # released even if parsing raises (the original only called
        # .close() on the success path).
        with requests.get(URL, headers=headers, timeout=10) as response:
            response.raise_for_status()
            logging.info("状态码: %d", response.status_code)

            soup = BeautifulSoup(response.text, 'html.parser')

            # Collect every <img src>, resolving relative links against the
            # actual page URL; tags without a src are skipped.
            image_urls = []
            for img in soup.find_all('img'):
                img_url = img.get('src')
                if img_url:
                    image_urls.append(urljoin(URL, img_url))

        # Fan the downloads out over a thread pool; leaving the with-block
        # waits for every submitted download to finish.
        with ThreadPoolExecutor(max_workers=20) as executor:
            for i, url in enumerate(image_urls, start=start_index):
                executor.submit(download_and_resize_image, i, url, aninaim)
        return start_index + len(image_urls)
    except requests.exceptions.RequestException as e:
        logging.error("请求失败：%s", str(e))
        return start_index


def fenleipaqu(aninaim):
    """Scrape result pages 2..135 for *aninaim* and download all their images.

    Bug fix: the original guarded ``os.makedirs`` with
    ``if os.path.exists(...)`` — i.e. it only created the directory when it
    ALREADY existed — so on a fresh run the target directory was never made
    and every save failed. Create it unconditionally; ``exist_ok=True``
    makes this a no-op when it is already present.
    """
    os.makedirs(f"./downloaded_images_1/{aninaim}", exist_ok=True)
    # Running counter keeps file names unique across pages.
    image_counter = 0
    for page in range(2, 136):
        image_counter = scrape_page(page, image_counter, aninaim)

if __name__ == '__main__':
    # One scraper thread per category; start them all, then wait for each.
    aninaim_list = ['dog', "cat","horse", "fish","landscape"]
    workers = []
    for aninaim in aninaim_list:
        worker = threading.Thread(target=fenleipaqu, args=(aninaim,))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    print("所有线程都完成了")