import aiohttp
import asyncio
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from PIL import Image
from io import BytesIO
import os
import time

# Base URL of the target site (landscape-photo search on hippopx).
base_url = "https://www.hippopx.com/en/query?q=landscape"
# Desktop Firefox User-Agent so the site serves the regular HTML page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0'
}

# Directory where downloaded images are saved.
directory = './landscape_images'
# EAFP instead of exists()-then-makedirs(): the old check was racy (TOCTOU) —
# the directory could be created between the check and the makedirs() call.
try:
    os.makedirs(directory)
    print(f"文件夹 '{directory}' 已创建")
except FileExistsError:
    print(f"文件夹 '{directory}' 已存在")

async def fetch(session, url):
    """Fetch *url* and return the response body as text.

    Retries up to 3 times with exponential backoff (1s, 2s) on network
    errors, timeouts, or HTTP error statuses; returns None once the final
    attempt fails.
    """
    retry = 3
    for i in range(retry):
        try:
            async with session.get(url, headers=headers, timeout=10) as response:
                # Treat 4xx/5xx as failures so error pages are retried
                # instead of being returned to the caller as page content.
                response.raise_for_status()
                return await response.text()
        except Exception as e:
            if i < retry - 1:  # not the last attempt: back off, then retry
                await asyncio.sleep(2 ** i)  # exponential backoff
            else:
                print(f"请求失败: {e}")
                return None

async def fetch_image_urls(session, page_url):
    """Scrape one search-result page and return its image URLs (absolute)."""
    html = await fetch(session, page_url)
    if not html:
        return []

    soup = BeautifulSoup(html, "html.parser")
    container = soup.find('ul', class_='main_list')  # adjust if the site layout changes
    if container is None:
        print(f"未找到图片容器: {page_url}")
        return []

    # One <img> per <a> anchor; resolve relative src values against the page URL.
    urls = [
        urljoin(page_url, img.get('src'))
        for img in (anchor.find('img') for anchor in container.find_all('a'))
        if img is not None and img.get('src')
    ]

    if not urls:
        print(f"未找到图片标签: {page_url}")

    return urls

async def download_and_resize_image(session, image_url, save_path):
    """Download one image, resize it to 600x480, and save it to *save_path*.

    Retries up to 3 times with exponential backoff on network errors and
    non-200 responses; prints a message and gives up after the last attempt.
    """
    retry = 3
    for i in range(retry):
        try:
            async with session.get(image_url, headers=headers, timeout=10) as response:
                if response.status == 200:
                    img = Image.open(BytesIO(await response.read()))
                    # JPEG cannot store alpha channels or palettes: without
                    # this conversion, .save() raises for RGBA/P/LA sources
                    # (e.g. PNGs) because save_path has a .jpg extension.
                    if img.mode != 'RGB':
                        img = img.convert('RGB')
                    img_resized = img.resize((600, 480))
                    img_resized.save(save_path)
                    print(f"图片已下载并调整大小保存到 {save_path}")
                    break
                else:
                    print(f"下载图片失败，状态码：{response.status}")
                    if i < retry - 1:
                        await asyncio.sleep(2 ** i)  # exponential backoff
        except Exception as e:
            print(f"下载图片出错: {e}")
            if i < retry - 1:
                await asyncio.sleep(2 ** i)  # exponential backoff
            else:
                print(f"放弃下载 {image_url}：达到最大重试次数")

async def main(max_images=4000):
    """Walk the paginated search results and download up to *max_images* images."""
    downloaded_count = 0
    page = 1

    async with aiohttp.ClientSession() as session:
        while downloaded_count < max_images:
            page_url = f"{base_url}&page={page}"
            print(f"正在处理页面: {page_url}")

            img_urls = await fetch_image_urls(session, page_url)
            if not img_urls:
                print("无法找到更多图片，停止爬取。")
                break

            # Schedule one download coroutine per image on this page,
            # stopping as soon as the overall quota is reached.
            tasks = []
            for img_url in img_urls:
                if downloaded_count >= max_images:
                    break
                downloaded_count += 1
                target = os.path.join(directory, f"landscape_images_{downloaded_count}.jpg")
                tasks.append(download_and_resize_image(session, img_url, target))

            # Run this page's downloads concurrently before moving on.
            await asyncio.gather(*tasks)
            page += 1

    print(f"总共下载了 {downloaded_count} 张图片.")

if __name__ == "__main__":
    # perf_counter() is monotonic, so the elapsed measurement cannot jump or
    # go negative if the system clock is adjusted mid-run (unlike time.time()).
    start_time = time.perf_counter()
    asyncio.run(main())
    end_time = time.perf_counter()
    print(f"爬取完成，总耗时: {end_time - start_time:.2f} 秒")