import requests
from bs4 import BeautifulSoup
import os
import time
from urllib.parse import urljoin
import json

def download_image(url, save_path, max_retries=3):
    """Download an image from *url* and write it to *save_path*.

    Streams the response in 8 KiB chunks. Retries up to *max_retries*
    times on network errors, sleeping 2 seconds between attempts.

    Args:
        url: Direct URL of the image file.
        save_path: Local filesystem path to write the bytes to.
        max_retries: Maximum number of download attempts.

    Returns:
        True if the file was fully written, False otherwise (non-image
        content, or all retries exhausted).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
    }

    for attempt in range(max_retries):
        try:
            # Context manager ensures the streamed connection is released
            # on every exit path (the original leaked it on the
            # non-image branch). raise_for_status() already rejects
            # 4xx/5xx, so no separate status_code check is needed.
            with requests.get(url, headers=headers, stream=True, timeout=10) as response:
                response.raise_for_status()

                # Only persist payloads the server labels as an image.
                if 'image' not in response.headers.get('content-type', ''):
                    print(f"非图片内容: {url}")
                    return False

                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)
                return True

        except requests.exceptions.RequestException as e:
            # Drop any partially-written file so a later existence check
            # does not mistake it for a completed download.
            if os.path.exists(save_path):
                os.remove(save_path)
            print(f"下载失败 (尝试 {attempt + 1}/{max_retries}): {url}")
            print(f"错误信息: {e}")
            if attempt < max_retries - 1:
                time.sleep(2)  # Back off before retrying.

    return False

def get_image_urls(search_term, max_images=2000):
    """Collect direct file URLs from Wikimedia Commons for *search_term*.

    Pages through the MediaWiki search generator (namespace 6 = File:)
    in batches of 100, restricted to bitmap files, until *max_images*
    URLs are gathered or the result set is exhausted.

    Args:
        search_term: Free-text search query (e.g. "fire hydrant").
        max_images: Upper bound on the number of URLs to return.

    Returns:
        A list of unique image URLs, at most *max_images* long.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    }

    base_url = "https://commons.wikimedia.org/w/api.php"
    image_urls = []
    seen = set()  # O(1) de-duplication; scanning the list was O(n) per URL.
    continue_token = None
    consecutive_errors = 0

    while len(image_urls) < max_images:
        params = {
            "action": "query",
            "format": "json",
            "generator": "search",
            "gsrnamespace": "6",
            "gsrsearch": f"filetype:bitmap {search_term}",
            "gsrlimit": "100",
            "prop": "imageinfo",
            "iiprop": "url",
        }

        if continue_token:
            params["gsroffset"] = continue_token

        # Keep the try body to the request/parse only; a timeout stops a
        # single stalled request from hanging the whole run.
        try:
            response = requests.get(base_url, params=params, headers=headers, timeout=10)
            response.raise_for_status()
            data = response.json()
        except Exception as e:
            print(f"获取图片URL失败: {e}")
            consecutive_errors += 1
            if consecutive_errors >= 5:
                # Persistent failure: stop rather than loop forever
                # (the original retried the same request indefinitely).
                break
            time.sleep(5)  # Wait before retrying after an error.
            continue

        consecutive_errors = 0

        if "query" in data and "pages" in data["query"]:
            for page in data["query"]["pages"].values():
                if len(image_urls) >= max_images:
                    break

                if "imageinfo" in page and page["imageinfo"]:
                    url = page["imageinfo"][0]["url"]
                    if url not in seen:
                        seen.add(url)
                        image_urls.append(url)

        # The API's continuation offset points at the next result page;
        # its absence means the result set is exhausted.
        if "continue" in data and "gsroffset" in data["continue"]:
            continue_token = data["continue"]["gsroffset"]
        else:
            break

        print(f"已获取 {len(image_urls)} 个URL...")
        time.sleep(1)  # Throttle requests to be polite to the API.

    return image_urls[:max_images]

def main():
    """Build a small image dataset by scraping Wikimedia Commons.

    For each category, collects up to 2000 image URLs and downloads
    them into ``dataset/train/<folder_name>/``, skipping files that
    already exist so the run is resumable.
    """
    # Folder name -> Commons search term for each dataset category.
    categories = {
        "train": "train",
        "airplane": "airplane",
        "ship": "ship",
        "car": "car",
        "bicycle": "bicycle",
        "truck": "truck",
        "person": "person",
        "fire-hydrant": "fire hydrant",
        "traffic-light": "traffic light",
        "zebra-crossing": "zebra crossing"
    }

    for folder_name, search_term in categories.items():
        save_dir = os.path.join("dataset", "train", folder_name)
        os.makedirs(save_dir, exist_ok=True)

        print(f"\n开始下载 {search_term} 类别的图片...")

        # Gather candidate URLs for this category.
        image_urls = get_image_urls(search_term, max_images=2000)
        print(f"找到 {len(image_urls)} 张 {search_term} 的图片")

        # Download each URL, skipping already-present files.
        success_count = 0
        for i, url in enumerate(image_urls):
            # Keep the real file extension: the bitmap search also
            # returns PNG/GIF/TIFF, which were previously mislabeled
            # as .jpg. Fall back to .jpg when the URL has none.
            ext = os.path.splitext(url)[1].lower() or ".jpg"
            save_path = os.path.join(save_dir, f"{folder_name}_{i+1}{ext}")

            if not os.path.exists(save_path):
                if download_image(url, save_path):
                    success_count += 1
                    print(f"已下载 {success_count}/{len(image_urls)} ({i+1}): {url}")
                else:
                    print(f"下载失败 {i+1}/{len(image_urls)}: {url}")

                time.sleep(1)  # Throttle downloads to be polite.
            else:
                success_count += 1
                print(f"文件已存在，跳过 {i+1}/{len(image_urls)}")

        print(f"\n{folder_name} 类别完成，成功下载 {success_count} 张图片")

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
