#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pixabay 简单图片搜索器
快速获取真实可用的图片链接
"""

import json
import logging
import os
import random
import time
from datetime import datetime
from typing import Dict, List, Optional
from urllib.parse import quote

import requests

# Module-wide logging: timestamped INFO-level messages for all helpers below.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def search_images(query: str, limit: int = 10, return_details: bool = False) -> List:
    """Search for images and return URLs or detailed metadata.

    Args:
        query: Search keyword.
        limit: Maximum number of results to return.
        return_details: When True, return per-image metadata dicts;
            when False (default, backward compatible), return URL strings.

    Returns:
        A list of image URL strings, or of metadata dicts when
        ``return_details`` is True.
    """
    logger.info(f"搜索图片: {query}")

    # NOTE: a Lorem Picsum fallback used to top up short result sets here;
    # it is intentionally disabled, so Pixabay is the only live source.
    if return_details:
        results = get_pixabay_images_detailed(query, min(limit, 20))
        logger.info(f"获取到 {len(results)} 张图片详细信息")
    else:
        results = get_pixabay_images(query, min(limit, 20))
        logger.info(f"获取到 {len(results)} 张图片")

    return results[:limit]


def get_pixabay_images_detailed(query: str, limit: int) -> List[Dict]:
    """Fetch detailed image metadata from the Pixabay API.

    Args:
        query: Search keyword.
        limit: Maximum number of hits to request (capped at 20, the
            per-page size used for this endpoint).

    Returns:
        One dict per hit (url, thumbnail, dimensions, author, engagement
        counters); an empty list on any failure.
    """
    try:
        url = "https://pixabay.com/api/"
        params = {
            # NOTE(review): hard-coded personal API key — consider loading
            # it from an environment variable instead of source code.
            'key': '52698639-8058593a761c9141bdb1bd334',
            'q': query,
            'image_type': 'photo',
            'per_page': min(limit, 20),
            'safesearch': 'true',
            'category': 'all'
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=10)

        if response.status_code == 200:
            data = response.json()
            images = []

            for item in data.get('hits', []):
                images.append({
                    'id': item.get('id'),
                    'url': item.get('webformatURL', ''),
                    'thumbnail': item.get('previewURL', ''),
                    'title': f"{query} - {item.get('tags', '')}",
                    'description': item.get('tags', ''),
                    'width': item.get('webformatWidth', 0),
                    'height': item.get('webformatHeight', 0),
                    'size': item.get('imageSize', 0),
                    'source': 'Pixabay',
                    'author': item.get('user', ''),
                    'views': item.get('views', 0),
                    'downloads': item.get('downloads', 0),
                    'likes': item.get('likes', 0),
                    'comments': item.get('comments', 0)
                })

            logger.info(f"Pixabay返回 {len(images)} 张图片详细信息")
            return images

        # Previously a non-200 response was silently swallowed; surface it.
        logger.warning("Pixabay API returned status %s", response.status_code)

    except (requests.RequestException, ValueError) as e:
        # RequestException: network/timeout errors; ValueError: bad JSON body.
        logger.error(f"Pixabay图片搜索失败: {str(e)}")

    return []


def get_pixabay_images(query: str, limit: int) -> List[str]:
    """Fetch image URLs from the Pixabay API.

    Args:
        query: Search keyword.
        limit: Maximum number of hits to request (capped at 20).

    Returns:
        Web-format image URLs for each hit; empty list on any failure.
    """
    try:
        url = "https://pixabay.com/api/"
        params = {
            # NOTE(review): hard-coded public demo API key — consider
            # loading it from an environment variable instead.
            'key': '9656065-a4094594c34f9ac14c7fc4c39',
            'q': query,
            'image_type': 'photo',
            'per_page': min(limit, 20),
            'safesearch': 'true',
            'category': 'all'
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=10)

        if response.status_code == 200:
            data = response.json()
            # Keep only hits that actually carry a usable web-format URL.
            urls = [item['webformatURL'] for item in data.get('hits', [])
                    if item.get('webformatURL')]
            logger.info(f"Pixabay返回 {len(urls)} 张图片")
            return urls

        # Previously a non-200 response was silently swallowed; surface it.
        logger.warning("Pixabay API returned status %s", response.status_code)

    except (requests.RequestException, ValueError) as e:
        # RequestException: network/timeout errors; ValueError: bad JSON body.
        logger.error(f"Pixabay搜索失败: {str(e)}")

    return []


def get_lorem_images_detailed(query: str, count: int) -> List[Dict]:
    """Generate *count* Lorem Picsum placeholder records for *query*.

    The size/engagement numbers are fabricated at random; only the URLs
    point at real (randomly generated) images.
    """
    results: List[Dict] = []

    for idx in range(count):
        # Pick a random canvas size plus a cache-busting seed for the URL.
        w = random.choice([640, 800, 1024])
        h = random.choice([480, 600, 768])
        seed = random.randint(1, 1000)

        results.append({
            'id': f'lorem_{seed}',
            'url': f'https://picsum.photos/{w}/{h}?random={seed}',
            'thumbnail': f'https://picsum.photos/300/200?random={seed}',
            'title': f'{query} 随机图片 {idx+1}',
            'description': f'Lorem Picsum提供的随机图片，主题: {query}',
            'width': w,
            'height': h,
            'size': random.randint(100000, 1000000),  # fabricated: 100 KB – 1 MB
            'source': 'Lorem Picsum',
            'author': 'Lorem Picsum',
            'views': random.randint(100, 10000),
            'downloads': random.randint(10, 1000),
            'likes': random.randint(5, 500),
            'comments': random.randint(0, 50)
        })

    logger.info(f"Lorem Picsum生成 {len(results)} 张随机图片详细信息")
    return results


def get_lorem_images(count: int) -> List[str]:
    """Return *count* random Lorem Picsum image URLs."""

    def _random_url() -> str:
        # Random canvas size plus a cache-busting seed query parameter.
        w = random.choice([640, 800, 1024])
        h = random.choice([480, 600, 768])
        seed = random.randint(1, 1000)
        return f'https://picsum.photos/{w}/{h}?random={seed}'

    urls = [_random_url() for _ in range(count)]

    logger.info(f"Lorem Picsum生成 {len(urls)} 张随机图片")
    return urls


def download_image(url: str, filename: str) -> bool:
    """Download a single image to *filename*.

    Args:
        url: Image URL.
        filename: Destination file path.

    Returns:
        True on success, False on any failure (failure is logged).
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
            'Referer': 'https://pixabay.com/'
        }

        # Stream the body in chunks; the context manager releases the
        # connection even if writing fails part-way (previously the
        # streamed response was never closed).
        with requests.get(url, headers=headers, timeout=30, stream=True) as response:
            response.raise_for_status()

            with open(filename, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

        # Fix: the success message used to log a literal placeholder
        # instead of the actual file name.
        logger.info(f"下载成功: {filename}")
        return True

    except (requests.RequestException, OSError) as e:
        # RequestException: network/HTTP errors; OSError: file-write errors.
        logger.error(f"下载失败 {url}: {str(e)}")
        return False


def save_search_results_to_json(query: str, images: List[Dict], filename: Optional[str] = None) -> str:
    """Persist image search results as a JSON file under ``search_results/``.

    Args:
        query: Search keyword (used in the generated file name).
        images: Image metadata dicts as produced by ``search_images``.
        filename: Optional explicit file name; when omitted one is
            generated from the sanitized query plus a timestamp.

    Returns:
        Path of the written file, or "" if writing failed.
    """
    # Derive a filesystem-safe name from the query when none was given.
    if not filename:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_query = "".join(c for c in query if c.isalnum() or c in (' ', '-', '_')).rstrip()
        safe_query = safe_query.replace(' ', '_')
        filename = f"image_search_{safe_query}_{timestamp}.json"

    total = len(images)
    search_data = {
        "search_info": {
            "query": query,
            "timestamp": datetime.now().isoformat(),
            "total_results": total,
            "search_type": "image"
        },
        "results": images,
        "summary": {
            "sources": list(set(img.get('source', 'Unknown') for img in images)),
            "total_images": total,
            # File-extension guesses taken from each URL's last dot suffix.
            "formats": list(set(
                img.get('url', '').split('.')[-1].lower()
                for img in images
                if img.get('url') and '.' in img.get('url', '')
            )),
            "average_width": sum(img.get('width', 0) for img in images) / total if images else 0,
            "average_height": sum(img.get('height', 0) for img in images) / total if images else 0
        }
    }

    # Make sure the output directory exists (race-free, unlike exists()+makedirs()).
    json_dir = "search_results"
    os.makedirs(json_dir, exist_ok=True)

    filepath = os.path.join(json_dir, filename)

    try:
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(search_data, f, ensure_ascii=False, indent=2)

        logger.info(f"图片搜索结果已保存到: {filepath}")
        return filepath

    except (OSError, TypeError, ValueError) as e:
        # OSError: file-system errors; TypeError/ValueError: unserializable data.
        logger.error(f"保存JSON文件失败: {str(e)}")
        return ""


def batch_download(query: str, count: int = 5, folder: str = "images") -> List[str]:
    """Search for images and download them into *folder*.

    Args:
        query: Search keyword.
        count: Number of images to download.
        folder: Destination directory (created if missing).

    Returns:
        Paths of the files that downloaded successfully.
    """
    # Make sure the destination directory exists.
    os.makedirs(folder, exist_ok=True)

    urls = search_images(query, count)

    downloaded_files = []

    for i, url in enumerate(urls, 1):
        # Infer the extension from the URL, defaulting to .jpg.
        lowered = url.lower()
        if '.png' in lowered:
            ext = '.png'
        elif '.webp' in lowered:
            ext = '.webp'
        else:
            ext = '.jpg'

        filename = os.path.join(folder, f"{query}_{i:03d}{ext}")

        if download_image(url, filename):
            downloaded_files.append(filename)

        # Throttle requests so we don't hammer the image host.
        # (Fix: `import time` used to be re-executed on every iteration.)
        time.sleep(1)

    return downloaded_files


def main() -> None:
    """Command-line entry point.

    Usage:
        python simple_image_search.py <keyword>                # search for URLs
        python simple_image_search.py <keyword> download       # search and download
        python simple_image_search.py <keyword> download <n>   # download n images
    """
    import sys
    
    if len(sys.argv) < 2:
        print("使用方法:")
        print("  python simple_image_search.py <关键词>              # 搜索图片URL")
        print("  python simple_image_search.py <关键词> download     # 搜索并下载")
        print("  python simple_image_search.py <关键词> download <数量>  # 指定下载数量")
        print()
        print("示例:")
        print("  python simple_image_search.py food")
        print("  python simple_image_search.py food download")
        print("  python simple_image_search.py food download 8")
        sys.exit(1)
    
    query = sys.argv[1]
    
    if len(sys.argv) > 2 and sys.argv[2] == 'download':
        # Download mode: batch-download `count` images (default 5).
        count = int(sys.argv[3]) if len(sys.argv) > 3 else 5
        
        print(f"🔍 搜索并下载: {query}")
        print(f"📥 下载数量: {count}")
        print("=" * 50)
        
        files = batch_download(query, count)

        if files:
            print(f"\n✅ 下载完成! 共下载 {len(files)} 张图片:")
            for file in files:
                print(f"  - {file}")

            # Save a download record as JSON alongside the images.
            images = search_images(query, count, return_details=True)  # fetch detailed metadata
            if images:
                # Attach download info to each image record.
                # NOTE(review): this assumes the first len(files) search results
                # correspond 1:1 to the successfully downloaded files — not
                # guaranteed if a download in the middle failed; verify.
                for i, image in enumerate(images):
                    image['download_info'] = {
                        'downloaded': i < len(files),
                        'file_path': files[i] if i < len(files) else None
                    }

                json_file = save_search_results_to_json(query, images)
                if json_file:
                    print(f"📄 下载记录已保存到: {json_file}")
        else:
            print("\n❌ 下载失败")
    
    else:
        # Search mode: print results and save them as JSON.
        print(f"🔍 搜索图片: {query}")
        print("=" * 50)

        # Fetch detailed metadata so the JSON export is complete.
        images = search_images(query, 10, return_details=True)

        if images:
            print(f"✅ 找到 {len(images)} 张图片:")
            for i, image in enumerate(images, 1):
                print(f"{i:2d}. 标题: {image.get('title', 'N/A')}")
                print(f"    URL: {image['url']}")
                print(f"    尺寸: {image.get('width', 'N/A')}x{image.get('height', 'N/A')}")
                print(f"    来源: {image.get('source', 'N/A')}")
                print(f"    作者: {image.get('author', 'N/A')}")
                print()

            # Automatically save the search results as a JSON file.
            json_file = save_search_results_to_json(query, images)
            if json_file:
                print(f"📄 搜索结果已保存到: {json_file}")
        else:
            print("❌ 未找到图片")


if __name__ == "__main__":
    main()
