#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pixabay 简单视频搜索器
快速获取真实可用的视频链接
"""

import requests
import json
import random
from urllib.parse import quote
from typing import List, Dict
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def search_videos(query: str, limit: int = 10) -> List[Dict]:
    """
    Search for videos and return a list of video-info dicts.

    Args:
        query: search keyword
        limit: maximum number of videos to return

    Returns:
        List of video information dictionaries (at most ``limit`` entries).
    """
    logger.info(f"搜索视频: {query}")

    # Primary source: Pixabay's video API (capped at 10 per request here).
    results: List[Dict] = list(get_pixabay_videos(query, min(limit, 10)))

    # Fallback: pad with demo sample videos when Pixabay returned too few.
    shortfall = limit - len(results)
    if shortfall > 0:
        results += get_demo_videos(query, shortfall)

    logger.info(f"获取到 {len(results)} 个视频")
    return results[:limit]


def get_pixabay_videos(query: str, limit: int) -> List[Dict]:
    """
    Fetch video information from the Pixabay video API.

    Args:
        query: search keyword
        limit: maximum number of results (capped at 20 per request)

    Returns:
        List of video-info dicts; empty list on any error or on a
        non-200 response.
    """
    import os

    try:
        url = "https://pixabay.com/api/videos/"
        params = {
            # Allow overriding the key via the environment; fall back to
            # the previously hard-coded key so existing behavior holds.
            'key': os.environ.get('PIXABAY_API_KEY',
                                  '52698639-8058593a761c9141bdb1bd334'),
            'q': query,
            'per_page': min(limit, 20),
            'safesearch': 'true',
            'category': 'all'
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=15)

        if response.status_code == 200:
            data = response.json()
            videos = []

            for item in data.get('hits', []):
                # Renditions keyed by quality name ('large', 'medium', ...).
                video_urls = item.get('videos', {})

                # Pick the best available rendition, highest quality first,
                # and remember its metadata so width/height/size below match
                # the URL we actually return (the original code always
                # reported the 'large' rendition's dimensions, even when a
                # smaller rendition was selected).
                video_url = None
                chosen = {}
                for quality in ['large', 'medium', 'small', 'tiny']:
                    candidate = video_urls.get(quality, {})
                    if candidate.get('url'):
                        video_url = candidate['url']
                        chosen = candidate
                        break

                # Thumbnail: prefer the larger renditions, but fall back to
                # any rendition that carries one.
                thumb_url = None
                for quality in ['large', 'medium', 'small', 'tiny']:
                    thumb = video_urls.get(quality, {}).get('thumbnail')
                    if thumb:
                        thumb_url = thumb
                        break

                if video_url:
                    videos.append({
                        'id': item.get('id'),
                        'url': video_url,
                        'thumbnail': thumb_url or '',
                        'title': f"{query} - {item.get('tags', '')}",
                        'description': item.get('tags', ''),
                        'duration': item.get('duration', 0),
                        'width': chosen.get('width', 0),
                        'height': chosen.get('height', 0),
                        'size': chosen.get('size', 0),
                        'source': 'Pixabay',
                        'author': item.get('user', ''),
                        'views': item.get('views', 0),
                        'downloads': item.get('downloads', 0)
                    })

            logger.info(f"Pixabay返回 {len(videos)} 个视频")
            return videos

    except Exception as e:
        logger.error(f"Pixabay视频搜索失败: {str(e)}")

    return []


def get_demo_videos(query: str, count: int) -> List[Dict]:
    """
    Build a list of sample video entries from public Google sample clips.

    Args:
        query: search keyword (interpolated into titles/descriptions)
        count: number of sample entries to produce (capped at the number
            of known sample URLs)

    Returns:
        List of video-info dicts shaped like the Pixabay results.
    """
    # Reliable, publicly hosted sample clips.
    demo_urls = [
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerEscapes.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/Sintel.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/SubaruOutbackOnStreetAndDirt.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/TearsOfSteel.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/VolkswagenGTIReview.mp4",
        "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/WeAreGoingOnBullrun.mp4"
    ]

    # Metadata aligned 1:1 with demo_urls above.
    video_info = [
        {"title": "Big Buck Bunny", "duration": 596, "width": 1280, "height": 720},
        {"title": "Elephants Dream", "duration": 653, "width": 1280, "height": 720},
        {"title": "For Bigger Blazes", "duration": 15, "width": 1920, "height": 1080},
        {"title": "For Bigger Escapes", "duration": 15, "width": 1920, "height": 1080},
        {"title": "For Bigger Fun", "duration": 60, "width": 1920, "height": 1080},
        {"title": "Sintel", "duration": 888, "width": 1920, "height": 818},
        {"title": "Subaru Outback", "duration": 30, "width": 1920, "height": 1080},
        {"title": "Tears of Steel", "duration": 734, "width": 1920, "height": 800},
        {"title": "Volkswagen GTI Review", "duration": 25, "width": 1920, "height": 1080},
        {"title": "We Are Going On Bullrun", "duration": 30, "width": 1920, "height": 1080}
    ]

    videos = []
    for idx, url in enumerate(demo_urls[:count]):
        # Defensive fallback in case metadata is shorter than the URL list.
        meta = video_info[idx] if idx < len(video_info) else video_info[0]
        width, height = meta["width"], meta["height"]

        videos.append({
            'id': f'demo_{idx + 1}',
            'url': url,
            'thumbnail': (
                f'https://via.placeholder.com/{width}x{height}'
                f'/0066cc/ffffff?text={query}+Video+{idx + 1}'
            ),
            'title': f'{query} - {meta["title"]}',
            'description': f'关于 {query} 的示例视频: {meta["title"]}',
            'duration': meta["duration"],
            'width': width,
            'height': height,
            'size': random.randint(5000000, 50000000),  # 5 MB to 50 MB
            'source': 'Google Sample Videos',
            'author': 'Google',
            'views': random.randint(1000, 100000),
            'downloads': random.randint(100, 10000)
        })

    logger.info(f"生成 {len(videos)} 个示例视频")
    return videos


def download_thumbnail(video_info: Dict, filename: str) -> bool:
    """
    Download a video's thumbnail image to disk.

    Args:
        video_info: video information dict (reads the 'thumbnail' key)
        filename: destination file path

    Returns:
        True on success, False on any failure (missing URL, network
        error, non-2xx status, write error).
    """
    try:
        thumbnail_url = video_info.get('thumbnail', '')
        if not thumbnail_url:
            logger.warning("没有找到封面图片URL")
            return False

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
            'Referer': 'https://pixabay.com/'
        }

        # Fix: the log messages previously interpolated nothing and printed
        # the literal "(unknown)"; include the actual URL / destination.
        logger.info(f"开始下载封面图片: {thumbnail_url}")
        response = requests.get(thumbnail_url, headers=headers, timeout=30, stream=True)
        response.raise_for_status()

        # Stream to disk in 8 KiB chunks to avoid holding the image in memory.
        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)

        logger.info(f"封面图片下载成功: {filename}")
        return True

    except Exception as e:
        logger.error(f"封面图片下载失败 {video_info.get('thumbnail', '')}: {str(e)}")
        return False


def download_video(video_info: Dict, filename: str) -> bool:
    """
    Download a single video to disk, printing a progress percentage.

    Args:
        video_info: video information dict (reads the 'url' key)
        filename: destination file path

    Returns:
        True on success, False on any failure (missing URL, network
        error, non-2xx status, write error).
    """
    try:
        url = video_info['url']
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
            'Referer': 'https://pixabay.com/'
        }

        # Fix: the log messages previously interpolated nothing and printed
        # the literal "(unknown)"; include the actual URL / destination.
        logger.info(f"开始下载视频: {url}")
        response = requests.get(url, headers=headers, timeout=60, stream=True)
        response.raise_for_status()

        # content-length may be absent; 0 disables the progress display.
        total_size = int(response.headers.get('content-length', 0))
        downloaded = 0

        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    downloaded += len(chunk)

                    # Show download progress on one updating line.
                    if total_size > 0:
                        progress = (downloaded / total_size) * 100
                        print(f"\r下载进度: {progress:.1f}%", end='', flush=True)

        print()  # newline after the progress line
        logger.info(f"下载成功: {filename}")
        return True

    except Exception as e:
        logger.error(f"下载失败 {video_info.get('url', '')}: {str(e)}")
        return False


def batch_download(query: str, count: int = 3, folder: str = "videos", download_thumbnails: bool = True) -> Dict[str, List[str]]:
    """
    Search for videos and download them in batch (optionally with covers).

    Args:
        query: search keyword
        count: number of videos to download
        folder: destination folder for the videos
        download_thumbnails: also download each video's cover image
            into a 'thumbnails' subfolder

    Returns:
        Dict with 'videos' and 'thumbnails' lists of downloaded file paths.
    """
    import os
    import time  # hoisted: it was re-imported on every loop iteration

    # Ensure destination folders exist (no-op when already present).
    os.makedirs(folder, exist_ok=True)
    thumbnails_folder = os.path.join(folder, "thumbnails")
    if download_thumbnails:
        os.makedirs(thumbnails_folder, exist_ok=True)

    # Find candidate videos first.
    videos = search_videos(query, count)

    downloaded_videos = []
    downloaded_thumbnails = []

    for i, video in enumerate(videos, 1):
        # Videos are saved with a .mp4 extension regardless of source.
        video_filename = os.path.join(folder, f"{query}_{i:03d}.mp4")

        if download_video(video, video_filename):
            downloaded_videos.append(video_filename)

            # Optionally fetch the cover image next to the video.
            if download_thumbnails and video.get('thumbnail'):
                thumb_filename = os.path.join(thumbnails_folder, f"{query}_{i:03d}_thumb.jpg")
                if download_thumbnail(video, thumb_filename):
                    downloaded_thumbnails.append(thumb_filename)

        # Small delay between downloads to be polite to remote servers.
        time.sleep(2)

    return {
        'videos': downloaded_videos,
        'thumbnails': downloaded_thumbnails
    }


def batch_download_thumbnails(query: str, count: int = 5, folder: str = "thumbnails") -> List[str]:
    """
    Search for videos and download only their cover images.

    Args:
        query: search keyword
        count: number of covers to download
        folder: destination folder

    Returns:
        List of file paths of the covers that downloaded successfully.
    """
    import os
    import time  # hoisted: it was re-imported on every loop iteration

    # Ensure the destination folder exists (no-op when already present).
    os.makedirs(folder, exist_ok=True)

    # Find candidate videos first.
    videos = search_videos(query, count)

    downloaded_thumbnails = []

    for i, video in enumerate(videos, 1):
        if video.get('thumbnail'):
            thumb_filename = os.path.join(folder, f"{query}_thumb_{i:03d}.jpg")

            if download_thumbnail(video, thumb_filename):
                downloaded_thumbnails.append(thumb_filename)

        # Small delay between downloads to be polite to remote servers.
        time.sleep(1)

    return downloaded_thumbnails


def save_search_results_to_json(query: str, videos: List[Dict], filename: str = None) -> str:
    """
    Save search results as a JSON file under the 'search_results' directory.

    Args:
        query: search keyword
        videos: list of video-info dicts
        filename: target file name; auto-generated from the sanitized
            query plus a timestamp when omitted

    Returns:
        Path of the written file, or '' if writing failed.
    """
    import os
    from datetime import datetime

    # Build a filesystem-safe file name when none was supplied.
    if not filename:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        safe_query = "".join(c for c in query if c.isalnum() or c in (' ', '-', '_')).rstrip()
        safe_query = safe_query.replace(' ', '_')
        filename = f"video_search_{safe_query}_{timestamp}.json"

    # Compute the duration aggregate once (it was previously summed twice).
    total_duration = sum(video.get('duration', 0) for video in videos)

    search_data = {
        "search_info": {
            "query": query,
            "timestamp": datetime.now().isoformat(),
            "total_results": len(videos),
            "search_type": "video"
        },
        "results": videos,
        "summary": {
            "sources": list(set(video.get('source', 'Unknown') for video in videos)),
            "total_duration": total_duration,
            # Guard against division by zero on an empty result list.
            "average_duration": total_duration / len(videos) if videos else 0,
            "resolutions": list(set(
                f"{video.get('width', 0)}x{video.get('height', 0)}"
                for video in videos
                if video.get('width') and video.get('height')
            ))
        }
    }

    # Ensure the output directory exists (no-op when already present).
    json_dir = "search_results"
    os.makedirs(json_dir, exist_ok=True)
    filepath = os.path.join(json_dir, filename)

    try:
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(search_data, f, ensure_ascii=False, indent=2)

        logger.info(f"搜索结果已保存到: {filepath}")
        return filepath

    except Exception as e:
        logger.error(f"保存JSON文件失败: {str(e)}")
        return ""


def get_video_urls(query: str, limit: int = 10) -> List[str]:
    """
    Return only the video URLs for a search (compatibility wrapper).

    Args:
        query: search keyword
        limit: maximum number of URLs to return

    Returns:
        List of video URL strings; entries without a URL are skipped.
    """
    found = search_videos(query, limit)
    urls = []
    for entry in found:
        if entry.get('url'):
            urls.append(entry['url'])
    return urls


def main():
    """
    主函数 - 命令行接口
    """
    import sys
    
    if len(sys.argv) < 2:
        print("使用方法:")
        print("  python simple_video_search.py <关键词>                    # 搜索视频URL")
        print("  python simple_video_search.py <关键词> download           # 搜索并下载视频+封面")
        print("  python simple_video_search.py <关键词> download <数量>    # 指定下载数量")
        print("  python simple_video_search.py <关键词> thumbnails        # 只下载封面图片")
        print("  python simple_video_search.py <关键词> thumbnails <数量>  # 指定封面数量")
        print()
        print("示例:")
        print("  python simple_video_search.py nature")
        print("  python simple_video_search.py nature download 3")
        print("  python simple_video_search.py nature thumbnails 5")
        sys.exit(1)
    
    query = sys.argv[1]

    if len(sys.argv) > 2 and sys.argv[2] == 'download':
        # 下载视频模式
        count = int(sys.argv[3]) if len(sys.argv) > 3 else 3

        print(f"🎬 搜索并下载视频: {query}")
        print(f"📥 下载数量: {count}")
        print("=" * 50)

        result = batch_download(query, count, download_thumbnails=True)

        if result['videos'] or result['thumbnails']:
            print(f"\n✅ 下载完成!")
            if result['videos']:
                print(f"📹 视频文件 ({len(result['videos'])} 个):")
                for file in result['videos']:
                    print(f"  - {file}")
            if result['thumbnails']:
                print(f"🖼️  封面图片 ({len(result['thumbnails'])} 个):")
                for file in result['thumbnails']:
                    print(f"  - {file}")

            # 保存下载记录为JSON文件
            videos = search_videos(query, count)  # 重新获取视频信息用于保存
            if videos:
                # 添加下载信息到视频数据中
                for i, video in enumerate(videos):
                    video['download_info'] = {
                        'downloaded': i < len(result['videos']),
                        'video_file': result['videos'][i] if i < len(result['videos']) else None,
                        'thumbnail_file': result['thumbnails'][i] if i < len(result['thumbnails']) else None
                    }

                json_file = save_search_results_to_json(query, videos)
                if json_file:
                    print(f"📄 下载记录已保存到: {json_file}")
        else:
            print("\n❌ 下载失败")

    elif len(sys.argv) > 2 and sys.argv[2] == 'thumbnails':
        # 只下载封面图片模式
        count = int(sys.argv[3]) if len(sys.argv) > 3 else 5

        print(f"🖼️  搜索并下载封面图片: {query}")
        print(f"📥 下载数量: {count}")
        print("=" * 50)

        thumbnails = batch_download_thumbnails(query, count)

        if thumbnails:
            print(f"\n✅ 封面图片下载完成! 共下载 {len(thumbnails)} 张:")
            for file in thumbnails:
                print(f"  - {file}")

            # 保存封面图片下载记录为JSON文件
            videos = search_videos(query, count)  # 获取视频信息用于保存
            if videos:
                # 添加封面图片下载信息
                for i, video in enumerate(videos):
                    video['thumbnail_download_info'] = {
                        'downloaded': i < len(thumbnails),
                        'thumbnail_file': thumbnails[i] if i < len(thumbnails) else None
                    }

                json_file = save_search_results_to_json(query, videos)
                if json_file:
                    print(f"📄 封面图片下载记录已保存到: {json_file}")
        else:
            print("\n❌ 封面图片下载失败")

    else:
        # 搜索模式
        print(f"🎬 搜索视频: {query}")
        print("=" * 50)

        videos = search_videos(query, 10)

        if videos:
            print(f"✅ 找到 {len(videos)} 个视频:")
            for i, video in enumerate(videos, 1):
                print(f"{i:2d}. 标题: {video.get('title', 'N/A')}")
                print(f"    视频URL: {video['url']}")
                print(f"    封面URL: {video.get('thumbnail', 'N/A')}")
                print(f"    时长: {video.get('duration', 'N/A')}秒")
                print(f"    尺寸: {video.get('width', 'N/A')}x{video.get('height', 'N/A')}")
                print(f"    来源: {video.get('source', 'N/A')}")
                print(f"    作者: {video.get('author', 'N/A')}")
                print()

            # 自动保存搜索结果为JSON文件
            json_file = save_search_results_to_json(query, videos)
            if json_file:
                print(f"📄 搜索结果已保存到: {json_file}")
        else:
            print("❌ 未找到视频")


if __name__ == "__main__":
    main()
