#!/usr/bin/env python3
"""
执行视频素材入库工作流

使用示例:
python run_material_workflow.py dist --use-mock-ai --max-duration 5.0
"""

import sys
import asyncio
import argparse
from pathlib import Path

# Add the project root to the Python path so the src.* imports below resolve.
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

from src.bootstrap import bootstrap_application, shutdown_application
from src.workflow.video_material_workflow import execute_material_workflow
from src.core.di import injector
from src.services.material_workflow_database_service import MaterialWorkflowDatabaseService
import yaml


def quiet_log_filter(record):
    """Whitelist filter for quiet mode.

    Keeps only log records whose message contains one of the key progress
    phrases (or an ERROR/CRITICAL marker); every other record is dropped.

    Args:
        record: A ``logging.LogRecord`` instance.

    Returns:
        True if the record should be emitted, False otherwise.
    """
    text = record.getMessage()

    # Key phrases that are always allowed through in quiet mode.
    keep_patterns = (
        "开始执行视频素材入库工作流",
        "已加载配置文件",
        "工作流执行完成",
        "处理结果",
        "详细报告",
        "ERROR",
        "CRITICAL",
        "切分结果",
        "AI分类完成",
        "文件整理完成",
        "元数据提取完成",
        "处理报告已生成",
    )

    return any(pattern in text for pattern in keep_patterns)


def load_workflow_config(config_path="config/video_material_workflow.yaml") -> dict:
    """Load the workflow YAML configuration file.

    Args:
        config_path: Path (str or Path) to the YAML config file. Defaults
            to the project's standard location, so existing no-argument
            callers are unaffected.

    Returns:
        The parsed configuration dict, or {} when the file is missing,
        unreadable, or empty. Note: ``yaml.safe_load`` returns ``None``
        for an empty file; previously that leaked to callers and would
        break the downstream ``yaml_config.get(...)`` lookups, so it is
        normalized to {} here.
    """
    config_path = Path(config_path)

    if not config_path.exists():
        print(f"⚠️  配置文件不存在: {config_path}")
        return {}

    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config_data = yaml.safe_load(f)
    except Exception as e:
        # Best-effort load: report the problem and fall back to defaults.
        print(f"⚠️  配置文件加载失败: {e}")
        return {}

    print(f"✅ 已加载配置文件: {config_path}")
    # Normalize an empty document (safe_load -> None) to an empty dict.
    return config_data or {}


async def main() -> None:
    """CLI entry point for the video material ingestion workflow.

    Parses command-line arguments, configures logging (quiet / verbose /
    default modes), merges the YAML configuration with CLI overrides,
    executes the workflow, persists the results to the database, and
    prints a summary. Exits with status 1 if the workflow itself fails;
    a database-save failure is reported but does not abort.
    """
    parser = argparse.ArgumentParser(description='执行视频素材入库工作流')
    parser.add_argument('project_directory', help='项目目录路径')
    parser.add_argument('--max-duration', type=float, help='最大切片时长(秒)')
    parser.add_argument('--min-duration', type=float, help='最小切片时长(秒)')
    parser.add_argument('--confidence', type=float, help='AI分类置信度阈值')
    parser.add_argument('--use-mock-ai', action='store_true', help='使用模拟AI服务')
    parser.add_argument('--move-files', action='store_true', help='移动文件而不是复制')
    parser.add_argument('--no-shot-detection', action='store_true', help='禁用镜头检测')
    parser.add_argument('--verbose', '-v', action='store_true', help='详细输出')
    parser.add_argument('--quiet', '-q', action='store_true', help='简化输出（只显示关键信息）')

    args = parser.parse_args()

    # Configure log verbosity. The three branches are mutually exclusive;
    # --quiet takes precedence over --verbose.
    if args.quiet:
        # Quiet mode: keep core processing intact but cut down the output.
        from loguru import logger
        logger.remove()
        logger.add(sys.stderr, level="ERROR", format="<red>ERROR</red>: {message}")

        # Keep the stdlib logging level at INFO so core logic behaves normally.
        import logging

        # Custom handler with the whitelist filter applied (see quiet_log_filter).
        handler = logging.StreamHandler()
        handler.addFilter(quiet_log_filter)
        handler.setLevel(logging.INFO)

        # Configure the root logger with only our filtered handler.
        logging.basicConfig(level=logging.INFO, handlers=[handler], format='%(message)s')

        # Silence specific chatty loggers.
        logging.getLogger("workflow.executor").setLevel(logging.ERROR)
        logging.getLogger("workflow.context").setLevel(logging.ERROR)
        logging.getLogger("ConfigService").setLevel(logging.ERROR)

        # FFmpeg slice-service logging is fully disabled in quiet mode.
        logging.getLogger("src.services.ffmpeg_slice_service").setLevel(logging.CRITICAL)

        # Keep core functionality working while trimming remaining output.
        logging.getLogger("MixVideo").setLevel(logging.WARNING)
        logging.getLogger("workflow").setLevel(logging.ERROR)

        # Environment variable to suppress the FFmpeg banner output.
        import os
        os.environ['FFMPEG_HIDE_BANNER'] = '1'

    elif args.verbose:
        # Verbose mode: full DEBUG output.
        from loguru import logger
        logger.remove()
        logger.add(sys.stderr, level="DEBUG")

        # Mirror the DEBUG level for stdlib logging as well.
        import logging
        logging.basicConfig(level=logging.DEBUG)
    else:
        # Default mode: moderate amount of information.
        from loguru import logger
        logger.remove()
        logger.add(sys.stderr, level="INFO", format="<green>{time:HH:mm:ss}</green> | <level>{level}</level> | {message}")

        import logging
        logging.basicConfig(level=logging.INFO)

        # Trim some of the noisier loggers.
        logging.getLogger("workflow.executor").setLevel(logging.WARNING)
        logging.getLogger("workflow.context").setLevel(logging.WARNING)

    print(f"🚀 开始执行视频素材入库工作流: {args.project_directory}")

    # Initialize the application (DI container, services, ...).
    await bootstrap_application()

    try:
        # Load the YAML configuration file (returns {} on any failure).
        yaml_config = load_workflow_config()

        # Pull default values out of the YAML configuration sections.
        video_processing = yaml_config.get("video_processing", {})
        segmentation = video_processing.get("segmentation", {})
        ai_classification = yaml_config.get("ai_classification", {})
        file_operations = yaml_config.get("file_operations", {})
        directories = yaml_config.get("directories", {})

        # Build the workflow parameters (CLI args win; YAML values are defaults).
        config = {
            "base_directory": args.project_directory,
            "max_segment_duration": args.max_duration if args.max_duration is not None else segmentation.get("max_segment_duration", 3.0),
            "min_segment_duration": args.min_duration if args.min_duration is not None else segmentation.get("min_segment_duration", 1.0),
            "confidence_threshold": args.confidence if args.confidence is not None else ai_classification.get("confidence_threshold", 0.7),
            "use_mock_ai": args.use_mock_ai or ai_classification.get("service", {}).get("use_mock", False),
            "move_files": args.move_files or (file_operations.get("operation_type", "copy") == "move"),
            "enable_shot_detection": not args.no_shot_detection and segmentation.get("enable_shot_detection", True),
            "create_backup": file_operations.get("create_backup", True),

            # Output directory names from the YAML configuration.
            "uncategorized_dir": directories.get("uncategorized_dir", "未分类"),
            "ai_material_dir": directories.get("ai_material_dir", "AI素材"),
            "product_display_dir": directories.get("product_display_dir", "产品展示"),
            "product_usage_dir": directories.get("product_usage_dir", "产品使用"),
            "model_wearing_dir": directories.get("model_wearing_dir", "模特试穿"),
            "waste_dir": directories.get("waste_dir", "废弃素材"),

            # Miscellaneous file-operation settings.
            "overwrite_existing": file_operations.get("overwrite_existing", False),
        }

        # Echo the effective configuration (full or condensed form).
        if not args.quiet:
            print("⚙️  工作流配置:")
            print(f"   - 最大切片时长: {config['max_segment_duration']}秒")
            print(f"   - 最小切片时长: {config['min_segment_duration']}秒")
            print(f"   - 镜头检测: {'启用' if config['enable_shot_detection'] else '禁用'}")
            print(f"   - 检测算法: {segmentation.get('shot_detection_algorithm', 'multi_channel_histogram')}")
            print(f"   - AI置信度阈值: {config['confidence_threshold']}")
            print(f"   - 使用模拟AI: {'是' if config['use_mock_ai'] else '否'}")
            print(f"   - 文件操作: {'移动' if config['move_files'] else '复制'}")
            print(f"   - 创建备份: {'是' if config['create_backup'] else '否'}")
            print(f"   - 废弃素材目录: {config['waste_dir']}")
            print()
        else:
            print(f"⚙️  配置: 切片{config['max_segment_duration']}s, AI阈值{config['confidence_threshold']}, {'模拟AI' if config['use_mock_ai'] else '真实AI'}")
            print()

        # Execute the workflow.
        if args.quiet:
            print("🚀 开始处理...")
            print("   📹 视频切分中...")

        result = await execute_material_workflow(args.project_directory, config)

        print("✅ 工作流执行完成!")

        # Persist the workflow results to the database; failure here is
        # reported but does not abort the run.
        try:
            db_service = injector.get(MaterialWorkflowDatabaseService)
            save_stats = db_service.save_workflow_results(result)

            if args.quiet:
                print(f"💾 数据库: {save_stats['new_videos']}新增, {save_stats['updated_videos']}更新")
            else:
                print(f"\n💾 数据库保存结果:")
                print(f"   - 新增视频: {save_stats['new_videos']} 个")
                print(f"   - 更新视频: {save_stats['updated_videos']} 个")
                print(f"   - 跳过视频: {save_stats['skipped_videos']} 个")
                print(f"   - 新增分类: {save_stats['new_classifications']} 个")

                if save_stats['failed_videos'] > 0:
                    print(f"   - 失败视频: {save_stats['failed_videos']} 个")

        except Exception as e:
            print(f"⚠️  数据库保存失败: {e}")
            if not args.quiet:
                import traceback
                traceback.print_exc()

        # Print a summary of the processing statistics, if present.
        if 'report' in result and 'statistics' in result['report']:
            stats = result['report']['statistics']

            if args.quiet:
                # Condensed one-line summary.
                total_videos = stats.get('total_original_videos', 0)
                total_segments = stats.get('total_segments_generated', 0)
                print(f"📊 {total_videos}个视频 → {total_segments}个片段")

                if 'category_distribution' in stats:
                    categories = [f"{cat}:{count}" for cat, count in stats['category_distribution'].items()]
                    print(f"🏷️  分类: {', '.join(categories)}")
            else:
                # Detailed summary.
                print("\n📊 处理统计:")
                print(f"   - 原始视频: {stats.get('total_original_videos', 0)} 个")
                print(f"   - 生成片段: {stats.get('total_segments_generated', 0)} 个")

                if 'category_distribution' in stats:
                    print(f"   - 分类分布:")
                    for category, count in stats['category_distribution'].items():
                        print(f"     • {category}: {count} 个")

        # Show where the generated report file lives.
        if 'report_path' in result:
            if args.quiet:
                print(f"📄 报告: {result['report_path']}")
            else:
                print(f"\n📄 详细报告: {result['report_path']}")

    except Exception as e:
        print(f"\n❌ 工作流执行失败: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        # SystemExit propagates after the finally block runs shutdown.
        sys.exit(1)

    finally:
        # Always release application resources, even on failure.
        await shutdown_application()


if __name__ == "__main__":
    asyncio.run(main())
