"""
视频素材入库工作流

实现视频素材的自动化入库流程：
1. 扫描未分类素材
2. 视频切镜处理
3. AI智能分类
4. 文件整理归档
5. 元数据提取和存储
"""

import os
import json
import asyncio
from typing import Dict, Any, List, Optional
from pathlib import Path
from dataclasses import dataclass, asdict, field
from datetime import datetime
from loguru import logger

from . import WorkflowNode, ProcessingNode, ConditionalNode, SequentialNode
from .context import WorkflowContext
from .workflow import WorkflowDefinition, WorkflowBuilder
from .executor import WorkflowExecutor

from ..services.video_segmentation_service import VideoSegmentationService
from ..services.gemini_service import GeminiService
from ..services.metadata_service import MetadataService
from ..modules.scanners import MediaDirectoryScanner
from ..modules.scanners.file_manager import FileManager
from ..models import Video, VideoMetadata, VideoClassification, Project
from ..core.di import injector


@dataclass
class MaterialWorkflowConfig:
    """Configuration for the video material intake workflow."""
    # --- directory layout ---
    base_directory: str  # root directory all category directories live under
    uncategorized_dir: str = "未分类"
    ai_material_dir: str = "AI素材"
    product_display_dir: str = "产品展示"
    product_usage_dir: str = "产品使用"
    model_wearing_dir: str = "模特试穿"
    waste_dir: str = "废弃素材"

    # --- segmentation ---
    max_segment_duration: float = 3.0  # maximum clip length (seconds)
    min_segment_duration: float = 1.0  # minimum clip length (seconds)
    enable_shot_detection: bool = True  # use shot detection instead of fixed-time slicing
    enforce_max_duration: bool = True  # re-split clips that exceed max_segment_duration

    # --- AI classification ---
    # Use a default_factory instead of `= None` so each instance gets its own
    # list by default; the annotation is Optional because callers may still
    # pass None explicitly (normalized in __post_init__).
    classification_categories: Optional[List[str]] = field(
        default_factory=lambda: ["AI素材", "产品展示", "产品使用", "模特试穿"]
    )
    confidence_threshold: float = 0.7
    use_mock_ai: bool = False  # use a mocked AI service instead of the real one
    product_title: str = ""  # product title, used for more accurate classification

    # --- file handling ---
    move_files: bool = True  # True = move files, False = copy files
    create_backup: bool = True
    overwrite_existing: bool = False

    # --- filename conflict handling ---
    resolve_conflicts: bool = True  # automatically resolve filename conflicts
    conflict_resolution: str = "append_number"  # append_number, timestamp, skip, overwrite

    def __post_init__(self):
        # Normalize an explicitly passed None to the default category list,
        # preserving the behavior of the previous `= None` default.
        if self.classification_categories is None:
            self.classification_categories = [
                "AI素材", "产品展示", "产品使用", "模特试穿"
            ]


def json_serializer(obj):
    """JSON serialization helper: handles datetime objects, enum-likes, and plain objects."""
    if isinstance(obj, datetime):
        return obj.isoformat()
    # Sentinel-based lookups so attribute values of None are still returned,
    # matching hasattr() semantics.
    missing = object()
    enum_value = getattr(obj, 'value', missing)
    if enum_value is not missing:  # enum-like: anything exposing .value
        return enum_value
    state = getattr(obj, '__dict__', missing)
    if state is not missing:  # generic objects: serialize the attribute dict
        return state
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")


class VideoMaterialWorkflow:
    """视频素材入库工作流"""
    
    def __init__(self, config: MaterialWorkflowConfig):
        """Bind the configuration, a component-tagged logger, and DI-resolved services."""
        self.config = config
        self.logger = logger.bind(component="VideoMaterialWorkflow")

        # Log the effective segmentation settings to ease debugging.
        self.logger.info("🔧 VideoMaterialWorkflow 初始化配置:")
        for option in ("max_segment_duration", "min_segment_duration",
                       "enforce_max_duration", "enable_shot_detection"):
            self.logger.info(f"   {option}: {getattr(config, option)}")

        # Service components resolved from the DI container.
        self.scanner = injector.get(MediaDirectoryScanner)
        self.file_manager = injector.get(FileManager)
        self.video_segmentation = injector.get(VideoSegmentationService)
        self.metadata_service = injector.get(MetadataService)
        self.ai_service = injector.get(GeminiService)
    
    def create_workflow(self) -> WorkflowDefinition:
        """Build and return the video material intake workflow definition."""
        builder = WorkflowBuilder(
            name="视频素材入库工作流",
            description="自动化处理视频素材的入库流程"
        )

        # Wrapper that adapts the bound instance method to the (input, context,
        # config) signature the conditional node expects.
        def has_materials_condition(input_data, context, config):
            return self._has_materials_to_process(input_data, context)

        # Assemble the pipeline step by step, rebinding the builder after every
        # fluent call so the result is identical to one long chained expression.
        b = builder.input_node("工作流输入", data={"base_directory": self.config.base_directory})
        b = b.processing_node("扫描未分类素材", processor=self._scan_uncategorized_materials)
        b = b.conditional_node("检查素材", condition=has_materials_condition)
        b = b.processing_node("视频切镜处理", processor=self._process_video_segmentation)
        b = b.processing_node("AI智能分类", processor=self._classify_video_segments)
        b = b.processing_node("文件整理归档", processor=self._organize_classified_files)
        b = b.processing_node("提取元数据", processor=self._extract_and_save_metadata)
        b = b.processing_node("生成处理报告", processor=self._generate_processing_report)
        b = b.output_node("工作流输出", output_key="final_result")
        b = b.set_config(timeout=3600, retry_count=2)
        b = b.set_tags("video", "material", "workflow", "automation")

        return b.build()
    
    async def _scan_uncategorized_materials(self,
                                          input_data: Dict[str, Any],
                                          context: WorkflowContext,
                                          config: Dict[str, Any]) -> Dict[str, Any]:
        """Scan the uncategorized directory for video files awaiting intake."""
        self.logger.info("开始扫描未分类素材...")

        source_dir = Path(self.config.base_directory) / self.config.uncategorized_dir

        # Guard clause: nothing to do when the directory is missing.
        if not source_dir.exists():
            self.logger.warning(f"未分类目录不存在: {source_dir}")
            return {"video_files": [], "total_count": 0}

        # Recursive scan restricted to common video container extensions.
        scan_result = await self.scanner.scan(
            str(source_dir),
            recursive=True,
            file_patterns=["*.mp4", "*.avi", "*.mov", "*.mkv", "*.wmv"]
        )

        # Keep only entries the scanner tagged as videos.
        videos = [entry for entry in scan_result.files if entry.file_type.value == "video"]

        self.logger.info(f"扫描完成，找到 {len(videos)} 个视频文件")

        return {
            "video_files": [asdict(entry) for entry in videos],
            "total_count": len(videos),
            "scan_result": asdict(scan_result)
        }
    
    def _has_materials_to_process(self,
                                  input_data: Dict[str, Any],
                                  context: WorkflowContext) -> bool:
        """检查是否有素材需要处理"""
        total_count = input_data.get("total_count", 0)
        has_materials = total_count > 0

        if has_materials:
            self.logger.info(f"发现 {total_count} 个素材需要处理")
        else:
            self.logger.info("没有发现需要处理的素材")

        return has_materials
    
    async def _process_video_segmentation(self,
                                        input_data: Dict[str, Any],
                                        context: WorkflowContext,
                                        config: Dict[str, Any]) -> Dict[str, Any]:
        """
        Segment every scanned video into clips.

        Files whose name starts with "AI_" use a dedicated flow: fixed
        time-based slicing, no minimum-duration filtering and no secondary
        re-splitting. Regular files use shot detection (when enabled) or
        time-based slicing, are filtered by min_segment_duration, and are
        optionally re-split so no clip exceeds max_segment_duration.

        Returns:
            Dict with "segmented_results" (one entry per source video) and
            "total_segments".
        """
        self.logger.info("开始视频切镜处理...")

        self.logger.debug(f"输入数据键: {list(input_data.keys())}")

        # The upstream workflow node nests its payload under "input_data".
        nested_data = input_data.get("input_data", {})
        video_files = nested_data.get("video_files", [])
        segmented_results = []

        self.logger.info(f"收到 {len(video_files)} 个视频文件进行处理")

        for i, video_file_data in enumerate(video_files):
            self.logger.info(f"处理第 {i+1}/{len(video_files)} 个视频文件")
            self.logger.debug(f"视频文件数据: {video_file_data}")
            video_path = video_file_data["path"]
            self.logger.info(f"处理视频: {video_path}")

            source = Path(video_path)  # hoisted: was re-built several times below
            if not source.exists():
                self.logger.error(f"视频文件不存在: {video_path}")
                continue

            self.logger.info(f"视频文件大小: {source.stat().st_size / 1024 / 1024:.2f} MB")

            # An "AI_" filename prefix marks AI-generated material.
            filename = source.name
            is_ai_file = filename.startswith("AI_")

            if is_ai_file:
                # Fixed: this log line previously printed the literal
                # "(unknown)" instead of the actual filename.
                self.logger.info(f"🤖 检测到AI文件: {filename}，使用AI文件专用处理流程")

            try:
                # Per-video temporary directory for the generated clips.
                temp_output = Path(self.config.base_directory) / "temp_segments" / source.stem
                temp_output.mkdir(parents=True, exist_ok=True)
                self.logger.info(f"创建输出目录: {temp_output}")

                # Choose the slicing strategy for this file.
                if is_ai_file:
                    # AI files: plain time-based slicing only.
                    self.logger.info("🤖 AI文件处理：切分成不少于3秒的片段")
                    result = await self.video_segmentation.segment_video(
                        video_path=video_path,
                        output_dir=str(temp_output),
                        method="time_based",
                        segment_duration=self.config.max_segment_duration,
                        generate_files=True
                    )
                elif self.config.enable_shot_detection:
                    # Regular file, shot-detection slicing.
                    self.logger.info("使用镜头检测切分模式")
                    result = await self.video_segmentation.segment_video(
                        video_path=video_path,
                        output_dir=str(temp_output),
                        method="shots",
                        generate_files=True
                    )
                else:
                    # Regular file, fixed time slicing.
                    self.logger.info(f"使用时间切分模式，片段时长: {self.config.max_segment_duration}秒")
                    result = await self.video_segmentation.segment_video(
                        video_path=video_path,
                        output_dir=str(temp_output),
                        method="time_based",
                        segment_duration=self.config.max_segment_duration,
                        generate_files=True
                    )

                self.logger.info(f"切分结果: 算法={result.algorithm}, 片段数={result.segment_count}, 处理时间={result.processing_time:.3f}秒")

                # AI files keep every clip; regular files drop clips shorter
                # than the configured minimum duration.
                if is_ai_file:
                    valid_segments = result.segments
                    self.logger.info(f"🤖 AI文件保存所有片段：共 {len(valid_segments)} 个片段")
                else:
                    min_duration = self.config.min_segment_duration
                    self.logger.info(f"📹 普通文件片段过滤：最小时长 {min_duration} 秒")
                    valid_segments = [
                        seg for seg in result.segments
                        if seg.duration >= min_duration
                    ]

                # Secondary re-split of over-long clips (regular files only).
                if is_ai_file:
                    final_segments = valid_segments
                    self.logger.info(f"🤖 AI文件跳过二次切分，保持原始片段")
                elif self.config.enforce_max_duration:
                    final_segments = await self._check_and_split_long_segments(
                        valid_segments, video_path, str(temp_output)
                    )
                else:
                    final_segments = valid_segments

                segmented_results.append({
                    "original_video": video_file_data,
                    "segments": [asdict(seg) for seg in final_segments],
                    "segment_count": len(final_segments),
                    "total_duration": sum(seg.duration for seg in final_segments),
                    "temp_output_dir": str(temp_output)
                })

                self.logger.info(f"视频切分完成: {len(final_segments)} 个有效片段")

            except Exception as e:
                self.logger.error(f"视频切分失败 {video_path}: {e}")
                segmented_results.append({
                    "original_video": video_file_data,
                    "segments": [],
                    "segment_count": 0,
                    "error": str(e)
                })

        total_segments = sum(r.get("segment_count", 0) for r in segmented_results)
        self.logger.info(f"视频切镜处理完成，共生成 {total_segments} 个片段")

        return {
            "segmented_results": segmented_results,
            "total_segments": total_segments
        }

    async def _check_and_split_long_segments(self,
                                           segments: List,
                                           original_video_path: str,
                                           output_dir: str) -> List:
        """
        Re-split segments that exceed the configured maximum duration.

        Args:
            segments: Original segment list.
            original_video_path: Path of the source video the segments came from.
            output_dir: Directory that receives generated sub-segment files.

        Returns:
            Segment list where every entry is at most max_segment_duration
            long, except segments whose re-split failed (kept unchanged).
        """
        import math
        # Hoisted out of the loop: these imports were previously executed for
        # every single sub-segment.
        from src.services.video_segmentation_service import VideoSegment

        max_duration = self.config.max_segment_duration

        self.logger.info(f"🔧 二次切分配置检查:")
        self.logger.info(f"   self.config.max_segment_duration: {self.config.max_segment_duration}")
        self.logger.info(f"   使用的 max_duration: {max_duration}")
        self.logger.info(f"   配置类型: {type(self.config)}")

        final_segments = []

        for segment in segments:
            if segment.duration <= max_duration:
                # Segment already short enough; keep it as-is.
                final_segments.append(segment)
                self.logger.debug(f"片段 {segment.file_path} 时长 {segment.duration:.2f}s，符合要求")
                continue

            # Segment exceeds the maximum; cut it into sub-segments.
            self.logger.info(f"片段 {segment.file_path} 时长 {segment.duration:.2f}s，需要二次切分")

            try:
                # Ceiling division guarantees each piece is <= max_duration.
                sub_segment_count = math.ceil(segment.duration / max_duration)

                self.logger.info(f"将切分为 {sub_segment_count} 个子片段，每个最多 {max_duration}s")

                # Cut in max_duration strides; the last piece may be shorter.
                for i in range(sub_segment_count):
                    sub_start = segment.start_time + (i * max_duration)
                    sub_end = min(segment.start_time + ((i + 1) * max_duration), segment.end_time)
                    sub_duration_actual = sub_end - sub_start

                    # Drop trailing pieces below the minimum duration.
                    if sub_duration_actual < self.config.min_segment_duration:
                        self.logger.debug(f"跳过过短的子片段: {sub_duration_actual:.2f}s")
                        continue

                    original_name = Path(segment.file_path).stem
                    sub_file_name = f"{original_name}_sub_{i+1:02d}.mp4"
                    sub_file_path = Path(output_dir) / sub_file_name

                    # Physically extract the sub-segment via FFmpeg.
                    await self._extract_sub_segment(
                        original_video_path,
                        str(sub_file_path),
                        sub_start,
                        sub_duration_actual
                    )

                    # Derive fps from the parent segment's frame info when
                    # available; otherwise fall back to 30 fps.
                    if hasattr(segment, 'start_frame') and hasattr(segment, 'end_frame') and segment.duration > 0:
                        fps = (segment.end_frame - segment.start_frame) / segment.duration
                    else:
                        fps = 30.0

                    sub_start_frame = int(sub_start * fps)
                    sub_end_frame = int(sub_end * fps)

                    # NOTE(review): segment_id is derived from the running
                    # result count, so it can collide with ids of segments
                    # that were kept unchanged — confirm whether ids must be
                    # globally unique.
                    sub_segment = VideoSegment(
                        segment_id=len(final_segments) + 1,
                        start_time=sub_start,
                        end_time=sub_end,
                        duration=sub_duration_actual,
                        start_frame=sub_start_frame,
                        end_frame=sub_end_frame,
                        confidence=segment.confidence,
                        segment_type=f"{segment.segment_type}_sub",
                        file_path=str(sub_file_path),
                        metadata={
                            **segment.metadata,
                            "parent_segment": segment.file_path,
                            "sub_segment_index": i + 1,
                            "total_sub_segments": sub_segment_count
                        }
                    )

                    final_segments.append(sub_segment)
                    self.logger.debug(f"生成子片段: {sub_file_name} ({sub_duration_actual:.2f}s)")

                # Remove the original over-long clip file.
                if Path(segment.file_path).exists():
                    Path(segment.file_path).unlink()
                    self.logger.debug(f"删除原始长片段: {segment.file_path}")

            except Exception as e:
                self.logger.error(f"二次切分失败 {segment.file_path}: {e}")
                # Keep the original segment when re-splitting fails.
                final_segments.append(segment)

        self.logger.info(f"二次切分完成: 原始 {len(segments)} 个片段 -> 最终 {len(final_segments)} 个片段")
        return final_segments

    async def _extract_sub_segment(self,
                                 input_video_path: str,
                                 output_path: str,
                                 start_time: float,
                                 duration: float):
        """
        Extract a single sub-segment from a video using FFmpeg.

        Args:
            input_video_path: Source video path.
            output_path: Desired output file path.
            start_time: Segment start time in seconds.
            duration: Segment duration in seconds.

        Raises:
            RuntimeError: If FFmpeg produced no results or the expected
                output file is missing; any service error is logged and
                re-raised.
        """
        try:
            from src.services.ffmpeg_slice_service import FfmpegSliceService, SliceSegment
            from src.bootstrap import get_service

            # Resolve the FFmpeg slicing service from the service registry.
            ffmpeg_service = get_service(FfmpegSliceService)

            # Single slice covering [start_time, start_time + duration).
            end_time = start_time + duration
            segments = [SliceSegment(start=start_time, end=end_time)]

            # Output options: high quality, 30 fps.
            options = ffmpeg_service.create_slice_options(
                quality="high",
                fps=30
            )

            # Make sure the output directory exists.
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)

            # Run the slicing service to extract the segment.
            results = await ffmpeg_service.slice_video(
                media_path=input_video_path,
                segments=segments,
                options=options,
                output_path=output_path
            )

            if not results:
                raise RuntimeError("FFmpeg切片失败：没有生成输出文件")

            # Verify the expected output file exists.
            if not Path(output_path).exists():
                # The service may have written a differently named file;
                # results[0][0] presumably holds the actual output path —
                # TODO confirm against FfmpegSliceService.slice_video's
                # return shape.
                actual_output = results[0][0] if results else None
                if actual_output and Path(actual_output).exists():
                    # Rename to the filename the caller expects.
                    Path(actual_output).rename(output_path)
                    self.logger.debug(f"重命名输出文件: {actual_output} -> {output_path}")
                else:
                    raise RuntimeError(f"输出文件不存在: {output_path}")

            self.logger.debug(f"成功提取子片段: {output_path} ({duration:.2f}s)")

        except Exception as e:
            self.logger.error(f"提取子片段失败 {output_path}: {e}")
            raise

    async def _classify_video_segments(self,
                                     input_data: Dict[str, Any],
                                     context: WorkflowContext,
                                     config: Dict[str, Any]) -> Dict[str, Any]:
        """
        Classify every video segment with the AI service.

        Segments cut from an "AI_"-prefixed source file are auto-classified
        as "AI素材" without calling the AI service. Other segments are sent
        to video analysis; results below the confidence threshold fall back
        to "unclassified".

        Returns:
            Dict with "classified_results" and "total_classified".
        """
        self.logger.info("开始AI智能分类...")

        self.logger.debug(f"AI分类输入数据键: {list(input_data.keys())}")

        # segmented_results sits directly on input_data (no nesting here).
        segmented_results = input_data.get("segmented_results", [])
        self.logger.info(f"收到 {len(segmented_results)} 个切分结果进行分类")
        classified_results = []

        for result in segmented_results:
            segments = result.get("segments", [])
            classified_segments = []

            # Segments inherit the AI flag from the source video's filename.
            original_video = result.get("original_video", {})
            original_path = original_video.get("path", "")
            original_filename = Path(original_path).name
            is_ai_file = original_filename.startswith("AI_")

            for segment_data in segments:
                segment_file = segment_data.get("file_path")
                if not segment_file or not os.path.exists(segment_file):
                    self.logger.warning(f"片段文件不存在: {segment_file}")
                    continue

                try:
                    # AI-source segments bypass analysis entirely.
                    if is_ai_file:
                        self.logger.info(f"🤖 AI文件片段直接分类为AI素材: {segment_file}")
                        classified_segments.append({
                            **segment_data,
                            "classification": "AI素材",
                            "confidence": 1.0,
                            # Added so this branch carries the same key the
                            # AI-analysis branch below sets.
                            "target_category": "AI素材",
                            "target_directory": self.config.ai_material_dir,
                            "reasoning": "文件名以AI_开头，自动分类为AI素材",
                            "skip_ai_analysis": True
                        })
                        continue

                    # Regular segments: direct video-input AI analysis. The
                    # result is a dict shaped like:
                    #   {"success": ..., "category": ..., "confidence": ...,
                    #    "reasoning": ..., "features": [...],
                    #    "product_match": ..., "quality_score": ...,
                    #    "video_info": {...}, "analysis_result": {...},
                    #    "metadata": {...}, "raw_response": ...}
                    classification_result = await self.ai_service.analyze_video(
                        video_path=segment_file,
                        product_title=self.config.product_title,
                        categories=self.config.classification_categories,
                        use_direct_video=True,  # direct video-input mode
                        max_frames=5  # fallback parameter for frame-extraction mode
                    )

                    confidence = classification_result.get("confidence", 0.0)
                    category = classification_result.get("category", "unclassified")

                    # Low-confidence results are demoted to "unclassified".
                    if confidence < self.config.confidence_threshold:
                        category = "unclassified"
                        self.logger.warning(f"分类置信度过低: {confidence:.2f} < {self.config.confidence_threshold}")

                    # Map the classification label to its target directory.
                    target_dir = self._map_category_to_directory(category)

                    classified_segments.append({
                        **segment_data,
                        "classification": classification_result,
                        "target_category": category,
                        "target_directory": target_dir,
                        "confidence": confidence
                    })

                    # Fixed: the nested .get() previously reused double quotes
                    # inside a double-quoted f-string — a SyntaxError on
                    # Python < 3.12.
                    self.logger.info(f"reasoning: {classification_result.get('reasoning', '')} 片段分类完成: {Path(segment_file).name} -> {category} ({confidence:.2f})")

                except Exception as e:
                    self.logger.error(f"AI分类失败 {segment_file}: {e}")
                    classified_segments.append({
                        **segment_data,
                        "classification": {"error": str(e)},
                        "target_category": "unclassified",
                        "target_directory": self.config.waste_dir,
                        "confidence": 0.0
                    })

            classified_results.append({
                **result,
                "classified_segments": classified_segments
            })

        total_classified = sum(len(r.get("classified_segments", [])) for r in classified_results)
        self.logger.info(f"AI分类完成，共分类 {total_classified} 个片段")

        return {
            "classified_results": classified_results,
            "total_classified": total_classified
        }

    def _map_category_to_directory(self, category: str) -> str:
        """映射分类到目录名"""
        category_mapping = {
            "AI素材": self.config.ai_material_dir,
            "ai_generated": self.config.ai_material_dir,
            "产品展示": self.config.product_display_dir,
            "product_display": self.config.product_display_dir,
            "产品使用": self.config.product_usage_dir,
            "product_usage": self.config.product_usage_dir,
            "模特试穿": self.config.model_wearing_dir,
            "model_wearing": self.config.model_wearing_dir,
            "unclassified": self.config.waste_dir,
            "废弃素材": self.config.waste_dir,
            "废弃": self.config.waste_dir,
        }

        return category_mapping.get(category, self.config.waste_dir)

    def _resolve_filename_conflict(self, target_file: Path) -> Path:
        """
        解决文件名冲突

        Args:
            target_file: 目标文件路径

        Returns:
            解决冲突后的文件路径
        """
        if not target_file.exists():
            return target_file

        if not self.config.resolve_conflicts:
            return target_file

        strategy = self.config.conflict_resolution.lower()

        if strategy == "overwrite":
            # 直接覆盖
            return target_file

        elif strategy == "skip":
            # 跳过，返回None表示不处理
            return None

        elif strategy == "timestamp":
            # 添加时间戳
            from datetime import datetime
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            stem = target_file.stem
            suffix = target_file.suffix
            new_name = f"{stem}_{timestamp}{suffix}"
            return target_file.parent / new_name

        else:  # append_number (默认)
            # 添加数字后缀
            counter = 1
            original_stem = target_file.stem
            suffix = target_file.suffix
            parent = target_file.parent

            while target_file.exists():
                new_name = f"{original_stem}_{counter}{suffix}"
                target_file = parent / new_name
                counter += 1

            return target_file

    async def _organize_classified_files(self,
                                       input_data: Dict[str, Any],
                                       context: WorkflowContext,
                                       config: Dict[str, Any]) -> Dict[str, Any]:
        """整理分类后的文件"""
        self.logger.info("开始文件整理归档...")

        # 直接从input_data中获取classified_results
        classified_results = input_data.get("classified_results", [])
        organization_results = []

        for result in classified_results:
            classified_segments = result.get("classified_segments", [])
            organized_segments = []

            for segment_data in classified_segments:
                source_file = segment_data.get("file_path")
                target_dir = segment_data.get("target_directory")

                if not source_file or not os.path.exists(source_file):
                    continue

                try:
                    # 创建目标目录
                    target_path = Path(self.config.base_directory) / target_dir
                    target_path.mkdir(parents=True, exist_ok=True)

                    # 生成目标文件名
                    source_path = Path(source_file)
                    target_file = target_path / source_path.name

                    # 处理文件名冲突
                    resolved_target = self._resolve_filename_conflict(target_file)

                    # 如果策略是跳过且文件已存在，则跳过此文件
                    if resolved_target is None:
                        self.logger.warning(f"跳过文件（已存在）: {source_path.name}")
                        continue

                    target_file = resolved_target

                    # 移动或复制文件
                    if self.config.move_files:
                        import shutil
                        shutil.move(str(source_path), str(target_file))
                        operation = "moved"
                    else:
                        import shutil
                        shutil.copy2(str(source_path), str(target_file))
                        operation = "copied"

                    organized_segments.append({
                        **segment_data,
                        "final_path": str(target_file),
                        "operation": operation
                    })

                    self.logger.info(f"文件{operation}: {source_path.name} -> {target_dir}")

                except Exception as e:
                    self.logger.error(f"文件整理失败 {source_file}: {e}")
                    organized_segments.append({
                        **segment_data,
                        "error": str(e)
                    })

            organization_results.append({
                **result,
                "organized_segments": organized_segments
            })

        total_organized = sum(len(r.get("organized_segments", [])) for r in organization_results)
        self.logger.info(f"文件整理完成，共处理 {total_organized} 个文件")

        return {
            "organization_results": organization_results,
            "total_organized": total_organized
        }

    async def _extract_and_save_metadata(self,
                                        input_data: Dict[str, Any],
                                        context: WorkflowContext,
                                        config: Dict[str, Any]) -> Dict[str, Any]:
        """
        Extract media metadata for each organized file and persist it.

        Reads "organization_results" from input_data, runs the metadata
        service on every file that was successfully moved/copied, and stores
        a record in the JSON "database" under base_directory.

        Returns:
            Dict with "metadata_results" and "total_metadata".
        """
        from dataclasses import is_dataclass

        self.logger.info("开始提取元数据...")

        organization_results = input_data.get("organization_results", [])
        metadata_results = []

        for result in organization_results:
            organized_segments = result.get("organized_segments", [])
            segments_with_metadata = []

            for segment_data in organized_segments:
                final_path = segment_data.get("final_path")
                if not final_path or not os.path.exists(final_path):
                    continue

                try:
                    metadata = await self.metadata_service.extract_metadata(final_path)

                    # Persist to the JSON-file "database" (simplified storage).
                    db_path = Path(self.config.base_directory) / "db.json"
                    await self._save_to_database(final_path, metadata, segment_data, db_path, context)

                    segments_with_metadata.append({
                        **segment_data,
                        # asdict() only accepts dataclass instances; the old
                        # hasattr(metadata, '__dict__') check matched almost
                        # any object and would make asdict() raise TypeError,
                        # so test for a dataclass explicitly.
                        "metadata": asdict(metadata) if is_dataclass(metadata) else metadata
                    })

                    self.logger.info(f"元数据提取完成: {Path(final_path).name}")

                except Exception as e:
                    self.logger.error(f"元数据提取失败 {final_path}: {e}")
                    segments_with_metadata.append({
                        **segment_data,
                        "metadata_error": str(e)
                    })

            metadata_results.append({
                **result,
                "segments_with_metadata": segments_with_metadata
            })

        total_metadata = sum(len(r.get("segments_with_metadata", [])) for r in metadata_results)
        self.logger.info(f"元数据提取完成，共处理 {total_metadata} 个文件")

        return {
            "metadata_results": metadata_results,
            "total_metadata": total_metadata
        }

    async def _save_to_database(self,
                              file_path: str,
                              metadata: Any,
                              segment_data: Dict[str, Any],
                              db_path: Path,
                              context: WorkflowContext) -> None:
        """Persist one segment's record into the JSON "database" file.

        Loads the existing JSON document (recovering from a corrupt file by
        backing it up and starting fresh), appends a record describing
        *file_path*, and writes the document back.

        Args:
            file_path: Final path of the organized video segment.
            metadata: Extracted metadata (dataclass instance or plain value).
            segment_data: Classification/segment info produced upstream.
            db_path: Location of the JSON database file.
            context: Workflow context; its start_time stamps the record.

        Raises:
            Exception: Re-raised after logging when the load/save fails.
        """
        from dataclasses import is_dataclass

        try:
            # Load existing data; a corrupt file is backed up and replaced.
            if db_path.exists():
                try:
                    with open(db_path, 'r', encoding='utf-8') as f:
                        db_data = json.load(f)
                except (json.JSONDecodeError, ValueError) as e:
                    self.logger.warning(f"数据库文件损坏，重新创建: {e}")
                    # Keep the corrupt file for inspection. replace() (unlike
                    # rename()) overwrites a stale backup instead of raising
                    # on Windows.
                    backup_path = db_path.with_suffix('.json.backup')
                    db_path.replace(backup_path)
                    db_data = {"videos": [], "last_updated": None}
            else:
                db_data = {"videos": [], "last_updated": None}

            # asdict() only accepts dataclass instances; the previous
            # hasattr(metadata, '__dict__') guard matched arbitrary objects
            # and could make asdict() raise TypeError.
            metadata_dict = asdict(metadata) if is_dataclass(metadata) else metadata

            # Build the new video record.
            video_record = {
                "id": len(db_data["videos"]) + 1,  # simple auto-increment id
                "file_path": file_path,
                "file_name": Path(file_path).name,
                "category": segment_data.get("classification", "unclassified"),  # 使用classification字段
                "confidence": segment_data.get("confidence", 0.0),
                "classification": segment_data.get("classification", "unclassified"),
                "metadata": metadata_dict,
                "created_at": context.start_time.isoformat(),
                "segment_info": {
                    "start_time": segment_data.get("start_time", 0.0),
                    "end_time": segment_data.get("end_time", 0.0),
                    "duration": segment_data.get("duration", 0.0)
                }
            }

            db_data["videos"].append(video_record)
            db_data["last_updated"] = context.start_time.isoformat()

            # Write the whole document back.
            with open(db_path, 'w', encoding='utf-8') as f:
                json.dump(db_data, f, indent=2, ensure_ascii=False, default=json_serializer)

        except Exception as e:
            self.logger.error(f"保存数据库失败: {e}")
            raise

    async def _generate_processing_report(self,
                                        input_data: Dict[str, Any],
                                        context: WorkflowContext,
                                        config: Dict[str, Any]) -> Dict[str, Any]:
        """Build the final processing report and write it to disk as JSON.

        Aggregates per-category segment counts over the metadata results,
        assembles a report dict (statistics + configuration + raw results),
        saves it under the configured base directory, and returns it along
        with the report file path.
        """
        self.logger.info("生成处理报告...")

        from datetime import datetime, timezone

        metadata_results = input_data.get("metadata_results", [])

        # Aggregate totals and the per-category distribution in one pass.
        total_original_videos = len(metadata_results)
        total_segments = 0
        category_stats: Dict[str, int] = {}
        for entry in metadata_results:
            segments = entry.get("segments_with_metadata", [])
            total_segments += len(segments)
            for seg in segments:
                key = seg.get("target_category", "unclassified")
                category_stats[key] = category_stats.get(key, 0) + 1

        now_utc = datetime.now(timezone.utc)

        # Normalize start_time to an aware datetime so the subtraction works.
        started = context.start_time
        if started and started.tzinfo is None:
            started = started.replace(tzinfo=timezone.utc)

        # Timestamp used for both the report payload and the file name.
        stamp_source = started if started else now_utc

        report = {
            "workflow_id": context.workflow_id,
            "execution_time": stamp_source.isoformat(),
            "processing_duration": (now_utc - started).total_seconds() if started else 0,
            "statistics": {
                "total_original_videos": total_original_videos,
                "total_segments_generated": total_segments,
                "category_distribution": category_stats
            },
            "configuration": asdict(self.config),
            "results": metadata_results
        }

        # Persist the report beside the processed material.
        report_path = Path(self.config.base_directory) / f"processing_report_{stamp_source.strftime('%Y%m%d_%H%M%S')}.json"
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False, default=json_serializer)

        self.logger.info(f"处理报告已生成: {report_path}")

        return {
            "report": report,
            "report_path": str(report_path)
        }

    async def execute_workflow(self, project_directory: str) -> Dict[str, Any]:
        """Run the complete material-ingestion workflow for one project.

        Points the configuration at *project_directory*, builds the workflow
        definition, and drives it through a WorkflowExecutor. Failures are
        logged and re-raised to the caller.
        """
        self.logger.info(f"开始执行视频素材入库工作流: {project_directory}")

        # Retarget the workflow at the requested project directory.
        self.config.base_directory = project_directory

        runner = WorkflowExecutor()
        definition = self.create_workflow()

        try:
            outcome = await runner.execute_workflow(
                workflow=definition,
                initial_data={"base_directory": project_directory}
            )
            self.logger.info("视频素材入库工作流执行完成")
            return outcome
        except Exception as e:
            self.logger.error(f"工作流执行失败: {e}")
            raise


# 工厂函数
def create_video_material_workflow(config: Dict[str, Any]) -> VideoMaterialWorkflow:
    """Factory: build a VideoMaterialWorkflow from a plain config dict.

    The dict's keys must match MaterialWorkflowConfig's fields.
    """
    return VideoMaterialWorkflow(MaterialWorkflowConfig(**config))


# 便捷执行函数
async def execute_material_workflow(project_directory: str,
                                  config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Convenience wrapper: build and run the material-ingestion workflow.

    Args:
        project_directory: Root directory of the project to process.
        config: Optional keyword overrides for MaterialWorkflowConfig;
            when omitted, all defaults are used.

    Returns:
        The workflow execution result dictionary.
    """
    # Annotation fixed to Optional[...] — a default of None is not a
    # valid Dict[str, Any]. Behavior is unchanged.
    if config is None:
        config = {}

    workflow = create_video_material_workflow(config)
    return await workflow.execute_workflow(project_directory)
