"""
视频分析服务
基于 LLaVA-NeXT-Video 模型的视频内容分析服务
"""
import av
import torch
import numpy as np
import os
import logging
import uuid
import time
import requests
import tempfile
import asyncio
import subprocess
from urllib.parse import urlparse
from typing import Optional, Dict, Any, List, Tuple
from datetime import datetime
from pathlib import Path
import cv2
import json
from scenedetect import detect, ContentDetector, split_video_ffmpeg
from scenedetect.frame_timecode import FrameTimecode

from transformers import AutoModel, AutoTokenizer
import torch
from app.core.config import settings
from app.models.video import VideoAnalysisStatus

logger = logging.getLogger(__name__)

class VideoAnalysisService:
    """视频分析服务类"""
    
    def __init__(self):
        self.model = None
        self.tokenizer = None
        self.image_processor = None
        self.device = None
        self.model_loaded = False
        self._analysis_cache: Dict[str, Dict] = {}
        self._initialization_lock = asyncio.Lock()  # 添加异步锁
        self._initializing = False  # 初始化状态标记
        
        # Ollama配置
        self.ollama_base_url = "http://192.168.0.151:11434"
        self.ollama_model = "mistral-nemo:12b"
        
    async def initialize_model(self) -> bool:
        """
        Initialize the analysis model as a lazy singleton.

        An asyncio lock guarantees that at most one coroutine performs the
        actual load; other callers either return immediately (already loaded)
        or wait on the lock and then take the double-checked fast path.

        Returns:
            bool: True when model, tokenizer and image processor are ready,
            False when loading failed (partial state is reset).
        """
        # Fast path: everything already loaded, nothing to do.
        if self.model_loaded and self.model is not None and self.tokenizer is not None and self.image_processor is not None:
            logger.info("模型已加载，跳过初始化")
            return True
        
        # Only one coroutine at a time may run the initialization body.
        async with self._initialization_lock:
            # Double-check after acquiring the lock: another coroutine may
            # have completed the load while we were waiting.
            if self.model_loaded and self.model is not None and self.tokenizer is not None and self.image_processor is not None:
                logger.info("模型已在其他协程中加载完成")
                return True
            
            # NOTE(review): _initializing is only set/cleared while holding
            # this lock, so this wait loop looks unreachable from here; kept
            # as defensive code -- confirm before removing.
            if self._initializing:
                logger.info("模型正在其他协程中初始化，等待完成...")
                while self._initializing:
                    await asyncio.sleep(0.1)
                return self.model_loaded
            
            # Perform the actual load.
            self._initializing = True
            try:
                logger.info("开始初始化视频分析模型...")
                
                # Select the device based on CUDA availability.
                cuda_available = torch.cuda.is_available()
                logger.info(f"CUDA 可用: {cuda_available}")
                
                if cuda_available:
                    logger.info(f"PyTorch CUDA 版本: {torch.version.cuda}")
                    logger.info(f"GPU 数量: {torch.cuda.device_count()}")
                    self.device = "cuda:0"
                else:
                    logger.warning("CUDA 不可用，将使用 CPU")
                    self.device = "cpu"
                
                # Load the model weights.
                logger.info(f"正在加载模型: {settings.MODEL_ID}")
                
                # Resolve the torch dtype: explicit bfloat16 via settings,
                # otherwise float16 on GPU and float32 on CPU.
                if hasattr(settings, 'TORCH_DTYPE') and settings.TORCH_DTYPE == "bfloat16":
                    torch_dtype = torch.bfloat16
                else:
                    torch_dtype = torch.float16 if cuda_available else torch.float32
                
                # Load the VideoChat-Flash model (requires trust_remote_code).
                self.tokenizer = AutoTokenizer.from_pretrained(settings.MODEL_ID, trust_remote_code=True)
                self.model = AutoModel.from_pretrained(
                    settings.MODEL_ID, 
                    trust_remote_code=True
                ).to(torch_dtype)
                
                if cuda_available:
                    self.model = self.model.cuda()
                
                # The image processor is exposed by the model's vision tower.
                self.image_processor = self.model.get_vision_tower().image_processor
                
                # Optional VideoChat-Flash token-compression configuration.
                if hasattr(settings, 'MM_LLM_COMPRESS') and settings.MM_LLM_COMPRESS:
                    logger.info("启用VideoChat-Flash压缩模式")
                    self.model.config.mm_llm_compress = True
                    self.model.config.llm_compress_type = "uniform0_attention"
                    self.model.config.llm_compress_layer_list = [4, 18]
                    self.model.config.llm_image_token_ratio_list = [1, 0.75, 0.25]
                else:
                    self.model.config.mm_llm_compress = False
                
                self.model_loaded = True
                logger.info("模型初始化完成")
                return True
                
            except Exception as e:
                # Reset all partially-initialized state on failure.
                logger.error(f"模型初始化失败: {str(e)}")
                self.model_loaded = False
                self.model = None
                self.tokenizer = None
                self.image_processor = None
                return False
            finally:
                self._initializing = False
    
    def extract_scene_change_frames(self, video_path: str, threshold: float = 30.0, target_size: int = 224) -> Optional[np.ndarray]:
        """
        Extract one representative frame per detected scene change using PySceneDetect.

        Fixes over the previous version: the OpenCV capture is now released in a
        ``finally`` block (it leaked on any exception), and the frame index is
        clamped safely even when the reported total frame count is zero.

        Args:
            video_path: Path to the video file.
            threshold: Scene-detection threshold; lower values are more sensitive.
            target_size: Output resolution (frames are resized to target_size x target_size).

        Returns:
            RGB frame array of shape (num_frames, target_size, target_size, 3),
            or None on failure.
        """
        cap = None
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            
            logger.info(f"开始使用PySceneDetect分析场景切换: {video_path}")
            logger.info(f"检测阈值: {threshold}, 目标分辨率: {target_size}x{target_size}")
            
            # Detect scene boundaries with PySceneDetect's content detector.
            scene_list = detect(video_path, ContentDetector(threshold=threshold))
            logger.info(f"检测到 {len(scene_list)} 个场景")
            
            # Debug info: the element types vary between PySceneDetect versions.
            if scene_list:
                logger.info(f"第一个场景的类型: {type(scene_list[0])}")
                if len(scene_list[0]) >= 2:
                    logger.info(f"场景开始时间类型: {type(scene_list[0][0])}, 值: {scene_list[0][0]}")
                    logger.info(f"场景结束时间类型: {type(scene_list[0][1])}, 值: {scene_list[0][1]}")
            
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"无法打开视频文件: {video_path}")
            
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            logger.info(f"视频FPS: {fps:.2f}, 总帧数: {total_frames}")
            
            frames = []
            
            if len(scene_list) == 0:
                # No scene change detected: fall back to the very first frame.
                logger.warning("未检测到场景切换，提取第一帧")
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                ret, frame = cap.read()
                if ret:
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame_resized = cv2.resize(frame_rgb, (target_size, target_size))
                    frames.append(frame_resized)
                    logger.info(f"成功提取第0帧作为备用，分辨率: {frame_resized.shape}")
            else:
                # Grab the first frame of every detected scene.
                for i, scene in enumerate(scene_list):
                    try:
                        start_time, end_time = scene  # scene is a (start, end) pair
                        
                        start_seconds = self._scene_time_to_seconds(start_time)
                        if start_seconds is None:
                            logger.warning(f"场景 {i+1} 时间解析失败，跳过")
                            continue
                        
                        frame_number = int(start_seconds * fps)
                        # Clamp into [0, total_frames - 1]; the outer max()
                        # guards against total_frames == 0 producing -1.
                        frame_number = min(max(0, frame_number), max(total_frames - 1, 0))
                        
                        # Seek to the chosen frame and decode it.
                        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
                        ret, frame = cap.read()
                        
                        if ret:
                            # BGR -> RGB, then resize to the target resolution.
                            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                            frame_resized = cv2.resize(frame_rgb, (target_size, target_size))
                            frames.append(frame_resized)
                        else:
                            logger.warning(f"无法读取第 {frame_number} 帧")
                    
                    except Exception as scene_error:
                        # One bad scene must not abort the remaining scenes.
                        logger.error(f"处理场景 {i+1} 时出错: {str(scene_error)}")
                        continue
            
            if len(frames) == 0:
                raise ValueError("未能提取到任何有效帧")
            
            logger.info(f"总共提取了 {len(frames)} 个场景变化帧")
            return np.stack(frames)
            
        except Exception as e:
            logger.error(f"提取场景切换帧失败: {str(e)}")
            return None
        finally:
            # Always release the capture, including on error paths where the
            # previous implementation leaked it.
            if cap is not None:
                cap.release()
    
    def _scene_time_to_seconds(self, scene_time) -> Optional[float]:
        """
        Best-effort conversion of a PySceneDetect scene time to seconds.

        Handles FrameTimecode objects (``get_seconds``), objects exposing a
        ``seconds`` attribute, plain numbers, and string-convertible values.

        Returns:
            Seconds as a float, or None when the value cannot be interpreted.
        """
        if hasattr(scene_time, 'get_seconds'):
            # FrameTimecode object (the common case).
            return scene_time.get_seconds()
        if hasattr(scene_time, 'seconds'):
            start_seconds = scene_time.seconds
            logger.info(f"使用seconds属性: {start_seconds}")
            return start_seconds
        if isinstance(scene_time, (int, float)):
            start_seconds = float(scene_time)
            logger.info(f"直接使用数值: {start_seconds}")
            return start_seconds
        # Last resort: parse the string representation.
        try:
            start_seconds = float(str(scene_time))
            logger.info(f"字符串转换结果: {start_seconds}")
            return start_seconds
        except (ValueError, TypeError):
            logger.error(f"无法解析场景时间: {scene_time}, 类型: {type(scene_time)}, 属性: {dir(scene_time)}")
            return None
    
    def split_frames_into_batches(self, frames: np.ndarray, max_frames_per_batch) -> List[np.ndarray]:
        """
        将帧数组分割成多个批次
        Args:
            frames: 输入的帧数组
            max_frames_per_batch: 每批次最大帧数
        Returns:
            帧批次列表
        """
        if len(frames) <= max_frames_per_batch:
            return [frames]
        
        batches = []
        for i in range(0, len(frames), max_frames_per_batch):
            batch = frames[i:i + max_frames_per_batch]
            batches.append(batch)
        
        logger.info(f"将 {len(frames)} 帧分割为 {len(batches)} 个批次")
        return batches
    
    def split_video_by_scenes(self, video_path: str, threshold: float = 30.0, output_dir: Optional[str] = None) -> List[str]:
        """
        Split a video into per-scene segment files using PySceneDetect.

        Args:
            video_path: Input video path.
            threshold: Scene-detection threshold; lower values are more sensitive.
            output_dir: Output directory; a fresh temp directory is created when None.

        Returns:
            List of segment file paths (empty list on failure).
        """
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            
            # Create the output directory (randomized temp dir by default).
            if output_dir is None:
                output_dir = os.path.join(tempfile.gettempdir(), f"video_splits_{uuid.uuid4().hex[:8]}")
            os.makedirs(output_dir, exist_ok=True)
            
            logger.info(f"开始使用PySceneDetect切割视频: {video_path}")
            logger.info(f"输出目录: {output_dir}, 检测阈值: {threshold}")
            
            # Detect scene boundaries.
            scene_list = detect(video_path, ContentDetector(threshold=threshold))
            logger.info(f"检测到 {len(scene_list)} 个场景")
            
            if not scene_list:
                # No scene change: copy the whole video as a single segment.
                logger.warning("未检测到场景切换，将整个视频作为一个片段")
                output_path = os.path.join(output_dir, "scene_001.mp4")
                import shutil
                shutil.copy2(video_path, output_path)
                return [output_path]
            
            # split_video_ffmpeg returns the number of segments written,
            # NOT the file paths -- paths are reconstructed below.
            num_segments = split_video_ffmpeg(
                input_video_path=video_path,
                scene_list=scene_list,
                output_dir=output_dir,
                output_file_template='scene_$SCENE_NUMBER.mp4',
                show_progress=True
            )
            
            video_segments = []
            
            logger.info(f"切分操作返回片段数量: {num_segments}")
            
            # Scan the output directory so we know what was actually written.
            try:
                actual_files = [f for f in os.listdir(output_dir) if f.endswith('.mp4')]
                logger.debug(f"输出目录中的实际文件: {actual_files}")
            except Exception as scan_error:
                logger.error(f"扫描输出目录失败: {scan_error}")
                actual_files = []
            
            # PySceneDetect's numbering scheme varies (scene_1 / scene_01 /
            # scene_001) -- probe the likely candidates for each index.
            for i in range(1, num_segments + 1):
                possible_names = [
                    f"scene_{i}.mp4",           # scene_1.mp4
                    f"scene_{i:03d}.mp4",       # scene_001.mp4  
                    f"scene_{i:02d}.mp4",       # scene_01.mp4
                ]
                
                found = False
                for filename in possible_names:
                    segment_path = os.path.join(output_dir, filename)
                    if os.path.exists(segment_path):
                        video_segments.append(segment_path)
                        found = True
                        break
                
                if not found:
                    logger.warning(f"未找到第{i}个片段文件")
            
            # Fallback: trust the directory scan if index probing found nothing.
            if not video_segments and actual_files:
                logger.info("使用扫描到的实际文件列表")
                for filename in sorted(actual_files):  # sort to keep scene order
                    segment_path = os.path.join(output_dir, filename)
                    video_segments.append(segment_path)
            
            logger.info(f"成功切割视频为 {len(video_segments)} 个片段")
            return video_segments
            
        except Exception as e:
            logger.error(f"视频切割失败: {str(e)}")
            return []
    
    def split_video_by_scenes_direct(self, video_path: str, threshold: float = 30.0, output_dir: Optional[str] = None) -> List[str]:
        """
        Direct scene split -- variant of split_video_by_scenes with less
        detection logging.

        Fix: the "segment found" debug line previously logged the literal text
        "(unknown)" instead of the discovered path.

        Args:
            video_path: Input video path.
            threshold: Scene-detection threshold.
            output_dir: Output directory; a fresh temp directory is created when None.

        Returns:
            List of segment file paths (empty list on failure).
        """
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            
            # Create the output directory (randomized temp dir by default).
            if output_dir is None:
                output_dir = os.path.join(tempfile.gettempdir(), f"video_splits_{uuid.uuid4().hex[:8]}")
            os.makedirs(output_dir, exist_ok=True)
            
            logger.info(f"直接按场景切分视频: {video_path}")
            logger.info(f"输出目录: {output_dir}, 阈值: {threshold}")
            
            # Detect and split directly, without a separate scene-count log.
            scene_list = detect(video_path, ContentDetector(threshold=threshold))
            
            if not scene_list:
                # No scene change: copy the whole video as a single segment.
                logger.warning("未检测到场景切换，将整个视频作为一个片段")
                output_path = os.path.join(output_dir, "scene_001.mp4")
                import shutil
                shutil.copy2(video_path, output_path)
                return [output_path]
            
            # split_video_ffmpeg returns the number of segments written,
            # NOT the file paths -- paths are reconstructed below.
            num_segments = split_video_ffmpeg(
                input_video_path=video_path,
                scene_list=scene_list,
                output_dir=output_dir,
                output_file_template='scene_$SCENE_NUMBER.mp4',
                show_progress=True
            )
            
            video_segments = []
            
            logger.info(f"切分操作返回片段数量: {num_segments}")
            logger.info(f"检查输出目录: {output_dir}")
            
            # Scan the output directory so we know what was actually written.
            try:
                actual_files = [f for f in os.listdir(output_dir) if f.endswith('.mp4')]
                logger.info(f"输出目录中的实际文件: {actual_files}")
            except Exception as scan_error:
                logger.error(f"扫描输出目录失败: {scan_error}")
                actual_files = []
            
            # PySceneDetect's numbering scheme varies (scene_1 / scene_01 /
            # scene_001) -- probe the likely candidates for each index.
            for i in range(1, num_segments + 1):
                possible_names = [
                    f"scene_{i}.mp4",           # scene_1.mp4
                    f"scene_{i:03d}.mp4",       # scene_001.mp4  
                    f"scene_{i:02d}.mp4",       # scene_01.mp4
                ]
                
                found = False
                for filename in possible_names:
                    segment_path = os.path.join(output_dir, filename)
                    if os.path.exists(segment_path):
                        video_segments.append(segment_path)
                        # Bug fix: log the actual path (was the literal "(unknown)").
                        logger.debug(f"找到片段文件: {segment_path}")
                        found = True
                        break
                
                if not found:
                    logger.warning(f"未找到第{i}个片段文件，尝试的格式: {possible_names}")
            
            # Fallback: trust the directory scan if index probing found nothing.
            if not video_segments and actual_files:
                logger.info("使用扫描到的实际文件列表")
                for filename in sorted(actual_files):  # sort to keep scene order
                    segment_path = os.path.join(output_dir, filename)
                    video_segments.append(segment_path)
                    
            logger.info(f"视频切分完成，共生成 {len(video_segments)} 个场景片段")
            return video_segments
            
        except Exception as e:
            logger.error(f"视频场景切分失败: {str(e)}")
            return []
    
    def read_video_frames(self, video_path: str, max_frames: int = 8, scene_threshold: float = 30.0) -> Optional[np.ndarray]:
        """
        Read frames from a video, preferring scene-change detection over uniform sampling.

        Args:
            video_path: Path to the video file.
            max_frames: Cap on the number of returned frames; scene frames beyond
                this count are uniformly subsampled.
            scene_threshold: Scene-detection threshold.

        Returns:
            Frame array, or None when both scene detection and the fallback fail.
        """
        try:
            scene_frames = self.extract_scene_change_frames(
                video_path, threshold=scene_threshold, target_size=224
            )
            
            # Scene detection failed entirely -> uniform-sampling fallback.
            if scene_frames is None:
                logger.error("场景检测失败，尝试使用备用方法")
                return self._fallback_frame_extraction(video_path, max_frames)
            
            # Too many scene frames: keep a uniformly spaced subset.
            if len(scene_frames) > max_frames:
                logger.info(f"场景帧数 ({len(scene_frames)}) 超过最大限制 ({max_frames})，进行采样")
                keep = np.linspace(0, len(scene_frames) - 1, max_frames, dtype=int)
                scene_frames = scene_frames[keep]
                logger.info(f"采样后保留 {len(scene_frames)} 帧")
            
            logger.info(f"最终提取 {len(scene_frames)} 帧用于分析")
            return scene_frames
            
        except Exception as e:
            # Any unexpected error also routes through the fallback extractor.
            logger.error(f"读取视频帧失败: {str(e)}")
            return self._fallback_frame_extraction(video_path, max_frames)
    
    def _fallback_frame_extraction(self, video_path: str, max_frames: int) -> Optional[np.ndarray]:
        """
        Fallback frame extraction: uniform sampling, used when scene detection fails.

        Fixes over the previous version: the PyAV container is closed in a
        ``finally`` block (it leaked on error paths), the frame-count probe no
        longer materializes frame objects just to count them, and index
        membership checks use a set instead of scanning a list per frame.

        Args:
            video_path: Path to the video file.
            max_frames: Maximum number of frames to return.

        Returns:
            RGB frame array resized to 224x224, or None on failure.
        """
        container = None
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")
            
            logger.info("使用备用方法进行均匀帧采样")
            
            container = av.open(video_path)
            if not container.streams.video:
                raise ValueError("视频文件中没有视频流")
            
            # Frame count as reported by stream metadata (0 when unknown).
            total_frames = container.streams.video[0].frames
            if total_frames == 0:
                # Metadata has no frame count: probe by decoding. The probe is
                # capped at max_frames * 2 to bound work, so total_frames may
                # understate very long videos (matches prior behavior).
                logger.warning("无法获取总帧数，将遍历所有帧")
                probe_count = 0
                for _ in container.decode(video=0):
                    probe_count += 1
                    if probe_count >= max_frames * 2:
                        break
                total_frames = probe_count
                container.seek(0)  # rewind for the real decoding pass
            
            logger.info(f"视频总帧数: {total_frames}")
            
            # Uniformly spaced frame indices to keep.
            if total_frames <= max_frames:
                indices = list(range(total_frames))
            else:
                indices = np.linspace(0, total_frames - 1, max_frames, dtype=int)
            
            logger.info(f"采样帧索引: {indices}")
            
            # Set for O(1) membership tests inside the decode loop.
            wanted = {int(i) for i in indices}
            
            frames = []
            container.seek(0)
            current_frame_idx = 0
            
            for frame in container.decode(video=0):
                if current_frame_idx in wanted:
                    frame_array = frame.to_ndarray(format="rgb24")
                    # Resize to the model's expected 224x224 input resolution.
                    frames.append(cv2.resize(frame_array, (224, 224)))
                current_frame_idx += 1
                
                # Stop as soon as every requested frame has been collected.
                if len(frames) >= len(wanted):
                    break
            
            if len(frames) == 0:
                raise ValueError("未能读取到任何视频帧")
            
            logger.info(f"备用方法成功读取 {len(frames)} 帧")
            return np.stack(frames)
            
        except Exception as e:
            logger.error(f"备用帧提取失败: {str(e)}")
            return None
        finally:
            # Always release the container, including on error paths where the
            # previous implementation leaked it.
            if container is not None:
                container.close()
    
    async def analyze_video_segments(
        self, 
        video_path: str, 
        prompt: str = "描述这个视频片段的内容",
        max_tokens: int = 200,
        scene_threshold: float = 30.0
    ) -> Dict[str, Any]:
        """
        Analyze a video by splitting it into scene segments.

        Each segment is analyzed individually; the per-segment results are then
        summarized with the configured Ollama model. Failures are returned as a
        FAILED-status result dictionary rather than raised.

        Args:
            video_path: Path to the video file.
            prompt: Analysis prompt applied to every segment.
            max_tokens: Maximum number of generated tokens per segment.
            scene_threshold: Scene-detection threshold.

        Returns:
            Result dictionary with status, summary / error message, timing and
            metadata; the result is also cached under its analysis_id.
        """
        start_time = time.time()
        analysis_id = str(uuid.uuid4())
        
        try:
            logger.info(f"开始新的视频片段分析: {analysis_id}")
            
            # The model must be loaded before any segment can be analyzed.
            if not await self.initialize_model():
                raise RuntimeError("模型初始化失败")
            
            # Split the video into per-scene segment files.
            video_segments = self.split_video_by_scenes(video_path, threshold=scene_threshold)
            
            if not video_segments:
                raise ValueError("未能切割出任何视频片段")
            
            logger.info(f"成功切割出 {len(video_segments)} 个视频片段")
            
            # Analyze each segment; a failed segment is recorded as an error
            # string and does not abort the remaining segments.
            segment_results = []
            
            for i, segment_path in enumerate(video_segments):
                logger.info(f"分析第 {i+1}/{len(video_segments)} 个片段: {segment_path}")
                
                try:
                    # NOTE(review): _analyze_single_video_segment is defined
                    # elsewhere in this class -- not visible in this chunk.
                    result = await self._analyze_single_video_segment(
                        segment_path, 
                        f"第{i+1}个片段: {prompt}",
                        max_tokens
                    )
                    segment_results.append(result)
                    
                except Exception as e:
                    logger.error(f"分析片段 {i+1} 失败: {str(e)}")
                    segment_results.append(f"片段{i+1}分析失败: {str(e)}")
            
            # Summarize all per-segment results with Ollama.
            logger.info("开始使用Ollama进行最终总结...")
            combined_results = "\n\n".join([f"片段{i+1}: {result}" for i, result in enumerate(segment_results)])
            
            final_summary = await self._generate_ollama_summary(
                combined_results, 
                "请基于以下视频片段分析结果，生成一个完整的视频内容总结"
            )
            
            # Remove the temporary segment files created by the split step.
            self._cleanup_temp_files(video_segments)
            
            processing_time = time.time() - start_time
            
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.COMPLETED,
                "result": final_summary,
                "error_message": None,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "segments_processed": len(video_segments),
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "device": self.device,
                    "method": "video_segments",
                    "scene_threshold": scene_threshold
                }
            }
            
            logger.info(f"视频片段分析完成，用时: {processing_time:.2f}秒")
            self._analysis_cache[analysis_id] = result
            return result
            
        except Exception as e:
            processing_time = time.time() - start_time
            error_message = str(e)
            
            logger.error(f"视频片段分析失败: {error_message}")
            
            # Failure is reported through the result dict, not re-raised.
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.FAILED,
                "result": None,
                "error_message": error_message,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "method": "video_segments",
                    "scene_threshold": scene_threshold
                }
            }
            
            self._analysis_cache[analysis_id] = result
            return result
    async def _analyze_video_raw(
        self, 
        video_path: str, 
        prompt: str = "视频内容总结",
        max_frames: int = 8,
        max_tokens: int = 200,
        scene_threshold: float = 30.0
    ) -> Dict[str, Any]:
        """
        Raw video analysis on scene-change frames, without the Ollama summary step.

        Frames are extracted via scene detection; more than 30 frames triggers
        batched processing, otherwise a single-pass analysis is used.

        Args:
            video_path: Path to the video file.
            prompt: Analysis prompt.
            max_frames: Documented as "max frames per batch" but not referenced
                anywhere in this body -- the batch size is decided downstream.
            max_tokens: Maximum number of generated tokens.
            scene_threshold: Scene-detection threshold.

        Returns:
            Result dictionary; on failure a FAILED-status dictionary is cached
            and returned rather than raising.
        """
        analysis_id = str(uuid.uuid4())
        start_time = time.time()
        
        try:
            # NOTE(review): ensure_model_ready is defined elsewhere in this
            # class -- not visible in this chunk.
            if not await self.ensure_model_ready():
                raise RuntimeError("模型初始化失败")
            
            logger.info(f"开始分析视频: {video_path}")
            logger.info(f"分析ID: {analysis_id}")
            logger.info(f"提示词: {prompt}")
            logger.info(f"场景检测阈值: {scene_threshold}")
            
            # Extract one frame per detected scene change.
            all_frames = self.extract_scene_change_frames(video_path, threshold=scene_threshold, target_size=224)
            if all_frames is None:
                raise ValueError("无法提取视频帧")
            
            logger.info(f"提取到 {len(all_frames)} 个场景变化帧")
            
            # More than 30 frames: split into batches to bound model input size.
            if len(all_frames) > 30:
                logger.info(f"帧数 ({len(all_frames)}) 超过30，将进行分段处理")
                return await self._analyze_video_in_batches(
                    all_frames, prompt, analysis_id, start_time, max_tokens
                )
            else:
                # Small enough to analyze in a single pass.
                logger.info(f"帧数 ({len(all_frames)}) 在限制内，直接处理")
                return await self._analyze_single_batch(
                    all_frames, prompt, analysis_id, start_time, max_tokens
                )
            
        except Exception as e:
            processing_time = time.time() - start_time
            error_message = str(e)
            
            logger.error(f"视频分析失败: {error_message}")
            
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.FAILED,
                "result": None,
                "error_message": error_message,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    # device may be unset if initialization failed early
                    "device": self.device if hasattr(self, 'device') else None,
                }
            }
            
            self._analysis_cache[analysis_id] = result
            return result
    
    async def _analyze_single_batch(
        self, 
        frames: np.ndarray, 
        prompt: str, 
        analysis_id: str, 
        start_time: float, 
        max_tokens: int
    ) -> Dict[str, Any]:
        """
        Analyze a single batch of frames via the VideoChat-Flash chat API.

        Args:
            frames: Video frame array to analyze.
            prompt: Analysis prompt.
            analysis_id: Identifier used when caching the result.
            start_time: Analysis start timestamp (time.time()), used for timing.
            max_tokens: Maximum number of generated tokens.

        Returns:
            Result dictionary (also stored in the analysis cache).

        Raises:
            RuntimeError: If the model call fails. The original exception is
                chained as __cause__ (previously the traceback was discarded).
        """
        try:
            logger.info("使用 VideoChat-Flash 分析视频片段...")
            
            # Deterministic (greedy) generation settings.
            response = self.model.chat(
                self.tokenizer,
                video=frames,  # the frame array is passed straight to the model
                question=prompt,
                generation_config=dict(
                    max_new_tokens=max_tokens,
                    temperature=0.0,
                    top_p=0.1,
                    do_sample=False,
                )
            )
            
            processing_time = time.time() - start_time
            
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.COMPLETED,
                "result": response,
                "error_message": None,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "frames_processed": len(frames),
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "device": self.device,
                    "batch_count": 1,
                    "frame_extraction_method": "scene_detection"
                }
            }
            
            logger.info(f"视频分析完成，用时: {processing_time:.2f}秒")
            logger.info(f"分析结果长度: {len(response)} 字符")
            
            # Cache so the result can be retrieved again by analysis_id.
            self._analysis_cache[analysis_id] = result
            
            return result
            
        except Exception as e:
            # Chain the original exception so its traceback is preserved.
            raise RuntimeError(f"单批次分析失败: {str(e)}") from e
    
    async def _analyze_video_in_batches(
        self,
        all_frames: np.ndarray,
        prompt: str,
        analysis_id: str,
        start_time: float,
        max_tokens: int
    ) -> Dict[str, Any]:
        """
        Analyze video frames batch by batch and merge the per-batch results.

        Args:
            all_frames: All extracted video frames.
            prompt: Analysis prompt text.
            analysis_id: Unique identifier for this analysis (cache key).
            start_time: Wall-clock time at which the overall analysis started.
            max_tokens: Maximum generated tokens per batch.
        Returns:
            Merged analysis result dictionary (also cached under analysis_id).
        Raises:
            RuntimeError: If the batched analysis fails as a whole; the
                original exception is preserved via chaining. Individual
                batch failures are recorded in the output, not raised.
        """
        try:
            # Split the frames into batches of at most 10 frames each.
            frame_batches = self.split_frames_into_batches(all_frames, max_frames_per_batch=10)
            batch_results = []

            logger.info(f"开始分批次处理 {len(frame_batches)} 个批次")

            for i, batch_frames in enumerate(frame_batches):
                logger.info(f"处理第 {i+1}/{len(frame_batches)} 批次，包含 {len(batch_frames)} 帧")

                # Give each batch a prompt identifying its position in the video.
                batch_prompt = f"这是视频的第{i+1}部分（共{len(frame_batches)}部分）。{prompt}"

                try:
                    # Run the VideoChat-Flash chat method on this batch of frames.
                    response = self.model.chat(
                        self.tokenizer,
                        video=batch_frames,
                        question=batch_prompt,
                        generation_config=dict(
                            max_new_tokens=max_tokens,
                            temperature=0.0,
                            top_p=0.1,
                            do_sample=False,
                        )
                    )

                    batch_results.append(f"第{i+1}部分: {response}")
                    logger.info(f"第 {i+1} 批次处理完成，结果长度: {len(response)} 字符")

                except Exception as batch_error:
                    # A single failed batch is recorded in the merged output
                    # instead of aborting the whole analysis.
                    error_msg = f"第{i+1}批次处理失败: {str(batch_error)}"
                    logger.error(error_msg)
                    batch_results.append(f"第{i+1}部分: 处理失败 - {error_msg}")

            # Merge all per-batch results.
            combined_result = "\n\n".join(batch_results)

            # Prompt used for the optional final summarization step.
            summary_prompt = f"请基于以下分段分析结果，生成一个完整的视频内容总结：\n\n{combined_result}"

            # Only summarize when the merged result is long.
            if len(combined_result) > 2000:
                logger.info("合并结果较长，生成最终总结...")

                try:
                    # Text-only summarization pass with VideoChat-Flash.
                    summary_response = self.model.chat(
                        self.tokenizer,
                        video=None,  # no video needed for the summary pass
                        question=summary_prompt,
                        generation_config=dict(
                            max_new_tokens=max_tokens,
                            temperature=0.0,
                            top_p=0.1,
                            do_sample=False,
                        )
                    )

                    final_result = f"{summary_response}\n\n--- 详细分段分析 ---\n{combined_result}"

                except Exception as summary_error:
                    # Fall back to the raw merged result if summarization fails.
                    logger.error(f"总结生成失败: {str(summary_error)}")
                    final_result = combined_result
            else:
                final_result = combined_result

            processing_time = time.time() - start_time

            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.COMPLETED,
                "result": final_result,
                "error_message": None,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "frames_processed": len(all_frames),
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "device": self.device,
                    "batch_count": len(frame_batches),
                    "frame_extraction_method": "scene_detection"
                }
            }

            logger.info(f"分批次视频分析完成，用时: {processing_time:.2f}秒")
            logger.info(f"总计处理 {len(frame_batches)} 个批次，最终结果长度: {len(final_result)} 字符")

            # Cache the result for later retrieval via get_analysis_result().
            self._analysis_cache[analysis_id] = result

            return result

        except Exception as e:
            # Chain the original exception so its traceback is not lost.
            raise RuntimeError(f"分批次分析失败: {str(e)}") from e
    
    def get_analysis_result(self, analysis_id: str) -> Optional[Dict[str, Any]]:
        """获取分析结果"""
        return self._analysis_cache.get(analysis_id)
    
    def get_all_analyses(self) -> List[Dict[str, Any]]:
        """获取所有分析结果"""
        return list(self._analysis_cache.values())
    
    def clear_cache(self) -> None:
        """Empty the in-memory analysis-result cache in place."""
        cache = self._analysis_cache
        cache.clear()
        logger.info("分析结果缓存已清空")
    
    def download_video_from_url(self, video_url: str) -> str:
        """
        Download a video file from a URL into the temp directory.

        Args:
            video_url: HTTP(S) URL of the video.
        Returns:
            Local path of the downloaded file.
        Raises:
            ValueError: If the download fails for any reason; the original
                exception is chained, and any partially written file is removed.
        """
        temp_path = None
        try:
            logger.info(f"正在下载视频: {video_url}")

            # Derive a filename from the URL path, falling back to a random name.
            parsed_url = urlparse(video_url)
            filename = os.path.basename(parsed_url.path)
            if not filename or '.' not in filename:
                filename = f"video_{uuid.uuid4().hex[:8]}.mp4"
            else:
                # Prefix with a random token so concurrent downloads of URLs
                # sharing a basename cannot clobber each other's files.
                filename = f"{uuid.uuid4().hex[:8]}_{filename}"

            # Target path inside the configured temp directory.
            temp_path = os.path.join(settings.TEMP_DIR, filename)

            # Stream the download to disk in 8 KiB chunks.
            response = requests.get(video_url, stream=True, timeout=30)
            response.raise_for_status()

            with open(temp_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

            logger.info(f"视频下载完成: {temp_path}")
            return temp_path

        except Exception as e:
            # Remove a partially written file so broken downloads do not
            # accumulate in the temp directory.
            if temp_path and os.path.exists(temp_path):
                try:
                    os.remove(temp_path)
                except OSError:
                    pass
            logger.error(f"视频下载失败: {str(e)}")
            raise ValueError(f"无法下载视频: {str(e)}") from e
    
    async def predict_video(
        self,
        video_path_or_url: str,
        prompt: str,
        scene_threshold: float = 30.0,
        max_tokens: int = 200
    ) -> Dict[str, Any]:
        """
        Predict video content (supports URLs and local files; frames are
        sampled via scene-change detection).

        Args:
            video_path_or_url: Video path or URL.
            prompt: Analysis prompt.
            scene_threshold: Scene-detection threshold.
            max_tokens: Maximum number of generated tokens.
        Returns:
            Prediction result dictionary; includes an Ollama summary when
            the prediction produced output.
        Raises:
            RuntimeError: If model initialization fails or any step of the
                prediction raises (the error is wrapped and re-raised).
        """
        start_time = time.time()
        temp_file_path = None
        
        try:
            # Make sure the model is loaded before doing any work.
            if not await self.ensure_model_ready():
                raise RuntimeError("模型初始化失败")
            
            logger.info(f"开始预测视频: {video_path_or_url}")
            
            # Decide whether the input is a URL or a local path.
            if video_path_or_url.startswith(('http://', 'https://')):
                # Download the video from the URL.
                # NOTE(review): this is a blocking HTTP download inside an
                # async method — it will stall the event loop for large files.
                video_path = self.download_video_from_url(video_path_or_url)
                temp_file_path = video_path  # remember the temp file for cleanup
            else:
                # Use the local path as-is.
                video_path = video_path_or_url
            
            # Extract frames at scene changes.
            frames = self.extract_scene_change_frames(video_path, threshold=scene_threshold, target_size=224)
            if frames is None:
                raise ValueError("无法提取视频帧")
            
            logger.info(f"提取到 {len(frames)} 个场景变化帧")
            
            # Run the prediction; split into batches when there are many frames.
            if len(frames) > 30:
                logger.info(f"帧数 ({len(frames)}) 超过30，将进行分段处理")
                prediction_result = await self._predict_video_in_batches(frames, prompt, max_tokens, start_time)
            else:
                # Small enough to process in a single pass.
                prediction_result = await self._predict_single_batch(frames, prompt, max_tokens, start_time)
            
            # Free GPU memory before handing off to Ollama.
            logger.info("清理GPU内存...")
            self.cleanup_gpu_memory()
            
            # Summarize the prediction with Ollama by default.
            if prediction_result.get("output"):
                logger.info("使用Ollama生成预测总结...")
                summary_result = await self.summarize_with_ollama(
                    video_analysis_result=prediction_result["output"],
                    custom_prompt=f"请总结以下视频预测结果：{prompt}"
                )
                
                # Attach the summary to the prediction result.
                prediction_result["ollama_summary"] = summary_result
                prediction_result["ollama_enabled"] = True
                prediction_result["ollama_model"] = self.ollama_model
                
                logger.info("Ollama总结完成")
            else:
                prediction_result["ollama_summary"] = None
                prediction_result["ollama_enabled"] = False
            
            return prediction_result
            
        except Exception as e:
            processing_time = time.time() - start_time
            error_message = str(e)
            
            logger.error(f"视频预测失败: {error_message}")
            raise RuntimeError(f"视频预测失败: {error_message}")
            
        finally:
            # Always remove the downloaded temp file, even on failure.
            if temp_file_path and os.path.exists(temp_file_path):
                try:
                    os.remove(temp_file_path)
                    logger.info(f"临时文件已清理: {temp_file_path}")
                except Exception as e:
                    logger.warning(f"清理临时文件失败: {e}")
    
    async def _predict_single_batch(
        self,
        frames: np.ndarray,
        prompt: str,
        max_tokens: int,
        start_time: float
    ) -> Dict[str, Any]:
        """
        Run a single-pass prediction over one batch of frames.

        Args:
            frames: Array of video frames.
            prompt: Analysis prompt.
            max_tokens: Maximum number of generated tokens.
            start_time: Timestamp when the overall prediction started.
        Returns:
            Prediction result dictionary.
        """
        logger.info("使用 VideoChat-Flash 预测视频内容...")

        # Greedy, non-sampled decoding settings for the model.
        response = self.model.chat(
            self.tokenizer,
            video=frames,
            question=prompt,
            generation_config={
                "max_new_tokens": max_tokens,
                "temperature": 0.0,
                "top_p": 0.1,
                "do_sample": False,
            }
        )

        elapsed = time.time() - start_time

        logger.info(f"视频预测完成，用时: {elapsed:.2f}秒")
        logger.info(f"处理帧数: {len(frames)}, 结果长度: {len(response)} 字符")

        return {
            "output": response,
            "processing_time": elapsed,
            "frames_processed": len(frames),
            "frame_extraction_method": "scene_detection",
        }
    
    async def _predict_video_in_batches(
        self,
        all_frames: np.ndarray,
        prompt: str,
        max_tokens: int,
        start_time: float
    ) -> Dict[str, Any]:
        """
        Predict video content in several batches and merge the outputs.

        Args:
            all_frames: All extracted video frames.
            prompt: Analysis prompt.
            max_tokens: Maximum generated tokens per batch.
            start_time: Timestamp when the overall prediction started.
        Returns:
            Merged prediction result dictionary.
        """
        # Cut the frame array into chunks of at most 30 frames.
        frame_batches = self.split_frames_into_batches(all_frames, max_frames_per_batch=30)
        total = len(frame_batches)
        batch_results = []

        logger.info(f"开始分批次处理 {total} 个批次")

        for idx, chunk in enumerate(frame_batches, start=1):
            logger.info(f"处理第 {idx}/{total} 批次，包含 {len(chunk)} 帧")

            # Tell the model which part of the video this chunk is.
            chunk_prompt = f"这是视频的第{idx}部分（共{total}部分）。{prompt}"

            try:
                # Run VideoChat-Flash on this chunk of frames.
                response = self.model.chat(
                    self.tokenizer,
                    video=chunk,
                    question=chunk_prompt,
                    generation_config={
                        "max_new_tokens": max_tokens,
                        "temperature": 0.0,
                        "top_p": 0.1,
                        "do_sample": False,
                    }
                )
                batch_results.append(f"第{idx}部分: {response}")
                logger.info(f"第 {idx} 批次处理完成，结果长度: {len(response)} 字符")
            except Exception as batch_error:
                # Record the failure in the merged output instead of aborting.
                error_msg = f"第{idx}批次处理失败: {str(batch_error)}"
                logger.error(error_msg)
                batch_results.append(f"第{idx}部分: 处理失败 - {error_msg}")

        # Join the per-chunk outputs into one text.
        combined_result = "\n\n".join(batch_results)
        elapsed = time.time() - start_time

        logger.info(f"分批次视频预测完成，用时: {elapsed:.2f}秒")
        logger.info(f"总计处理 {total} 个批次，最终结果长度: {len(combined_result)} 字符")

        return {
            "output": combined_result,
            "processing_time": elapsed,
            "frames_processed": len(all_frames),
            "batch_count": total,
            "frame_extraction_method": "scene_detection",
        }
    
    async def force_reinitialize_model(self) -> bool:
        """
        Force the model to be re-initialized from scratch.

        Intended for cases where the loaded model is corrupted or must be
        reloaded. Holds the initialization lock while tearing down the
        current components, then re-runs the normal initialization path.

        Returns:
            True if re-initialization succeeded, False otherwise.
        """
        logger.info("强制重新初始化模型...")
        
        async with self._initialization_lock:
            # Drop references to the current model components so they can
            # be garbage-collected before reloading.
            if self.model is not None:
                del self.model
                self.model = None
                
            if self.tokenizer is not None:
                del self.tokenizer
                self.tokenizer = None
                
            if self.image_processor is not None:
                del self.image_processor
                self.image_processor = None
                
            self.model_loaded = False
            self._initializing = False
            
            # Return cached GPU memory held by the freed tensors to the driver.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            
            logger.info("现有模型已清除，开始重新初始化...")
            
        # Re-initialize outside the lock: initialize_model acquires the same
        # lock itself, and asyncio.Lock is not reentrant.
        return await self.initialize_model()
    
    def is_model_ready(self) -> bool:
        """
        检查模型是否完全准备好
        进行更严格的检查
        """
        return (
            self.model_loaded and 
            self.model is not None and 
            self.tokenizer is not None and
            self.image_processor is not None and
            self.device is not None and
            not self._initializing
        )
    
    async def ensure_model_ready(self) -> bool:
        """
        Convenience guard: make sure the model is ready, initializing it on
        demand. Call before any operation that needs the model.
        """
        if not self.is_model_ready():
            logger.info("模型未准备好，正在初始化...")
            return await self.initialize_model()
        return True
    
    async def analyze_video(
        self, 
        video_path: str, 
        prompt: str = "视频内容总结",
        max_frames: int = 8,
        max_tokens: int = 200,
        scene_threshold: float = 30.0,
        custom_summary_prompt: Optional[str] = None,
        use_keyframe_reconstruction: bool = False,
        use_direct_split: bool = False
    ) -> Dict[str, Any]:
        """
        Analyze video content. Dispatches to one of three strategies: the
        traditional pipeline, keyframe reconstruction, or (as a test feature)
        direct scene splitting.

        Args:
            video_path: Path of the video file.
            prompt: Analysis prompt.
            max_frames: Maximum frames per batch (traditional method only).
            max_tokens: Maximum number of generated tokens.
            scene_threshold: Scene-detection threshold.
            custom_summary_prompt: Custom summarization prompt.
            use_keyframe_reconstruction: Use the keyframe-reconstruction method.
            use_direct_split: Use direct scene splitting (test feature; only
                effective when use_keyframe_reconstruction is True).
        Returns:
            Result dictionary with the raw analysis and the Ollama summary.
        """
        if not use_keyframe_reconstruction:
            # Traditional pipeline.
            return await self._analyze_video_traditional(
                video_path=video_path,
                prompt=prompt,
                max_frames=max_frames,
                max_tokens=max_tokens,
                scene_threshold=scene_threshold,
                custom_summary_prompt=custom_summary_prompt
            )
        # Keyframe reconstruction (optionally with direct scene splitting).
        return await self.analyze_video_with_keyframe_reconstruction(
            video_path_or_url=video_path,
            prompt=prompt,
            scene_threshold=scene_threshold,
            max_tokens=max_tokens,
            custom_summary_prompt=custom_summary_prompt,
            use_direct_split=use_direct_split
        )
    
    async def _analyze_video_traditional(
        self, 
        video_path: str, 
        prompt: str = "视频内容总结",
        max_frames: int = 8,
        max_tokens: int = 200,
        scene_threshold: float = 30.0,
        custom_summary_prompt: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Traditional analysis pipeline: raw video analysis followed by an
        Ollama-generated summary (enabled by default).
        """
        logger.info("开始传统视频分析与Ollama总结流程...")

        # Step 1: run the raw video analysis.
        logger.info("步骤1: 执行视频内容分析...")
        analysis_result = await self._analyze_video_raw(
            video_path=video_path,
            prompt=prompt,
            max_frames=max_frames,
            max_tokens=max_tokens,
            scene_threshold=scene_threshold
        )

        # Bail out early if the analysis itself failed.
        if analysis_result["status"] != VideoAnalysisStatus.COMPLETED:
            logger.error("视频分析失败，跳过Ollama总结")
            return analysis_result

        # Step 2: release GPU memory before invoking Ollama.
        logger.info("步骤2: 清理GPU内存...")
        self.cleanup_gpu_memory()

        # Step 3: summarize with Ollama (default behavior).
        if not analysis_result["result"]:
            logger.warning("分析结果为空，跳过Ollama总结")
            analysis_result["ollama_summary"] = None
            analysis_result["metadata"]["ollama_enabled"] = False
        else:
            logger.info("步骤3: 使用Ollama生成视频总结...")
            summary = await self.summarize_with_ollama(
                video_analysis_result=analysis_result["result"],
                custom_prompt=custom_summary_prompt
            )
            # Attach the summary to the raw analysis result.
            analysis_result["ollama_summary"] = summary
            analysis_result["metadata"]["ollama_enabled"] = True
            analysis_result["metadata"]["ollama_model"] = self.ollama_model

        logger.info("传统视频分析与Ollama总结流程完成")
        return analysis_result
    
    def get_initialization_status(self) -> Dict[str, Any]:
        """获取详细的初始化状态信息"""
        return {
            "model_loaded": self.model_loaded,
            "model_exists": self.model is not None,
            "processor_exists": self.tokenizer is not None and self.image_processor is not None,
            "device_set": self.device is not None,
            "currently_initializing": self._initializing,
            "ready": self.is_model_ready(),
        }
    
    def get_model_info(self) -> Dict[str, Any]:
        """Return model metadata together with the initialization status."""
        cuda_ok = torch.cuda.is_available()
        return {
            "model_id": settings.MODEL_ID,
            "model_loaded": self.model_loaded,
            "model_ready": self.is_model_ready(),
            "device": getattr(self, 'device', None),
            "cuda_available": cuda_ok,
            "gpu_count": torch.cuda.device_count() if cuda_ok else 0,
            "initialization_status": self.get_initialization_status(),
        }
    
    def cleanup_gpu_memory(self) -> None:
        """
        Release cached GPU memory.

        Called after video inference finishes so the Ollama LLM has
        memory available.
        """
        logger.info("正在清理GPU内存...")

        if not torch.cuda.is_available():
            logger.info("CUDA不可用，跳过GPU内存清理")
            return

        # Return cached blocks to the driver.
        torch.cuda.empty_cache()

        # Report current usage after the cleanup.
        allocated = torch.cuda.memory_allocated()
        reserved = torch.cuda.memory_reserved()
        logger.info(f"GPU内存清理完成 - 已分配: {allocated / 1024**2:.2f}MB, 已保留: {reserved / 1024**2:.2f}MB")

        # Give the release a moment to fully take effect.
        time.sleep(1.0)
    
    async def _generate_ollama_summary(self, content: str, custom_prompt: Optional[str] = None) -> str:
        """
        Generate a summary via Ollama — simplified wrapper that returns the
        summary text directly instead of the full result dictionary.

        Args:
            content: Text to summarize.
            custom_prompt: Optional custom prompt.
        Returns:
            The summary text, or an error-description string on failure
            (this method never raises).
        """
        try:
            summary_result = await self.summarize_with_ollama(
                video_analysis_result=content,
                custom_prompt=custom_prompt
            )
            
            # summarize_with_ollama reports failures via its "status" key.
            if summary_result["status"] == "success":
                return summary_result["summary"]
            else:
                logger.error(f"Ollama总结失败: {summary_result.get('error')}")
                return f"总结失败: {summary_result.get('error', '未知错误')}"
                
        except Exception as e:
            logger.error(f"Ollama总结异常: {str(e)}")
            return f"总结异常: {str(e)}"

    async def summarize_with_ollama(self, video_analysis_result: str, custom_prompt: Optional[str] = None) -> Dict[str, Any]:
        """
        使用Ollama本地LLM总结视频分析结果
        Args:
            video_analysis_result: 视频分析的原始结果
            custom_prompt: 自定义提示词
        Returns:
            总结结果字典
        """
        start_time = time.time()
        
        try:
            # 构建总结提示词
            if custom_prompt:
                prompt = custom_prompt
            else:
                prompt = f"""请基于以下视频分析结果，生成一个简洁明了的总结：

视频分析结果：
{video_analysis_result}

请提供：
1. 视频的主要内容概述
2. 关键场景和重要细节
3. 整体印象和特点

请用中文回答，保持简洁但全面。"""
            
            logger.info("正在调用Ollama进行视频总结...")
            logger.info(f"使用模型: {self.ollama_model}")
            
            # 准备请求数据
            ollama_data = {
                "model": self.ollama_model,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": 0.7,
                    "top_p": 0.9,
                    "max_tokens": 1000
                }
            }
            
            # 发送请求到Ollama
            response = requests.post(
                f"{self.ollama_base_url}/api/generate",
                json=ollama_data,
                timeout=120  # 2分钟超时
            )
            
            if response.status_code == 200:
                result = response.json()
                summary = result.get("response", "").strip()
                
                processing_time = time.time() - start_time
                
                logger.info(f"Ollama总结完成，用时: {processing_time:.2f}秒")
                logger.info(f"总结长度: {len(summary)} 字符")
                
                return {
                    "summary": summary,
                    "original_analysis": video_analysis_result,
                    "processing_time": processing_time,
                    "model_used": self.ollama_model,
                    "status": "success"
                }
            else:
                error_msg = f"Ollama请求失败，状态码: {response.status_code}, 响应: {response.text}"
                logger.error(error_msg)
                
                return {
                    "summary": None,
                    "original_analysis": video_analysis_result,
                    "processing_time": time.time() - start_time,
                    "model_used": self.ollama_model,
                    "status": "failed",
                    "error": error_msg
                }
        
        except requests.exceptions.Timeout:
            error_msg = "Ollama请求超时"
            logger.error(error_msg)
            
            return {
                "summary": None,
                "original_analysis": video_analysis_result,
                "processing_time": time.time() - start_time,
                "model_used": self.ollama_model,
                "status": "failed",
                "error": error_msg
            }
        
        except Exception as e:
            error_msg = f"Ollama总结失败: {str(e)}"
            logger.error(error_msg)
            
            return {
                "summary": None,
                "original_analysis": video_analysis_result,
                "processing_time": time.time() - start_time,
                "model_used": self.ollama_model,
                "status": "failed",
                "error": error_msg
            }
    
    async def analyze_video_with_keyframe_reconstruction(
        self,
        video_path_or_url: str,
        prompt: str = "请分析这个视频片段的内容",
        scene_threshold: float = 30.0,
        max_tokens: int = 200,
        custom_summary_prompt: Optional[str] = None,
        fps_for_keyframe_video: int = 1,
        max_frames_per_segment: int = 40,
        cleanup_temp_files: bool = True,
        use_direct_split: bool = False
    ) -> Dict[str, Any]:
        """
        Analyze a video via keyframe reconstruction.

        Standard mode (use_direct_split=False):
        1. Extract keyframes
        2. Stitch them into a 1-fps video
        3. Cut that video into segments by frame count
        4. Analyze each segment with the model
        5. Merge the per-segment results

        Direct-split mode (use_direct_split=True, test feature):
        1. Split the original video directly at scene boundaries
        2. Analyze each scene segment with the model
        3. Merge the per-segment results

        Args:
            video_path_or_url: Video path or URL.
            prompt: Analysis prompt.
            scene_threshold: Scene-detection threshold.
            max_tokens: Maximum generated tokens per segment.
            custom_summary_prompt: Custom summarization prompt.
            fps_for_keyframe_video: Frame rate of the stitched keyframe video
                (default 1 fps, i.e. one frame per second).
            max_frames_per_segment: Maximum frames per segment (standard mode only).
            cleanup_temp_files: Whether to remove temporary files afterwards.
            use_direct_split: Use direct scene splitting (test feature).
        Returns:
            Analysis result dictionary; on failure a dictionary with
            status == FAILED and the error message (this method never raises).
        """
        start_time = time.time()
        temp_files_to_cleanup = []
        analysis_id = str(uuid.uuid4())
        # Track extracted keyframes explicitly (stays None in direct-split
        # mode) instead of probing locals() when building the metadata.
        keyframes = None
        
        try:
            # Make sure the model is loaded.
            if not await self.ensure_model_ready():
                raise RuntimeError("模型初始化失败")
            
            mode_name = "直接场景切分模式" if use_direct_split else "关键帧重构模式"
            logger.info(f"开始{mode_name}视频分析流程: {video_path_or_url}")
            
            # Resolve the input: download URLs, use local paths directly.
            if video_path_or_url.startswith(('http://', 'https://')):
                video_path = self.download_video_from_url(video_path_or_url)
                temp_files_to_cleanup.append(video_path)
            else:
                video_path = video_path_or_url
            
            # Pick the processing pipeline for the selected mode.
            if use_direct_split:
                # Direct scene-split mode (test feature).
                logger.info("步骤1: 直接按场景切分原视频...")
                video_segments = self.split_video_by_scenes_direct(
                    video_path, 
                    threshold=scene_threshold
                )
                
                if not video_segments:
                    raise ValueError("视频场景切分失败")
                
                # Register segment files (and their directory) for cleanup.
                temp_files_to_cleanup.extend(video_segments)
                if video_segments:
                    segment_dir = os.path.dirname(video_segments[0])
                    temp_files_to_cleanup.append(segment_dir)
                
                logger.info(f"切分为 {len(video_segments)} 个场景片段")
                
            else:
                # Standard keyframe-reconstruction mode.
                # Extract keyframes at scene changes.
                logger.info("步骤1: 提取关键帧...")
                keyframes = self.extract_scene_change_frames(
                    video_path, 
                    threshold=scene_threshold, 
                    target_size=224
                )
                
                if keyframes is None or len(keyframes) == 0:
                    raise ValueError("无法提取关键帧")
                
                logger.info(f"提取到 {len(keyframes)} 个关键帧")
                
                # Stitch the keyframes into a low-fps video.
                logger.info("步骤2: 拼接关键帧为视频...")
                keyframe_video_dir = os.path.join(settings.TEMP_DIR, f"keyframe_video_{analysis_id}")
                keyframe_video_path = os.path.join(keyframe_video_dir, "keyframes_1fps.mp4")
                
                if not self.create_video_from_keyframes(
                    keyframes, 
                    keyframe_video_path, 
                    fps=fps_for_keyframe_video
                ):
                    raise ValueError("关键帧视频拼接失败")
                
                temp_files_to_cleanup.append(keyframe_video_path)
                temp_files_to_cleanup.append(keyframe_video_dir)
                
                # Cut the stitched video into segments by frame count.
                logger.info("步骤3: 切割视频为多个片段...")
                segment_dir = os.path.join(settings.TEMP_DIR, f"segments_{analysis_id}")
                video_segments = self.split_video_into_segments(
                    keyframe_video_path,
                    max_frames_per_segment=max_frames_per_segment,
                    output_dir=segment_dir
                )
                
                if not video_segments:
                    raise ValueError("视频切割失败")
                
                temp_files_to_cleanup.extend(video_segments)
                temp_files_to_cleanup.append(segment_dir)
                
                logger.info(f"切割为 {len(video_segments)} 个片段")
            
            # Analyze every video segment.
            step_name = "步骤2: 分析每个场景片段..." if use_direct_split else "步骤4: 分析每个视频片段..."
            logger.info(step_name)
            segment_analyses = []
            
            for i, segment_path in enumerate(video_segments):
                logger.info(f"分析片段 {i+1}/{len(video_segments)}: {os.path.basename(segment_path)}")
                
                # Adjust the prompt per mode.
                if use_direct_split:
                    segment_prompt = f"这是视频的第{i+1}个场景（共{len(video_segments)}个场景）。{prompt}"
                else:
                    segment_prompt = f"{prompt}"
                
                try:
                    # Analyze a single segment; both modes feed the video file directly.
                    segment_result = await self._analyze_single_video_segment(
                        segment_path, 
                        segment_prompt, 
                        max_tokens,
                        use_scene_detection=False  # both modes use the video file directly
                    )
                    
                    segment_analyses.append({
                        "segment_number": i + 1,
                        "segment_path": os.path.basename(segment_path),
                        "analysis": segment_result,
                        "success": True
                    })
                    
                    logger.info(f"片段 {i+1} 分析完成，结果长度: {len(segment_result)} 字符")
                except Exception as segment_error:
                    # A failed segment is recorded, not fatal.
                    logger.error(f"片段 {i+1} 分析失败: {str(segment_error)}")
                    segment_analyses.append({
                        "segment_number": i + 1,
                        "segment_path": os.path.basename(segment_path),
                        "analysis": f"分析失败: {str(segment_error)}",
                        "success": False
                    })
            
            # Merge the per-segment analyses.
            step_name = "步骤3: 合并场景分析结果..." if use_direct_split else "步骤5: 合并片段分析结果..."
            logger.info(step_name)
            successful_analyses = [seg for seg in segment_analyses if seg["success"]]
            failed_analyses = [seg for seg in segment_analyses if not seg["success"]]
            
            if not successful_analyses:
                raise ValueError("所有片段分析都失败了")
            
            # Build the merged result text.
            combined_analysis = []
            for segment in successful_analyses:
                combined_analysis.append(
                    f"片段 {segment['segment_number']}: {segment['analysis']}"
                )
            
            combined_result = "\n\n".join(combined_analysis)
            
            # Append failure information, if any.
            if failed_analyses:
                failed_info = [f"片段 {seg['segment_number']}: {seg['analysis']}" for seg in failed_analyses]
                combined_result += f"\n\n--- 失败的片段 ---\n" + "\n".join(failed_info)
            
            processing_time = time.time() - start_time
            
            # Assemble the result payload.
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.COMPLETED,
                "result": combined_result,
                "error_message": None,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "keyframes_extracted": len(keyframes) if keyframes is not None else 0,
                    "segments_created": len(video_segments),
                    "segments_analyzed_successfully": len(successful_analyses),
                    "segments_failed": len(failed_analyses),
                    "prompt": prompt,
                    "max_tokens": max_tokens,
                    "device": self.device,
                    "fps_for_keyframe_video": fps_for_keyframe_video,
                    "max_frames_per_segment": max_frames_per_segment,
                    "scene_threshold": scene_threshold,
                    "method": "direct_scene_split" if use_direct_split else "keyframe_reconstruction",
                    "use_direct_split": use_direct_split
                }
            }
            
            # Generate an Ollama summary of the merged result.
            if combined_result:
                logger.info("步骤6: 使用Ollama生成总结...")
                
                # Free GPU memory so Ollama has room to run.
                self.cleanup_gpu_memory()
                
                summary_result = await self.summarize_with_ollama(
                    video_analysis_result=combined_result,
                    custom_prompt=custom_summary_prompt or "请基于以下分段分析结果，生成视频内容的完整总结"
                )
                
                result["ollama_summary"] = summary_result
                result["metadata"]["ollama_enabled"] = True
                result["metadata"]["ollama_model"] = self.ollama_model
                
                logger.info("Ollama总结完成")
            else:
                result["ollama_summary"] = None
                result["metadata"]["ollama_enabled"] = False
            
            # Cache the result.
            self._analysis_cache[analysis_id] = result
            
            mode_display = "直接场景切分" if use_direct_split else "关键帧重构"
            logger.info(f"{mode_display}视频分析完成，总用时: {processing_time:.2f}秒")
            logger.info(f"成功分析 {len(successful_analyses)}/{len(video_segments)} 个片段")
            
            return result
            
        except Exception as e:
            processing_time = time.time() - start_time
            error_message = str(e)
            
            logger.error(f"关键帧重构视频分析失败: {error_message}")
            
            # Fix: the failure metadata previously always reported
            # "keyframe_reconstruction", even in direct-split mode.
            result = {
                "analysis_id": analysis_id,
                "status": VideoAnalysisStatus.FAILED,
                "result": None,
                "error_message": error_message,
                "processing_time": processing_time,
                "completed_at": datetime.now(),
                "metadata": {
                    "method": "direct_scene_split" if use_direct_split else "keyframe_reconstruction",
                    "prompt": prompt
                }
            }
            
            self._analysis_cache[analysis_id] = result
            return result
            
        finally:
            # Remove temporary artifacts (downloaded video, stitched video,
            # segment files and directories) when requested.
            if cleanup_temp_files and temp_files_to_cleanup:
                logger.info("清理临时文件...")
                self._cleanup_temp_files(temp_files_to_cleanup)
    
    async def _analyze_video_raw(
        self,
        video_path: str,
        prompt: str = "视频内容总结",
        max_frames: int = 8,
        max_tokens: int = 1000,
        scene_threshold: float = 30.0
    ) -> Dict[str, Any]:
        """
        Analyze a video and append an Ollama-generated summary.

        BUG FIX: the original body awaited ``self._analyze_video_raw`` —
        i.e. itself — which recursed without bound and could never complete.
        The underlying analysis is now delegated to
        :meth:`analyze_video_simple` and its output is adapted to the result
        schema the summary step expects (``status`` / ``result`` /
        ``metadata`` keys).

        Args:
            video_path: path of the video file to analyze.
            prompt: analysis prompt passed to the model.
            max_frames: maximum number of frames sampled from the video.
            max_tokens: accepted for backward compatibility; generation
                length is currently fixed inside ``analyze_video_simple``.
            scene_threshold: accepted for backward compatibility; the simple
                analysis path does not perform scene detection.

        Returns:
            Dict with the raw analysis result plus ``ollama_summary`` and
            Ollama metadata flags.
        """
        logger.info("开始视频分析与Ollama总结流程...")

        # 1. Run the underlying (non-recursive) video analysis.
        logger.info("步骤1: 执行视频内容分析...")
        try:
            simple_result = await self.analyze_video_simple(
                video_path=video_path,
                question=prompt,
                max_num_frames=max_frames
            )
            analysis_result: Dict[str, Any] = {
                "status": VideoAnalysisStatus.COMPLETED,
                "result": simple_result["output"],
                "error_message": None,
                "processing_time": simple_result["processing_time"],
                "metadata": {
                    "prompt": prompt,
                    "max_frames": max_frames,
                    "max_tokens": max_tokens,
                    "scene_threshold": scene_threshold,
                },
            }
        except Exception as e:
            # Analysis failed: return a failed-status result and skip Ollama.
            logger.error("视频分析失败，跳过Ollama总结")
            return {
                "status": VideoAnalysisStatus.FAILED,
                "result": None,
                "error_message": str(e),
                "metadata": {"prompt": prompt},
                "ollama_summary": None,
            }

        # 2. Free GPU memory before handing work to Ollama.
        logger.info("步骤2: 清理GPU内存...")
        self.cleanup_gpu_memory()

        # 3. Generate the Ollama summary by default (when there is output).
        if analysis_result["result"]:
            logger.info("步骤3: 使用Ollama生成视频总结...")

            summary_result = await self.summarize_with_ollama(
                video_analysis_result=analysis_result["result"],
                custom_prompt=None
            )

            # Attach the summary to the raw analysis result.
            analysis_result["ollama_summary"] = summary_result
            analysis_result["metadata"]["ollama_enabled"] = True
            analysis_result["metadata"]["ollama_model"] = self.ollama_model

        else:
            logger.warning("分析结果为空，跳过Ollama总结")
            analysis_result["ollama_summary"] = None
            analysis_result["metadata"]["ollama_enabled"] = False

        logger.info("视频分析与Ollama总结流程完成")
        return analysis_result
    
    def _read_all_video_frames(self, video_path: str, target_size: int = 224) -> Optional[np.ndarray]:
        """
        Read every frame of a video as a square RGB array (keyframe-reconstruction path).

        Args:
            video_path: path of the video file.
            target_size: edge length of the square output frames.

        Returns:
            Array of shape (N, target_size, target_size, 3), or None on failure.
        """
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")

            logger.info(f"读取视频所有帧: {video_path}")

            capture = cv2.VideoCapture(video_path)
            if not capture.isOpened():
                raise ValueError(f"无法打开视频文件: {video_path}")

            fps = capture.get(cv2.CAP_PROP_FPS)
            total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
            logger.info(f"视频FPS: {fps:.2f}, 总帧数: {total_frames}")

            frames = []
            while True:
                ok, raw_frame = capture.read()
                if not ok:
                    break
                # OpenCV yields BGR; convert to RGB, then scale to the square target.
                rgb_frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2RGB)
                frames.append(cv2.resize(rgb_frame, (target_size, target_size)))

            capture.release()

            if not frames:
                raise ValueError("未能读取到任何视频帧")

            logger.info(f"成功读取 {len(frames)} 帧")
            return np.stack(frames)

        except Exception as e:
            logger.error(f"读取视频所有帧失败: {str(e)}")
            return None

    async def _analyze_single_video_segment(self, segment_path: str, prompt: str, max_tokens: int, use_scene_detection: bool = True) -> str:
        """
        Analyze a single video segment with VideoChat-Flash.

        The two modes only differ in how frames are sampled; the model call
        itself was duplicated verbatim in the original and is now performed
        once after the mode-specific frame extraction.

        Args:
            segment_path: path of the video segment.
            prompt: analysis prompt.
            max_tokens: maximum number of generated tokens.
            use_scene_detection: sample frames via scene detection (True), or
                read up to 30 frames directly (False, keyframe-reconstruction mode).

        Returns:
            The model's answer text, or an error-description string on failure.
        """
        try:
            if use_scene_detection:
                # Legacy mode: scene-detection-driven frame sampling.
                logger.info("传统模式：使用场景检测提取帧")
                frames = self.read_video_frames(segment_path, max_frames=8, scene_threshold=50.0)
                if frames is None or len(frames) == 0:
                    return f"无法读取视频片段: {segment_path}"
            else:
                # Keyframe-reconstruction mode: extract frames straight from the file.
                logger.info("关键帧重构模式：直接使用视频路径")
                if not os.path.exists(segment_path):
                    return f"视频文件不存在: {segment_path}"
                logger.info(f"直接分析视频文件: {segment_path}")
                frames = self.read_video_frames(segment_path, max_frames=30)
                if frames is None or len(frames) == 0:
                    return f"无法从视频文件提取帧: {segment_path}"

            # Single shared VideoChat-Flash invocation (deterministic decoding).
            response = self.model.chat(
                self.tokenizer,
                video=frames,
                question=prompt,
                generation_config=dict(
                    max_new_tokens=max_tokens,
                    temperature=0.0,
                    top_p=0.1,
                    do_sample=False,
                )
            )
            return response

        except Exception as e:
            logger.error(f"分析视频片段失败 {segment_path}: {str(e)}")
            return f"分析失败: {str(e)}"
    
    def create_video_from_keyframes(self, frames: np.ndarray, output_path: str, fps: int = 1) -> bool:
        """
        Stitch keyframes into an mp4 video file.

        Fixes over the original:
        - ``os.makedirs(os.path.dirname(output_path))`` raised when
          ``output_path`` had no directory component (``os.makedirs("")``
          fails); the parent directory is now created only when present.
        - the ``VideoWriter`` is released in a ``finally`` block so the OS
          handle is not leaked if writing raises mid-way.

        Args:
            frames: keyframe array of shape (N, H, W, 3), RGB order.
            output_path: destination video path.
            fps: output frame rate (default: one frame per second).

        Returns:
            True when the video was written and exists on disk, else False.
        """
        try:
            if frames is None or len(frames) == 0:
                logger.error("没有关键帧可以拼接")
                return False

            logger.info(f"开始拼接 {len(frames)} 个关键帧为视频，FPS: {fps}")

            # All frames are assumed to share the first frame's resolution.
            height, width, channels = frames[0].shape
            logger.info(f"视频分辨率: {width}x{height}")

            # Only create the parent directory when the path actually has one.
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)

            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            if not video_writer.isOpened():
                logger.error(f"无法创建视频写入器: {output_path}")
                return False

            try:
                for i, frame in enumerate(frames):
                    # OpenCV expects BGR channel ordering.
                    video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

                    if i % 10 == 0:
                        logger.info(f"已写入 {i+1}/{len(frames)} 帧")
            finally:
                # Always release the writer, even if a write raised.
                video_writer.release()

            # Verify the output file actually materialized.
            if not os.path.exists(output_path):
                logger.error(f"视频文件创建失败: {output_path}")
                return False

            file_size = os.path.getsize(output_path)
            logger.info(f"视频拼接完成: {output_path} (大小: {file_size / 1024 / 1024:.2f}MB)")
            return True

        except Exception as e:
            logger.error(f"拼接视频失败: {str(e)}")
            return False
    
    def split_video_into_segments(self, video_path: str, max_frames_per_segment: int = 30, output_dir: Optional[str] = None) -> List[str]:
        """
        Split a video into fixed-frame-count mp4 segments.

        Fixes over the original: the capture and the currently-open segment
        writer are now released in a ``finally`` block, so an exception that
        unwinds the loop no longer leaks OS video handles; ``output_dir`` is
        annotated ``Optional[str]``; an unused frame counter was removed.

        Args:
            video_path: input video path.
            max_frames_per_segment: maximum frames per segment (for a 1 fps
                video, 30 frames == 30 seconds).
            output_dir: output directory; a fresh temp directory is created
                under ``settings.TEMP_DIR`` when None.

        Returns:
            List of segment file paths, or an empty list on failure.
        """
        cap = None
        segment_writer = None
        try:
            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")

            # Create the output directory (random suffix avoids collisions).
            if output_dir is None:
                output_dir = os.path.join(settings.TEMP_DIR, f"video_segments_{uuid.uuid4().hex[:8]}")
            os.makedirs(output_dir, exist_ok=True)

            logger.info(f"开始切割视频: {video_path}")
            logger.info(f"每段最大帧数: {max_frames_per_segment}, 输出目录: {output_dir}")

            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"无法打开视频文件: {video_path}")

            # Probe source properties; segments reuse fps and resolution.
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            logger.info(f"视频信息 - FPS: {fps}, 总帧数: {total_frames}, 分辨率: {width}x{height}")

            # Ceiling division: number of segments needed.
            num_segments = (total_frames + max_frames_per_segment - 1) // max_frames_per_segment
            logger.info(f"将切割为 {num_segments} 个片段")

            segment_paths: List[str] = []
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')

            current_segment = 1
            frame_count_in_segment = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Start a new segment when the previous one is full (or first loop).
                if frame_count_in_segment == 0:
                    if segment_writer is not None:
                        segment_writer.release()
                        segment_writer = None

                    segment_filename = f"segment_{current_segment:03d}.mp4"
                    segment_path = os.path.join(output_dir, segment_filename)
                    segment_paths.append(segment_path)

                    segment_writer = cv2.VideoWriter(segment_path, fourcc, fps, (width, height))
                    if not segment_writer.isOpened():
                        raise ValueError(f"无法创建视频写入器: {segment_path}")

                    logger.info(f"开始创建片段 {current_segment}: {segment_filename}")

                segment_writer.write(frame)
                frame_count_in_segment += 1

                # Close the segment once it reaches the frame budget.
                if frame_count_in_segment >= max_frames_per_segment:
                    segment_writer.release()
                    segment_writer = None

                    if os.path.exists(segment_paths[-1]):
                        file_size = os.path.getsize(segment_paths[-1])
                        logger.info(f"片段 {current_segment} 完成: {file_size / 1024 / 1024:.2f}MB")

                    current_segment += 1
                    frame_count_in_segment = 0

            # Flush the trailing, partially-filled segment (if any).
            if segment_writer is not None:
                segment_writer.release()
                segment_writer = None

                if segment_paths and os.path.exists(segment_paths[-1]):
                    file_size = os.path.getsize(segment_paths[-1])
                    logger.info(f"最后片段 {current_segment-1} 完成: {file_size / 1024 / 1024:.2f}MB")

            logger.info(f"视频切割完成，共创建 {len(segment_paths)} 个片段")
            return segment_paths

        except Exception as e:
            logger.error(f"视频切割失败: {str(e)}")
            return []

        finally:
            # Always release OS handles, even when an exception unwound the loop.
            if segment_writer is not None:
                segment_writer.release()
            if cap is not None:
                cap.release()
    
    def _cleanup_temp_files(self, file_paths: List[str]):
        """
        清理临时文件
        Args:
            file_paths: 要清理的文件路径列表
        """
        for file_path in file_paths:
            try:
                if os.path.exists(file_path):
                    os.remove(file_path)
                    logger.debug(f"已清理临时文件: {file_path}")
                    
                # 清理空的目录
                parent_dir = os.path.dirname(file_path)
                if os.path.exists(parent_dir) and not os.listdir(parent_dir):
                    os.rmdir(parent_dir)
                    logger.debug(f"已清理空目录: {parent_dir}")
                    
            except Exception as e:
                logger.warning(f"清理文件失败 {file_path}: {str(e)}")
    
    async def analyze_video_multi_turn(self, video_path: str,
                                       questions: List,
                                       subtitle: str,
                                       max_num_frames: int = 60) -> Dict[str, Any]:
        """
        Multi-turn conversational video analysis using VideoChat-Flash.

        Questions are answered in ascending ``id`` order, and the model's
        chat history is threaded through the turns so later questions can
        build on earlier answers.  The two near-identical ``model.chat``
        calls of the original differed only in whether ``chat_history`` was
        passed; they are merged into a single call built from a kwargs dict.

        Args:
            video_path: path of the video file.
            questions: question items carrying ``id`` and ``question`` fields.
            subtitle: currently unused; accepted for interface compatibility.
            max_num_frames: maximum number of frames sampled per call.

        Returns:
            Dict with per-question answers, processing time, and counters.

        Raises:
            RuntimeError: when model initialization or the overall run fails
                (the original cause is chained for diagnostics).
        """
        try:
            start_time = time.time()

            # Lazily bring up the model; abort if it cannot be loaded.
            if not await self.initialize_model():
                raise RuntimeError("模型初始化失败")

            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")

            logger.info(f"开始多轮对话分析视频: {video_path}")
            logger.info(f"问题数量: {len(questions)}")
            logger.info(f"最大帧数: {max_num_frames}")

            answers = []
            chat_history = None  # conversation state threaded across turns

            # Answer questions in ascending id order.
            for question_item in sorted(questions, key=lambda x: x.id):
                question_id = question_item.id
                question_text = question_item.question

                logger.info(f"处理问题 {question_id}: {question_text}")

                try:
                    # Build the call once; only the history kwarg varies per turn
                    # (first turn starts a fresh conversation).
                    chat_kwargs: Dict[str, Any] = dict(
                        video_path=video_path,
                        tokenizer=self.tokenizer,
                        user_prompt=question_text,
                        return_history=True,
                        max_num_frames=max_num_frames,
                        generation_config=dict(
                            max_new_tokens=1024*8,
                            top_p=0.9,
                            num_beams=1
                        )
                    )
                    if chat_history is not None:
                        chat_kwargs["chat_history"] = chat_history

                    response, chat_history = self.model.chat(**chat_kwargs)

                    answers.append({
                        "ans_id": question_id,
                        "answer": response
                    })

                    logger.info(f"问题 {question_id} 处理完成，答案长度: {len(response)} 字符")

                except Exception as question_error:
                    # A single failed question must not abort the whole run.
                    error_msg = f"处理问题 {question_id} 失败: {str(question_error)}"
                    logger.error(error_msg)
                    answers.append({
                        "ans_id": question_id,
                        "answer": f"处理失败: {str(question_error)}"
                    })

            processing_time = time.time() - start_time

            result = {
                "answers": answers,
                "processing_time": processing_time,
                "frames_processed": max_num_frames,
                "total_questions": len(questions)
            }

            logger.info(f"多轮对话视频分析完成，用时: {processing_time:.2f}秒")
            logger.info(f"处理了 {len(questions)} 个问题")

            return result

        except Exception as e:
            logger.error(f"多轮对话视频分析失败: {str(e)}")
            raise RuntimeError(f"多轮对话视频分析失败: {str(e)}") from e

    async def analyze_video_simple(self, video_path: str, question: str, max_num_frames: int = 60) -> Dict[str, Any]:
        """
        Single-turn video analysis using VideoChat-Flash.

        Args:
            video_path: path of the video file to analyze.
            question: the question/prompt for the model.
            max_num_frames: upper bound on frames the model samples.

        Returns:
            Dict with the model output, wall-clock time and frame budget.

        Raises:
            RuntimeError: if model initialization or the analysis fails.
        """
        try:
            began_at = time.time()

            # Lazily bring up the model; abort when it cannot be loaded.
            if not await self.initialize_model():
                raise RuntimeError("模型初始化失败")

            if not os.path.exists(video_path):
                raise FileNotFoundError(f"视频文件不存在: {video_path}")

            logger.info(f"开始分析视频: {video_path}")
            logger.info(f"问题: {question}")
            logger.info(f"最大帧数: {max_num_frames}")

            # VideoChat-Flash accepts the raw path directly; greedy decoding
            # keeps results reproducible.
            generation = dict(
                do_sample=False,
                temperature=0.0,
                max_new_tokens=1024,
                top_p=0.1,
                num_beams=1
            )
            response, _ = self.model.chat(
                video_path=video_path,
                tokenizer=self.tokenizer,
                user_prompt=question,
                max_num_frames=max_num_frames,
                generation_config=generation
            )

            processing_time = time.time() - began_at

            result = {
                "output": response,
                "processing_time": processing_time,
                "frames_processed": max_num_frames,  # frame budget handled inside VideoChat-Flash
            }

            logger.info(f"视频分析完成，用时: {processing_time:.2f}秒")
            logger.info(f"分析结果长度: {len(response)} 字符")

            return result

        except Exception as e:
            logger.error(f"简化视频分析失败: {str(e)}")
            raise RuntimeError(f"视频分析失败: {str(e)}")

# Module-level singleton service instance shared by the application.
video_service = VideoAnalysisService()