import asyncio
import json
from pathlib import Path
from typing import List, Dict, Optional, Callable, Awaitable, Any
from datetime import datetime
from app.config import settings
from app.utils.logger import logger
from app.services.video_splitter import VideoSplitter
from app.services.qwen_service import QwenVLService
from app.services.internvl_service import InternVLService

class LargeVideoAnalyzer:
    """大视频分析服务"""
    
    def __init__(self):
        """Set up the splitter plus the two vision-language back ends."""
        # Upper bound on concurrently running segment analyses (parallel mode).
        self.max_concurrent_analysis = 3
        # Upstream collaborators. NOTE(review): per-call timeouts come from the
        # service configuration (QWEN_TIMEOUT), not from this class.
        self.video_splitter = VideoSplitter()
        self.qwen_service = QwenVLService()
        self.internvl_service = InternVLService()
        
    async def analyze_large_video(
        self,
        video_path: Path,
        prompt: str,
        split_method: str = "duration",  # duration, scene, auto
        segment_duration: int = 60,
        max_segments: int = 50,
        analysis_mode: str = "sequential",  # sequential, parallel, hybrid
        min_segment_duration: int = 10,
        segment_prompt_template: Optional[str] = None,
        aggregate_prompt: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None
    ) -> Dict:
        """Analyze a large video end to end.

        Videos short and small enough for a single model call are analyzed
        directly; otherwise the video is split into segments, each segment is
        analyzed according to *analysis_mode*, the per-segment results are
        merged, and the merged report is persisted.

        Args:
            video_path: Path to the source video file.
            prompt: Base analysis prompt.
            split_method: "duration", "scene" or "auto".
            segment_duration: Target segment length in seconds; also the
                direct-analysis duration threshold.
            max_segments: Hard cap on the number of segments produced.
            analysis_mode: "sequential", "parallel" or "hybrid".
            min_segment_duration: Minimum segment length in seconds.
            segment_prompt_template: Optional placeholder template used for
                per-segment prompts.
            aggregate_prompt: Optional prompt steering the final summary.
            progress_callback: Optional async callback receiving progress
                events; when omitted, segment progress is only logged.

        Returns:
            Either a "single_video" result dict or a merged
            "large_video_analysis" report.

        Raises:
            Exception: wraps any underlying failure; the original exception
                is chained as ``__cause__``.
        """
        try:
            logger.info(f"开始分析大视频: {video_path}")

            # Fail fast if the file is gone before probing it.
            if not video_path.exists():
                raise FileNotFoundError(f"视频文件不存在: {video_path}")

            logger.info(f"视频文件存在，大小: {video_path.stat().st_size} bytes")

            # 1. Probe the video to decide whether splitting is required.
            video_info = await self.video_splitter._get_video_info(video_path)
            logger.info(f"视频信息: {video_info}")

            # Short and small enough for one pass: skip splitting entirely.
            if (video_info['duration'] <= segment_duration and
                video_info['file_size'] <= settings.QWEN_MAX_VIDEO_SIZE_MB * 1024 * 1024):
                return await self._analyze_single_video(video_path, prompt)

            # 2. Split the video into segments.
            split_result = await self._split_video(
                video_path, split_method, segment_duration, max_segments, min_segment_duration
            )

            logger.info(f"拆分结果: need_split={split_result.get('need_split')}, segments_count={len(split_result.get('segments', []))}")

            if not split_result['need_split']:
                logger.info("视频无需拆分，直接分析")
                return await self._analyze_single_video(video_path, prompt)

            # 3. Analyze each segment. A logging-only callback stands in when
            #    the caller did not supply one.
            async def _default_progress_cb(event: Dict[str, Any]):
                if event.get("type") == "segment_progress":
                    logger.info(
                        "片段实时进度: index=%s status=%s",
                        event.get("payload", {}).get("segment_index"),
                        event.get("status")
                    )

            cb = progress_callback or _default_progress_cb

            analysis_results = await self._analyze_segments(
                split_result['segments'],
                prompt,
                analysis_mode,
                segment_prompt_template,
                cb
            )

            # 4. Merge the per-segment results into one report.
            final_result = await self._merge_analysis_results(
                analysis_results, split_result, prompt, aggregate_prompt
            )

            # 5. Persist the full report (best-effort; failures only logged).
            await self._save_analysis_result(split_result['video_id'], final_result)

            logger.info(f"大视频分析完成: {video_path}")
            return final_result

        except Exception as e:
            logger.error(f"大视频分析失败: {e}")
            # Chain the original exception so the traceback is preserved
            # (previously it was discarded by the bare re-wrap).
            raise Exception(f"大视频分析失败: {str(e)}") from e
    
    async def _split_video(
        self, 
        video_path: Path, 
        method: str, 
        duration: int, 
        max_segments: int,
        min_duration: int = 10
    ) -> Dict:
        """拆分视频"""
        if method == "scene":
            return await self.video_splitter.split_video_by_scenes(
                video_path, min_segment_duration=min_duration, max_segments=max_segments
            )
        elif method == "duration":
            return await self.video_splitter.split_video_by_duration(
                video_path, duration, max_segments, min_duration
            )
        elif method == "auto":
            # 自动选择：先尝试场景检测，失败则用时长拆分
            try:
                result = await self.video_splitter.split_video_by_scenes(
                    video_path, min_segment_duration=min_duration, max_segments=max_segments
                )
                if result['segment_count'] > 1:
                    return result
            except:
                pass
            return await self.video_splitter.split_video_by_duration(
                video_path, duration, max_segments, min_duration
            )
        else:
            raise ValueError(f"不支持的拆分方法: {method}")
    
    async def _analyze_single_video(self, video_path: Path, prompt: str) -> Dict:
        """Analyze a video in one pass (no splitting).

        Builds the public URL the model service fetches the video from, runs
        Qwen3-VL on it, and falls back to a plain metadata report when the
        model call fails.

        Returns:
            A "single_video" result dict with the analysis text and metadata.
        """
        try:
            # Public URL for the model service (fixes the corrupted
            # "(unknown)" placeholder that had replaced the filename).
            filename = video_path.name
            video_url = f"{settings.VIDEO_BASE_URL}/videos/{filename}"

            # Prefer Qwen3-VL; degrade to a metadata-only report on failure.
            try:
                result = await self.qwen_service.analyze_video_by_url(video_url, prompt)
            except Exception as qwen_error:
                logger.warning(f"Qwen分析失败，使用备用方案: {qwen_error}")
                video_info = await self.video_splitter._get_video_info(video_path)
                # Restored the "\n" separators that had degenerated into
                # backslash-newline line continuations (which emit nothing).
                result = (
                    f"视频文件: {filename}\n"
                    f"时长: {video_info.get('duration', 0):.1f}秒\n"
                    f"分辨率: {video_info.get('width', 0)}x{video_info.get('height', 0)}\n"
                    f"帧率: {video_info.get('fps', 0):.1f}fps\n"
                    f"文件大小: {video_info.get('file_size', 0) / (1024*1024):.1f}MB\n\n"
                    f"注意: AI服务暂时不可用，无法进行详细分析。\n"
                    f"原始提示词: {prompt}"
                )

            return {
                "type": "single_video",
                "video_path": str(video_path),
                "video_url": video_url,
                "analysis_result": result,
                "prompt": prompt,
                "analyzed_at": datetime.now().isoformat()
            }

        except Exception as e:
            logger.error(f"单视频分析失败: {e}")
            raise
    
    async def _analyze_segments(
        self, 
        segments: List[Dict], 
        prompt: str, 
        mode: str,
        segment_prompt_template: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None
    ) -> List[Dict]:
        """分析视频片段"""
        total_len = len(segments)
        if mode == "sequential":
            return await self._analyze_segments_sequential(segments, prompt, segment_prompt_template, progress_callback, global_total=total_len)
        elif mode == "parallel":
            return await self._analyze_segments_parallel(segments, prompt, segment_prompt_template, progress_callback, global_total=total_len)
        elif mode == "hybrid":
            return await self._analyze_segments_hybrid(segments, prompt, segment_prompt_template, progress_callback, global_total=total_len)
        else:
            raise ValueError(f"不支持的分析模式: {mode}")
    
    async def _analyze_segments_sequential(
        self, 
        segments: List[Dict], 
        prompt: str,
        segment_prompt_template: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None,
        global_total: Optional[int] = None
    ) -> List[Dict]:
        """顺序分析片段"""
        results = []
        total_used = global_total if isinstance(global_total, int) and global_total > 0 else len(segments)
        
        for i, segment in enumerate(segments):
            try:
                logger.info(f"分析片段 {i+1}/{total_used}: {segment['filename']}")
                
                # 为每个片段添加上下文信息
                segment_prompt = self._create_segment_prompt(prompt, segment, i, total_used, segment_prompt_template)
                
                # 开始事件（用于前端创建可折叠条目）
                if progress_callback:
                    await progress_callback({
                        "type": "segment_start",
                        "status": "running",
                        "payload": {
                            "segment_index": i,
                            "segment_info": segment,
                            "prompt": segment_prompt,
                            "started_at": datetime.now().isoformat()
                        }
                    })

                # 分析片段
                analysis_result = await self.qwen_service.analyze_video_by_url(
                    segment['url'], segment_prompt
                )
                
                # 统一转字符串后再判断
                if not isinstance(analysis_result, str):
                    try:
                        import json as _json
                        analysis_result = _json.dumps(analysis_result, ensure_ascii=False, indent=2)
                    except Exception:
                        analysis_result = str(analysis_result)
                status = "success" if analysis_result.strip() else "failed"
                # 允许失败时展示明确原因（来自 done_reason 拼入的文本）
                if status == "failed" and analysis_result:
                    pass
                elif status == "failed":
                    analysis_result = ""

                result_payload = {
                    "segment_index": i,
                    "segment_info": segment,
                    "prompt": segment_prompt,
                    "analysis_result": "",
                    "analyzed_at": None,
                    "status": status
                }

                # 流式增量（若无真实流，则按块模拟）
                if progress_callback and isinstance(analysis_result, str) and analysis_result:
                    chunk_size = 120
                    total = len(analysis_result)
                    pos = 0
                    while pos < total:
                        part = analysis_result[pos:pos+chunk_size]
                        pos += chunk_size
                        await progress_callback({
                            "type": "segment_delta",
                            "status": "running",
                            "payload": {
                                "segment_index": i,
                                "delta": part
                            }
                        })
                        await asyncio.sleep(0.02)

                # 最终完成事件
                result_payload["analysis_result"] = analysis_result
                result_payload["analyzed_at"] = datetime.now().isoformat()
                results.append(result_payload)

                if progress_callback:
                    await progress_callback({
                        "type": "segment_progress",
                        "status": status,
                        "payload": result_payload
                    })
                
                # 添加延迟避免过载
                await asyncio.sleep(1)
                
            except Exception as e:
                logger.error(f"片段分析失败 {segment['filename']}: {e}")
                result_payload = {
                    "segment_index": i,
                    "segment_info": segment,
                    "prompt": segment_prompt,
                    "analysis_result": f"分析失败: {str(e)}",
                    "analyzed_at": datetime.now().isoformat(),
                    "status": "failed"
                }
                results.append(result_payload)

                if progress_callback:
                    await progress_callback({
                        "type": "segment_progress",
                        "status": "failed",
                        "payload": result_payload
                    })
        
        return results
    
    async def _analyze_segments_parallel(
        self, 
        segments: List[Dict], 
        prompt: str,
        segment_prompt_template: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None,
        global_total: Optional[int] = None
    ) -> List[Dict]:
        """并行分析片段"""
        semaphore = asyncio.Semaphore(self.max_concurrent_analysis)
        total_used = global_total if isinstance(global_total, int) and global_total > 0 else len(segments)
        
        async def analyze_single_segment(segment: Dict, index: int) -> Dict:
            async with semaphore:
                try:
                    segment_prompt = self._create_segment_prompt(prompt, segment, index, total_used, segment_prompt_template)
                    
                    analysis_result = await self.qwen_service.analyze_video_by_url(
                        segment['url'], segment_prompt
                    )
                    
                    result_payload = {
                        "segment_index": index,
                        "segment_info": segment,
                        "prompt": segment_prompt,
                        "analysis_result": analysis_result,
                        "analyzed_at": datetime.now().isoformat(),
                        "status": "success"
                    }

                    if progress_callback:
                        await progress_callback({
                            "type": "segment_progress",
                            "status": "success",
                            "payload": result_payload
                        })

                    return result_payload
                    
                except Exception as e:
                    logger.error(f"片段分析失败 {segment['filename']}: {e}")
                    result_payload = {
                        "segment_index": index,
                        "segment_info": segment,
                        "prompt": segment_prompt,
                        "analysis_result": f"分析失败: {str(e)}",
                        "analyzed_at": datetime.now().isoformat(),
                        "status": "failed"
                    }

                    if progress_callback:
                        await progress_callback({
                            "type": "segment_progress",
                            "status": "failed",
                            "payload": result_payload
                        })

                    return result_payload
        
        # 并行执行所有分析任务
        tasks = [
            analyze_single_segment(segment, i) 
            for i, segment in enumerate(segments)
        ]
        
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # 处理异常结果
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                processed_results.append({
                    "segment_index": i,
                    "segment_info": segments[i],
                    "analysis_result": f"分析异常: {str(result)}",
                    "analyzed_at": datetime.now().isoformat(),
                    "status": "error"
                })
            else:
                processed_results.append(result)
        
        return processed_results
    
    async def _analyze_segments_hybrid(
        self, 
        segments: List[Dict], 
        prompt: str,
        segment_prompt_template: Optional[str] = None,
        progress_callback: Optional[Callable[[Dict[str, Any]], Awaitable[None]]] = None,
        global_total: Optional[int] = None
    ) -> List[Dict]:
        """混合模式：重要片段并行，其他顺序"""
        # 简化实现：前3个片段并行，其余顺序
        important_segments = segments[:3]
        remaining_segments = segments[3:]
        
        results = []
        
        # 并行分析重要片段
        if important_segments:
            parallel_results = await self._analyze_segments_parallel(important_segments, prompt, segment_prompt_template, global_total=len(segments))
            results.extend(parallel_results)
        
        # 顺序分析剩余片段
        if remaining_segments:
            sequential_results = await self._analyze_segments_sequential(remaining_segments, prompt, segment_prompt_template, global_total=len(segments))
            results.extend(sequential_results)
        
        # 按索引排序
        results.sort(key=lambda x: x['segment_index'])
        return results
    
    def _create_segment_prompt(
        self,
        base_prompt: str,
        segment: Dict,
        index: int,
        total: int,
        template: Optional[str] = None
    ) -> str:
        """Build the contextualized prompt for one segment.

        When *template* is non-blank, its placeholders are substituted;
        otherwise a default prompt embedding the segment's position and
        timing is produced. The result is stripped of surrounding whitespace.
        """
        if template and template.strip():
            # Substitution table; insertion order matches the historical
            # replacement order ({base_prompt} is expanded last).
            substitutions = {
                "{segment_id}": str(index + 1),
                "{segment_index}": str(index),
                "{segment_no}": str(index + 1),
                "{total_segments}": str(total),
                "{start_time}": f"{segment['start_time']:.1f}s",
                "{end_time}": f"{segment['end_time']:.1f}s",
                "{start_sec}": f"{segment['start_time']:.1f}",
                "{end_sec}": f"{segment['end_time']:.1f}",
                "{duration}": f"{segment['duration']:.1f}s",
                "{segment_filename}": segment.get('filename', ''),
                "{segment_url}": segment.get('url', ''),
                "{base_prompt}": base_prompt,
            }
            rendered = template
            for placeholder, value in substitutions.items():
                rendered = rendered.replace(placeholder, value)
            return rendered.strip()

        # Default prompt: base request plus segment position/timing context.
        time_info = f"时间段: {segment['start_time']:.1f}s - {segment['end_time']:.1f}s"
        position_info = f"片段位置: 第{index+1}段，共{total}段"

        segment_prompt = f"""
{base_prompt}

【片段信息】
{position_info}
{time_info}
片段时长: {segment['duration']:.1f}秒

【分析要求】
1. 重点关注这个时间段内的具体内容
2. 描述时请标注大概的时间点
3. 如果是系列片段的一部分，请注意与前后内容的连贯性
4. 提供这个片段的关键信息摘要
"""
        return segment_prompt.strip()
    
    async def _merge_analysis_results(
        self, 
        segment_results: List[Dict], 
        split_info: Dict, 
        original_prompt: str,
        aggregate_prompt: Optional[str] = None
    ) -> Dict:
        """Merge per-segment analyses into a single aggregate report.

        Args:
            segment_results: All per-segment result dicts (status "success"
                or "failed") as produced by the segment analyzers.
            split_info: Split metadata; reads 'video_id', 'original_video',
                'total_duration' and 'split_method'.
            original_prompt: The user's original analysis prompt.
            aggregate_prompt: Optional prompt steering the overall summary;
                falls back to *original_prompt* when omitted.

        Returns:
            A "large_video_analysis" report dict. When no segment succeeded,
            a failure-shaped report with the same schema is returned instead
            of raising, so the surrounding pipeline still completes.

        Raises:
            Exception: re-raised only for unexpected errors while merging.
        """
        try:
            # Keep only the segments whose analysis reported success.
            successful_results = [
                r for r in segment_results 
                if r['status'] == 'success'
            ]
            
            if not successful_results:
                logger.warning("未获得任何成功片段，已返回失败汇总对象以保证流程不中断")
                # Failure-shaped report: identical schema to the success case
                # so downstream consumers never have to special-case it.
                return {
                    "type": "large_video_analysis",
                    "video_id": split_info['video_id'],
                    "original_video": split_info['original_video'],
                    "total_duration": split_info['total_duration'],
                    "segment_count": len(segment_results),
                    "successful_segments": 0,
                    "failed_segments": len(segment_results),
                    "timeline_summary": [],
                    "detailed_results": segment_results,
                    "overall_summary": "本次分析的所有片段均未得到有效输出，请检查上游视频理解服务（Qwen3‑VL）或调整提示词后重试。",
                    "key_insights": {
                        "key_actions": ["未能从任何片段提取信息"],
                        "important_timestamps": [],
                        "technical_details": [f"拆分方法: {split_info.get('split_method', 'unknown')}", "上游服务返回空响应"],
                        "main_themes": ["分析失败"],
                        "recommendations": ["检查模型服务连通性与日志", "缩短片段时长或降低同时并发", "简化提示词后重试"]
                    },
                    "split_method": split_info.get('split_method', 'duration'),
                    "analyzed_at": datetime.now().isoformat(),
                    "original_prompt": original_prompt
                }
            
            # Build a condensed timeline: one AI-written entry per segment.
            timeline_summary = []
            for result in successful_results:
                segment = result['segment_info']
                
                # Compress the full per-segment analysis into a short summary.
                segment_summary = await self._generate_segment_summary(
                    result['analysis_result'], 
                    segment['start_time'], 
                    segment['end_time']
                )
                
                timeline_summary.append({
                    "time_range": f"{segment['start_time']:.1f}s - {segment['end_time']:.1f}s",
                    "duration": f"{segment['duration']:.1f}s",
                    "summary": segment_summary
                })
            
            # Concatenate all segment analyses, tagged with their time ranges,
            # as input for the overall summary.
            all_content = "\n\n".join([
                f"【{r['segment_info']['start_time']:.1f}s-{r['segment_info']['end_time']:.1f}s】\n{r['analysis_result']}"
                for r in successful_results
            ])
            
            # Overall summary via AI; fall back to a static skeleton on error.
            try:
                overall_summary = await self._generate_overall_summary(all_content, aggregate_prompt or original_prompt)
            except Exception as summary_error:
                logger.error(f"整体摘要生成失败: {summary_error}")
                overall_summary = f"""
## 🎯 视频整体概述
基于{len(successful_results)}个片段的分析，视频内容已完成分段处理。

## ⏱️ 关键时间线
请查看下方的详细分段分析结果，了解具体的时间线信息。

## 🔍 重点信息
由于AI摘要生成失败，请参考详细的分段分析结果获取关键信息。

## 📝 总结观点
分析已完成，建议查看完整的分段分析结果以获得全面理解。

**注意：** 摘要生成遇到技术问题，但分段分析结果完整可用。
"""
            
            # Key insights via AI; fall back to a static dict on error.
            try:
                key_insights = await self._extract_key_insights(successful_results, original_prompt)
            except Exception as insights_error:
                logger.error(f"关键洞察提取失败: {insights_error}")
                key_insights = {
                    "key_actions": ["视频分析完成"],
                    "important_timestamps": [f"成功分析{len(successful_results)}个片段"],
                    "technical_details": [f"使用{split_info.get('split_method', 'unknown')}拆分方法"],
                    "main_themes": ["大视频分析"],
                    "recommendations": ["查看详细分段结果获取更多信息"]
                }
            
            return {
                "type": "large_video_analysis",
                "video_id": split_info['video_id'],
                "original_video": split_info['original_video'],
                "total_duration": split_info['total_duration'],
                "segment_count": len(segment_results),
                "successful_segments": len(successful_results),
                "failed_segments": len(segment_results) - len(successful_results),
                "timeline_summary": timeline_summary,
                "detailed_results": segment_results,
                "overall_summary": overall_summary,
                "key_insights": key_insights,
                "split_method": split_info.get('split_method', 'duration'),
                "analyzed_at": datetime.now().isoformat(),
                "original_prompt": original_prompt
            }
            
        except Exception as e:
            logger.error(f"结果汇总失败: {e}")
            raise
    
    async def _generate_overall_summary(self, all_content: str, prompt: str) -> str:
        """Generate the overall video summary via an LLM, with fallbacks.

        Tries Qwen first, then InternVL; if both fail (or anything else
        raises) a static Markdown skeleton embedding the error is returned,
        so callers always receive usable text. Never raises.
        """
        try:
            # Trim overly long input to stay within the model's context limit.
            if len(all_content) > 8000:
                segments = all_content.split('【')
                if len(segments) > 1:
                    # Keep the first and last few segments, drop the middle.
                    keep_count = min(6, len(segments) - 1)
                    if keep_count < len(segments) - 1:
                        selected_segments = segments[1:keep_count//2+1] + segments[-(keep_count//2):]
                        all_content = '【' + '【'.join(selected_segments)
                        # Restored the "\n\n" separator that had degenerated
                        # into backslash-newline line continuations (the note
                        # was being glued directly onto the content).
                        all_content += f"\n\n[注：为控制长度，已省略中间{len(segments)-1-keep_count}个片段的详细内容]"

            summary_prompt = f"""
你是一个专业的视频内容分析师。请基于以下分段分析结果，生成一个完整、连贯的视频内容摘要。

**原始分析要求：**
{prompt}

**分段分析结果：**
{all_content}

**请按以下结构生成摘要：**

## 🎯 视频整体概述
[简要描述视频的主要内容和目的]

## ⏱️ 关键时间线
[按时间顺序列出主要事件和操作步骤]

## 🔍 重点信息
[提取关键信息点、重要数据或核心观点]

## 📝 总结观点
[基于分析结果的总结性观点和建议]

**要求：**
1. 保持时间逻辑的连贯性
2. 突出重点内容，避免冗余
3. 语言简洁明了，结构清晰
4. 如有技术操作，请详细说明步骤
5. 总字数控制在500-800字之间
"""

            logger.info("开始调用大模型生成整体摘要")

            # Prefer Qwen; fall back to InternVL for text summarization.
            try:
                summary_result = await self.qwen_service.analyze_text(summary_prompt)
                logger.info("大模型摘要生成成功")
                return summary_result
            except Exception as qwen_error:
                logger.warning(f"通义千问摘要生成失败，尝试InternVL: {qwen_error}")

                try:
                    summary_result = await self.internvl_service.analyze_text(summary_prompt)
                    logger.info("InternVL摘要生成成功")
                    return summary_result
                except Exception as internvl_error:
                    logger.error(f"InternVL摘要生成也失败: {internvl_error}")
                    # Caught by the outer handler, which builds the fallback.
                    raise Exception("所有AI服务都无法生成摘要")

        except Exception as e:
            logger.error(f"生成整体摘要失败: {e}")
            # Static fallback so the pipeline still returns a summary.
            segment_count = len(all_content.split('【')) - 1 if '【' in all_content else 0
            return f"""
## 🎯 视频整体概述
基于{segment_count}个片段的分析，视频内容涵盖了多个时间段的详细信息。

## ⏱️ 关键时间线
请查看下方的详细分段分析结果，了解具体的时间线信息。

## 🔍 重点信息
由于AI摘要生成失败，请参考详细的分段分析结果获取关键信息。

## 📝 总结观点
摘要生成遇到技术问题，建议查看完整的分段分析结果以获得全面理解。

**错误信息：** {str(e)}
"""
    
    async def _generate_segment_summary(self, analysis_result: str, start_time: float, end_time: float) -> str:
        """为单个片段生成精炼摘要"""
        try:
            # 如果分析结果已经很短，直接返回
            if len(analysis_result) <= 150:
                return analysis_result
            
            summary_prompt = f"""
请将以下视频片段分析结果精炼为一句话摘要（不超过100字）：

时间段：{start_time:.1f}s - {end_time:.1f}s
分析结果：{analysis_result}

要求：
1. 提取最关键的信息
2. 保持时间逻辑性
3. 突出主要动作或事件
4. 语言简洁明了
"""
            
            # 调用AI生成精炼摘要
            try:
                summary = await self.qwen_service.analyze_text(summary_prompt)
                return summary.strip()
            except:
                # 如果AI生成失败，使用简单截取
                return analysis_result[:100] + "..." if len(analysis_result) > 100 else analysis_result
                
        except Exception as e:
            logger.warning(f"生成片段摘要失败: {e}")
            # 返回截取版本作为备选
            return analysis_result[:100] + "..." if len(analysis_result) > 100 else analysis_result
    
    async def _extract_key_insights(self, successful_results: List[Dict], original_prompt: str) -> Dict:
        """Extract structured key insights from the successful segments.

        Asks the LLM for a JSON object; falls back to text parsing, then to a
        purely mechanical extraction, and finally to a static error dict, so
        this never raises.
        """
        try:
            # Join all per-segment analyses. Restored the "\n\n" separator
            # that had degenerated into backslash-newline continuations
            # (segments were being concatenated with no separator at all).
            all_analyses = [r['analysis_result'] for r in successful_results]
            combined_text = "\n\n".join(all_analyses)

            # Truncate to 4000 chars to respect the model's input limit.
            # (The old inline "# 限制长度..." note sat inside the f-string and
            # leaked into the prompt sent to the model — now removed.)
            insights_prompt = f"""
基于以下视频分析结果，提取关键洞察信息：

原始分析要求：{original_prompt}

分析内容：
{combined_text[:4000]}

请提取并返回以下信息（JSON格式）：
{{
    "key_actions": ["关键动作1", "关键动作2", "关键动作3"],
    "important_timestamps": ["重要时间点1", "重要时间点2"],
    "technical_details": ["技术细节1", "技术细节2"],
    "main_themes": ["主要主题1", "主要主题2"],
    "recommendations": ["建议1", "建议2"]
}}

要求：
1. 每个数组最多包含5个元素
2. 内容要具体、准确
3. 避免重复信息
4. 突出最重要的内容
"""

            try:
                # Ask the model for the structured insights.
                insights_result = await self.qwen_service.analyze_text(insights_prompt)

                # Parse the reply; tolerate non-JSON answers. Uses the
                # module-level json import instead of re-importing locally.
                try:
                    insights_data = json.loads(insights_result)
                    logger.info("关键洞察提取成功")
                    return insights_data
                except json.JSONDecodeError:
                    logger.warning("AI返回的不是有效JSON，使用文本解析")
                    return self._parse_insights_from_text(insights_result)

            except Exception as ai_error:
                logger.warning(f"AI关键洞察提取失败: {ai_error}")
                # Mechanical extraction as the AI-free fallback.
                return self._extract_basic_insights(successful_results)

        except Exception as e:
            logger.error(f"关键洞察提取失败: {e}")
            return {
                "key_actions": ["信息提取失败"],
                "important_timestamps": [],
                "technical_details": [],
                "main_themes": ["请查看详细分析结果"],
                "recommendations": []
            }
    
    def _parse_insights_from_text(self, text: str) -> Dict:
        """Fallback parser used when the model's reply is not valid JSON.

        NOTE(review): currently a placeholder — it ignores *text* entirely
        and returns canned category labels.
        """
        placeholder_insights = {
            "key_actions": ["基于AI分析的关键动作"],
            "important_timestamps": ["重要时间节点"],
            "technical_details": ["技术实现细节"],
            "main_themes": ["主要内容主题"],
            "recommendations": ["改进建议"],
        }
        return placeholder_insights
    
    def _extract_basic_insights(self, successful_results: List[Dict]) -> Dict:
        """Derive minimal insights mechanically, without any AI call.

        Only the first three segments are inspected: each contributes its
        time range, and analyses longer than 50 characters contribute a
        truncated excerpt as a "key action".
        """
        important_timestamps = []
        key_actions = []

        for entry in successful_results[:3]:
            info = entry['segment_info']
            text = entry['analysis_result']

            # Time range of the segment.
            important_timestamps.append(
                f"{info['start_time']:.1f}s-{info['end_time']:.1f}s"
            )

            # Excerpt of the analysis as a stand-in for a real key action.
            if len(text) > 50:
                key_actions.append(text[:50] + "...")

        return {
            "key_actions": key_actions,
            "important_timestamps": important_timestamps,
            "technical_details": ["详见分段分析结果"],
            "main_themes": ["视频内容分析"],
            "recommendations": ["建议查看完整分析报告"],
        }
    
    async def _save_analysis_result(self, video_id: str, result: Dict):
        """Persist the merged analysis result as pretty-printed JSON.

        Writes reports/large_video_analysis/<video_id>_analysis.json.
        Best-effort: failures are logged, never raised, so a save problem
        cannot abort an otherwise successful analysis.
        """
        try:
            results_dir = Path("reports") / "large_video_analysis"
            # parents=True so this also works when "reports/" itself is
            # missing; the previous exist_ok-only call raised (and was then
            # silently swallowed) in that case, losing the report.
            results_dir.mkdir(parents=True, exist_ok=True)

            result_file = results_dir / f"{video_id}_analysis.json"
            with open(result_file, 'w', encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=2)

            logger.info(f"分析结果已保存: {result_file}")

        except Exception as e:
            logger.error(f"保存分析结果失败: {e}")
    
    def get_analysis_result(self, video_id: str) -> Optional[Dict]:
        """Load a previously saved analysis report for *video_id*.

        Looks for reports/large_video_analysis/<video_id>_analysis.json and
        returns the parsed dict, or None when the file does not exist.
        Read/parse failures are logged rather than raised.
        """
        try:
            result_file = Path("reports") / "large_video_analysis" / f"{video_id}_analysis.json"
            if result_file.exists():
                with open(result_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            return None
        except Exception as e:
            # NOTE(review): visible source ends here — the error path appears
            # to fall through and return None implicitly; confirm nothing
            # follows this line in the full file.
            logger.error(f"读取分析结果失败: {e}")