# app/processors/video_processor.py

import os
import cv2
from typing import Dict, Any, List
from .base import BaseProcessor

class VideoProcessor(BaseProcessor):
    """Video file processor.

    Extracts key frames (and, optionally, an audio transcript) from a video,
    vectorizes them via the embedding API and stores the vectors in Milvus
    under the requested knowledge base.
    """

    def __init__(self):
        super().__init__()
        # Container formats accepted by validate_file().
        self.supported_types = ['mp4', 'avi', 'mov', 'mkv']
        # Upper bound on key frames extracted per video.
        self.max_frames = 10

    def process(self, file_path: str, file_type: str, knowledge_base: str = "default", oss_url: str = None) -> Dict[str, Any]:
        """Process a video file end-to-end.

        Args:
            file_path: Path to the local video file.
            file_type: File extension (e.g. "mp4"); checked by validate_file().
            knowledge_base: Target knowledge base; non-default names are stored
                in a Milvus partition of the same name.
            oss_url: Optional OSS URL forwarded to store_vectors() for the
                audio-text chunks.

        Returns:
            Summary dict with extracted/stored counts and the knowledge base.
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        self.validate_file(file_path, file_type)
        logger.info(f"开始处理视频文件: {file_path}, 知识库: {knowledge_base}")

        # Extract key frames.
        images = self.extract_images(file_path)
        logger.info(f"提取到 {len(images)} 个关键帧")

        # Extract the audio transcript (optional; currently always empty).
        audio_text = self.extract_audio_text(file_path)
        if audio_text:
            logger.info(f"提取到音频文本: {len(audio_text)} 字符")

        # Vectorize and store the key frames.
        stored_image_count = 0
        if images:
            logger.info("开始向量化存储视频关键帧...")
            stored_image_count = self.store_image_vectors(images, file_path, file_type, knowledge_base)
            logger.info(f"成功存储 {stored_image_count} 个视频关键帧向量到知识库 {knowledge_base}")

        # Vectorize and store the audio transcript, if any.
        stored_text_count = 0
        if audio_text:
            logger.info("开始向量化存储音频文本...")
            text_chunks = [audio_text]  # the whole transcript is one chunk
            stored_text_count = self.store_vectors(text_chunks, file_path, file_type, knowledge_base, oss_url)
            logger.info(f"成功存储 {stored_text_count} 个音频文本向量到知识库 {knowledge_base}")

        return {
            'text_chunks_count': 1 if audio_text else 0,
            'images_count': len(images),
            'text_chunks': [audio_text] if audio_text else [],
            'images': images,
            'stored_vectors': stored_image_count + stored_text_count,
            'stored_text_vectors': stored_text_count,
            'stored_image_vectors': stored_image_count,
            'knowledge_base': knowledge_base
        }

    def extract_text(self, file_path: str) -> List[str]:
        """Video files carry no directly extractable text."""
        return []

    def extract_images(self, file_path: str) -> List[str]:
        """Extract key frames from the video.

        Frames are sampled at a fixed interval chosen from the video duration,
        near-black frames are skipped, and each kept frame is written as a JPEG
        under data/processed/frames.

        Returns:
            Paths of the saved frame images (possibly empty).
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        cap = cv2.VideoCapture(file_path)
        if not cap.isOpened():
            logger.error(f"无法打开视频文件: {file_path}")
            return []

        frame_paths = []
        frame_count = 0
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        duration = total_frames / fps if fps > 0 else 0

        logger.info(f"视频信息: 总帧数={total_frames}, FPS={fps:.2f}, 时长={duration:.2f}秒")

        # Sampling strategy: shorter videos keep proportionally more frames.
        if total_frames <= self.max_frames:
            # Few frames overall: keep every frame.
            interval = 1
            max_frames = total_frames
        elif duration <= 30:
            # Short video: extract up to the full frame budget.
            max_frames = min(self.max_frames, total_frames)
            interval = max(1, total_frames // max_frames)
        elif duration <= 120:
            # Medium-length video.
            max_frames = min(8, total_frames)
            interval = max(1, total_frames // max_frames)
        else:
            # Long video: only a handful of key frames.
            max_frames = min(5, total_frames)
            interval = max(1, total_frames // max_frames)

        logger.info(f"关键帧提取策略: 最大帧数={max_frames}, 间隔={interval}")

        extracted_count = 0
        while extracted_count < max_frames:
            ret, frame = cap.read()
            if not ret:
                break

            if frame_count % interval == 0:
                # Skip (near-)black frames: mean gray level must exceed 10.
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                if cv2.mean(gray)[0] > 10:
                    frame_filename = f"{os.path.splitext(os.path.basename(file_path))[0]}_frame{frame_count:04d}.jpg"
                    frame_path = os.path.join("data/processed/frames", frame_filename)
                    os.makedirs(os.path.dirname(frame_path), exist_ok=True)

                    # Persist the frame to disk.
                    cv2.imwrite(frame_path, frame)
                    frame_paths.append(frame_path)
                    extracted_count += 1

                    logger.info(f"提取关键帧 {extracted_count}: {frame_filename}")

            frame_count += 1

            # Safety net against metadata that under-reports the frame count.
            if frame_count > total_frames:
                break

        cap.release()
        logger.info(f"成功提取 {len(frame_paths)} 个关键帧")
        return frame_paths

    def store_image_vectors(self, image_paths: List[str], file_path: str, file_type: str, knowledge_base: str = "default") -> int:
        """Vectorize video key frames and store them in the image collection.

        The whole pipeline runs in a daemon thread with a 180s timeout.
        Local frame files are deleted afterwards whether or not the storage
        succeeded.

        Returns:
            Number of stored vectors (0 on failure or timeout).
        """
        from app.core.logging import get_logger
        from app.core.database import get_image_collection
        from app.core.knowledge_base_manager import get_kb_manager
        from app.api.rag.utils import call_aliyun_image_embedding_api
        from config.settings import settings
        import threading
        import time
        import concurrent.futures
        import numpy as np

        logger = get_logger(__name__)

        # Written by the worker thread; stays None if the thread times out.
        result = None

        def process_chunk(image_path, index):
            """Vectorize one frame; fall back to a random vector on failure."""
            try:
                vector = call_aliyun_image_embedding_api(image_path)
                logger.info(f"  - 第 {index + 1} 个视频关键帧向量化成功")
                return vector
            except Exception as e:
                logger.error(f"  - 第 {index + 1} 个视频关键帧向量化失败: {e}")
                return np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)

        def vectorize_with_timeout():
            # BUGFIX: without `nonlocal`, assignments below created a local
            # variable and the outer method always returned 0.
            nonlocal result
            try:
                # Resolve the image collection for this knowledge base.
                kb_manager = get_kb_manager()
                collection_names = kb_manager.get_collection_names(knowledge_base)
                image_collection_name = collection_names["image_collection"]
                collection = get_image_collection(image_collection_name)

                logger.info(f"开始向量化 {len(image_paths)} 个视频关键帧...")
                chunk_vectors = []

                # Small batches to stay under the embedding API rate limit
                # (video frames are slow to embed).
                batch_size = 3
                for i in range(0, len(image_paths), batch_size):
                    batch_paths = image_paths[i:i + batch_size]
                    logger.info(f"并发处理第 {i+1}-{min(i+batch_size, len(image_paths))}/{len(image_paths)} 个视频关键帧...")

                    batch_vectors = []
                    # Low concurrency: the embedding API is the bottleneck.
                    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                        futures = [executor.submit(process_chunk, path, idx) for idx, path in enumerate(batch_paths)]
                        # BUGFIX: collect in submission order (not as_completed)
                        # so vectors stay aligned with batch_paths / image_paths.
                        for future in futures:
                            try:
                                batch_vectors.append(future.result())
                            except Exception as e:
                                logger.error(f"并发处理异常: {e}")
                                # Degrade gracefully with a random vector.
                                fallback_vector = np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)
                                batch_vectors.append(fallback_vector)

                    chunk_vectors.extend(batch_vectors)
                    logger.info(f"  - 并发处理 {len(batch_paths)} 个视频关键帧完成")

                    # Pause between batches to avoid API throttling.
                    time.sleep(1.0)

                logger.info(f"批量向量化完成，成功生成 {len(chunk_vectors)} 个视频关键帧向量")

                # Build one Milvus entity per vectorized frame.
                entities = []
                for i, (image_path, vector) in enumerate(zip(image_paths[:len(chunk_vectors)], chunk_vectors)):
                    # Recover the frame timestamp from the file name.
                    timestamp = self._extract_timestamp_from_filename(image_path, file_path)
                    entities.append({
                        "image_path": image_path,
                        "source": file_path,
                        "file_type": file_type,
                        "vector": vector.tolist() if hasattr(vector, 'tolist') else vector,
                        "timestamp": timestamp,
                        "frame_index": i
                    })

                if not entities:
                    logger.warning("没有有效的视频关键帧向量数据")
                    result = 0
                    return

                logger.info(f"插入 {len(entities)} 个视频关键帧向量到Milvus集合 {image_collection_name}...")

                if knowledge_base != "default":
                    # Non-default knowledge bases live in a dedicated partition.
                    partition_name = knowledge_base
                    if not collection.has_partition(partition_name):
                        logger.info(f"创建视频关键帧分区: {partition_name}")
                        collection.create_partition(partition_name)
                    logger.info(f"插入视频关键帧到分区: {partition_name}")
                    collection.insert(entities, partition_name=partition_name)
                else:
                    # The default knowledge base uses the default partition.
                    collection.insert(entities)

                collection.flush()
                logger.info(f"成功存储 {len(entities)} 个视频关键帧向量")

                # Frames are no longer needed once their vectors are stored.
                logger.info("开始清理本地视频关键帧文件...")
                self._cleanup_frames(image_paths, logger)
                result = len(entities)

            except Exception as e:
                logger.error(f"视频关键帧向量化存储失败: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())

                # Clean up local frame files even on failure.
                logger.info("向量化失败，开始清理本地视频关键帧文件...")
                self._cleanup_frames(image_paths, logger)
                result = 0

        thread = threading.Thread(target=vectorize_with_timeout)
        thread.daemon = True
        thread.start()
        thread.join(timeout=180)  # generous timeout: video embedding is slow

        if thread.is_alive():
            logger.error("视频关键帧向量化处理超时")
            return 0

        return result if result is not None else 0

    def _cleanup_frames(self, image_paths: List[str], logger) -> None:
        """Best-effort deletion of locally saved key-frame files."""
        cleaned_count = 0
        for image_path in image_paths:
            try:
                if os.path.exists(image_path):
                    os.remove(image_path)
                    cleaned_count += 1
                    logger.debug(f"删除本地视频关键帧文件: {image_path}")
            except Exception as e:
                logger.error(f"删除本地视频关键帧文件 {image_path} 时出错: {e}")
        logger.info(f"清理完成，删除了 {cleaned_count}/{len(image_paths)} 个本地视频关键帧文件")

    def extract_audio_text(self, file_path: str) -> str:
        """Extract the audio transcript from the video (not implemented yet).

        Always returns an empty string for now. Future plan:
          1. Extract the audio track with ffmpeg.
          2. Transcribe it with a speech-recognition API.
          3. Return the recognized text.
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)
        logger.info("音频文本提取功能暂未启用")
        return ""

    def _extract_timestamp_from_filename(self, image_path: str, video_path: str) -> float:
        """Derive a frame's timestamp (in seconds) from its file name.

        Expects names like "video_frame0001.jpg"; returns 0.0 when the name
        does not match or the video's FPS is unavailable.
        """
        # BUGFIX: `logger` was previously undefined in this method, so the
        # except branch raised NameError instead of logging.
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        try:
            filename = os.path.basename(image_path)
            if '_frame' not in filename:
                return 0.0

            # Frame number is encoded (zero-padded) after "_frame".
            frame_part = filename.split('_frame')[1]
            frame_num = int(frame_part.split('.')[0])

            # The video's FPS converts the frame index to seconds.
            cap = cv2.VideoCapture(video_path)
            fps = cap.get(cv2.CAP_PROP_FPS)
            cap.release()

            return frame_num / fps if fps > 0 else 0
        except Exception as e:
            logger.error(f"提取时间戳失败: {e}")
            return 0.0