"""
镜头检测服务

提供视频镜头检测和切分功能的服务封装。
"""

import os
import json
import asyncio
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
from dataclasses import asdict
from loguru import logger

from ..detectors import (
    BaseDetector, MultiDetector, ShotBoundary, DetectionResult,
    FrameDifferenceDetector, EnhancedFrameDifferenceDetector,
    HistogramDetector, MultiChannelHistogramDetector, AdaptiveHistogramDetector
)

from src.core.di import Injectable
@Injectable()
class ShotDetectionService:
    """Service wrapper for video shot-boundary detection and shot-based segmentation.

    Lifecycle: construct, call :meth:`configure` (builds the detector instances),
    then use :meth:`detect_shots` / :meth:`segment_video`. The service is also a
    context manager; exiting calls :meth:`cleanup`.
    """

    def __init__(self):
        """
        Initialize the shot detection service.

        Detector instances are NOT created here; they are built by
        :meth:`configure` once configuration has been resolved.
        """
        # Effective per-detector configuration (merged defaults + user overrides).
        self.config: Dict[str, Any] = {}
        # Default configuration; populated by configure(). Initialized here so the
        # attribute always exists even if configure() has not been called yet.
        self.default_config: Dict[str, Any] = {}
        self.detectors: Dict[str, BaseDetector] = {}
        self.multi_detector = MultiDetector()
        self.logger = logger.bind(service="ShotDetection")

    def configure(self, config: Optional[Dict[str, Any]] = None) -> "ShotDetectionService":
        """
        Configure the service and (re)build all detector instances.

        Resolution order: global config section ``video_segmentation.shot_detection``
        if available and well-formed, otherwise the built-in fallback; then any
        per-detector overrides from *config* are merged on top.

        Args:
            config: Optional dict of detector parameters and service settings.
                Non-dict values are logged and ignored rather than raising.

        Returns:
            ShotDetectionService: self, to allow fluent chaining.
        """
        # Load the shot_detection section from the global configuration service;
        # fall back to hard-coded defaults on any failure so configure() never raises here.
        try:
            from src.services.config_service import config_service
            global_config = config_service.get_section("video_segmentation.shot_detection")
            if global_config and isinstance(global_config, dict):
                self.logger.info("从全局配置加载shot_detection配置")
                self.default_config = global_config
            else:
                self.logger.warning(f"全局shot_detection配置格式不正确 (类型: {type(global_config)})，使用默认值")
                self.default_config = self._get_fallback_config()
        except Exception as e:
            self.logger.warning(f"加载全局配置失败，使用默认值: {e}")
            self.default_config = self._get_fallback_config()

        # Merge caller-supplied overrides; tolerate (and log) a non-dict argument.
        if config:
            if isinstance(config, dict):
                self.config.update(config)
            else:
                self.logger.warning(f"传入的config参数不是字典类型: {type(config)}, 忽略")

        # Defensive check: an external config source may have handed back a non-dict.
        if not isinstance(self.default_config, dict):
            self.logger.error(f"default_config不是字典类型: {type(self.default_config)}, 值: {self.default_config}")
            self.default_config = self._get_fallback_config()

        for detector_name, default_params in self.default_config.items():
            # Skip scalar config entries (e.g. default_algorithm) — only
            # per-detector parameter dicts are merged below.
            if not isinstance(default_params, dict):
                self.logger.debug(f"跳过非字典配置项: {detector_name} = {default_params}")
                continue

            if detector_name not in self.config:
                self.config[detector_name] = {}

            # A user override that is not a dict cannot be merged; reset it.
            if not isinstance(self.config[detector_name], dict):
                self.logger.warning(f"配置项 {detector_name} 不是字典类型: {type(self.config[detector_name])}, 重置为空字典")
                self.config[detector_name] = {}

            # User-supplied values win over defaults.
            self.config[detector_name] = {**default_params, **self.config[detector_name]}

        self._initialize_detectors()
        return self

    def _get_fallback_config(self) -> Dict[str, Any]:
        """Return the built-in fallback configuration for all detectors."""
        return {
            'frame_diff': {
                'threshold': 0.35,
                'min_scene_length': 15,
                'resize_height': 240
            },
            'enhanced_frame_diff': {
                'threshold': 0.35,
                'min_scene_length': 15,
                'adaptive_threshold': True,
                'edge_enhancement': True,
                'motion_compensation': True
            },
            'histogram': {
                'threshold': 0.35,
                'bins': 256,
                'min_scene_length': 15,
                'color_space': 'RGB'
            },
            'multi_channel_histogram': {
                'threshold': 0.35,
                'bins': 256,
                'use_spatial_histogram': True,
                'grid_size': 4
            },
            'adaptive_histogram': {
                'threshold': 0.35,
                'bins': 256,
                'adaptation_window': 30
            }
        }

    def _initialize_detectors(self):
        """Instantiate and initialize every detector from the merged configuration.

        Raises:
            Exception: re-raised if detector construction fails (e.g. missing
                config section or bad parameters).
        """
        try:
            # Frame difference detector (fast baseline).
            self.detectors['frame_diff'] = FrameDifferenceDetector(**self.config['frame_diff'])

            # Enhanced frame difference detector (adaptive threshold, edges, motion).
            self.detectors['enhanced_frame_diff'] = EnhancedFrameDifferenceDetector(
                **self.config['enhanced_frame_diff']
            )

            # Histogram detector.
            self.detectors['histogram'] = HistogramDetector(**self.config['histogram'])

            # Multi-channel (optionally spatial) histogram detector.
            self.detectors['multi_channel_histogram'] = MultiChannelHistogramDetector(
                **self.config['multi_channel_histogram']
            )

            # Adaptive histogram detector.
            self.detectors['adaptive_histogram'] = AdaptiveHistogramDetector(
                **self.config['adaptive_histogram']
            )

            # Run each detector's own initialization; log failures but keep going
            # so one bad detector does not disable the whole service.
            for name, detector in self.detectors.items():
                if detector.initialize():
                    self.logger.info(f"Successfully initialized {name} detector")
                else:
                    self.logger.error(f"Failed to initialize {name} detector")

            self.logger.info(f"Initialized {len(self.detectors)} shot detection algorithms")

        except Exception as e:
            self.logger.error(f"Error initializing detectors: {e}")
            raise

    async def detect_shots(self,
                          video_path: str,
                          algorithm: str = 'auto',
                          output_dir: Optional[str] = None,
                          save_results: bool = True) -> DetectionResult:
        """
        Detect shot boundaries in a video.

        Args:
            video_path: Path to the video file.
            algorithm: Algorithm name ('auto', 'ensemble', or any key of
                ``self.detectors`` such as 'frame_diff' / 'histogram').
            output_dir: Directory in which to save results (required for saving).
            save_results: Whether to persist results to *output_dir*.

        Returns:
            DetectionResult: The detection result.

        Raises:
            FileNotFoundError: If *video_path* does not exist.
            ValueError: If *algorithm* is unknown.
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"Video file not found: {video_path}")

        self.logger.info(f"Starting shot detection for {video_path} using {algorithm}")

        try:
            # Dispatch to the requested detection strategy. Blocking detector
            # calls run in a worker thread so the event loop stays responsive.
            if algorithm == 'auto':
                result = await self._auto_detect(video_path)
            elif algorithm == 'ensemble':
                result = await self._ensemble_detect(video_path)
            elif algorithm in self.detectors:
                detector = self.detectors[algorithm]
                result = await asyncio.to_thread(detector.detect_shots, video_path)
            else:
                raise ValueError(f"Unknown algorithm: {algorithm}")

            # Persist results only when both requested and a target dir is given.
            if save_results and output_dir:
                await self._save_results(result, video_path, output_dir)

            self.logger.info(f"Shot detection completed: {len(result.boundaries)} shots found")
            return result

        except Exception as e:
            self.logger.error(f"Error in shot detection: {e}")
            raise

    async def _auto_detect(self, video_path: str) -> DetectionResult:
        """Pick the best algorithm automatically: fast first, escalate if poor."""
        # 1. Try plain frame differencing first (cheapest).
        frame_diff_result = await asyncio.to_thread(
            self.detectors['frame_diff'].detect_shots, video_path
        )

        # 2. Accept the fast result if it passes the quality heuristic.
        if self._evaluate_result_quality(frame_diff_result):
            self.logger.info("Frame difference detection result is good enough")
            return frame_diff_result

        # 3. Otherwise fall back to the enhanced (slower) detector.
        self.logger.info("Using enhanced detection for better results")
        enhanced_result = await asyncio.to_thread(
            self.detectors['enhanced_frame_diff'].detect_shots, video_path
        )

        return enhanced_result

    async def _ensemble_detect(self, video_path: str) -> DetectionResult:
        """Run a weighted ensemble of several detectors."""
        # Rebuild the multi-detector from scratch so repeated calls do not
        # accumulate duplicate detectors/weights.
        self.multi_detector.detectors.clear()
        self.multi_detector.weights.clear()

        # Weights sum to 1.0; histogram gets the largest share.
        self.multi_detector.add_detector(self.detectors['frame_diff'], weight=0.3)
        self.multi_detector.add_detector(self.detectors['histogram'], weight=0.4)
        self.multi_detector.add_detector(self.detectors['enhanced_frame_diff'], weight=0.3)

        self.multi_detector.initialize_all()

        # Blocking ensemble detection runs off the event loop.
        result = await asyncio.to_thread(
            self.multi_detector.detect_shots_ensemble, video_path
        )

        return result

    def _evaluate_result_quality(self, result: DetectionResult) -> bool:
        """Heuristic quality check: confident boundaries at a plausible density."""
        if not result.boundaries:
            return False

        # Guard against a zero frame count (would otherwise divide by zero).
        if result.frame_count <= 0:
            return False

        avg_confidence = sum(b.confidence for b in result.boundaries) / len(result.boundaries)
        # Shots per second, assuming ~30 fps — TODO confirm actual fps is available here.
        shot_density = len(result.boundaries) / (result.frame_count / 30)

        # Accept reasonably confident results with 0.1–2.0 shots per second.
        return avg_confidence > 0.4 and 0.1 < shot_density < 2.0

    async def _save_results(self, result: DetectionResult, video_path: str, output_dir: str):
        """Persist a detection result as JSON plus a human-readable text summary.

        Failures are logged, not raised — saving is best-effort and must not
        abort the detection pipeline.
        """
        try:
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            video_name = Path(video_path).stem

            # Full machine-readable result.
            json_file = output_path / f"{video_name}_shot_detection.json"
            result_dict = {
                'video_path': video_path,
                'algorithm': result.algorithm_name,
                'processing_time': result.processing_time,
                'frame_count': result.frame_count,
                'shot_count': len(result.boundaries),
                'boundaries': [asdict(boundary) for boundary in result.boundaries],
                'confidence_scores': result.confidence_scores,
                'metadata': result.metadata
            }

            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(result_dict, f, indent=2, ensure_ascii=False)

            # Simplified human-readable summary.
            txt_file = output_path / f"{video_name}_shots.txt"
            with open(txt_file, 'w', encoding='utf-8') as f:
                f.write(f"Video: {video_path}\n")
                f.write(f"Algorithm: {result.algorithm_name}\n")
                f.write(f"Processing Time: {result.processing_time:.2f}s\n")
                f.write(f"Total Shots: {len(result.boundaries)}\n\n")

                for i, boundary in enumerate(result.boundaries):
                    f.write(f"Shot {i+1}:\n")
                    f.write(f"  Frame: {boundary.frame_number}\n")
                    f.write(f"  Time: {boundary.timestamp:.2f}s\n")
                    f.write(f"  Confidence: {boundary.confidence:.3f}\n")
                    f.write(f"  Type: {boundary.boundary_type}\n\n")

            self.logger.info(f"Results saved to {output_path}")

        except Exception as e:
            self.logger.error(f"Error saving results: {e}")

    async def segment_video(self,
                           video_path: str,
                           output_dir: str,
                           algorithm: str = 'auto',
                           format: str = 'mp4') -> List[str]:
        """
        Split a video into segments along detected shot boundaries.

        Args:
            video_path: Input video path.
            output_dir: Output directory (also receives detection results).
            algorithm: Detection algorithm (see :meth:`detect_shots`).
            format: Output container format ('mp4' or anything else → XVID/AVI codec).

        Returns:
            List[str]: Paths of the segment files created (empty if no boundaries).
        """
        # 1. Detect shot boundaries (results are also saved to output_dir).
        result = await self.detect_shots(video_path, algorithm, output_dir)

        if not result.boundaries:
            self.logger.warning("No shot boundaries detected, cannot segment video")
            return []

        # 2. Cut the video at those boundaries.
        return await self._split_video_by_shots(video_path, result.boundaries, output_dir, format)

    async def _split_video_by_shots(self,
                                   video_path: str,
                                   boundaries: List[ShotBoundary],
                                   output_dir: str,
                                   format: str) -> List[str]:
        """Cut the video into one file per shot using OpenCV frame copying.

        Segments shorter than 10 frames are skipped. Capture and writer handles
        are released in ``finally`` blocks so they do not leak on error.
        """
        try:
            import cv2

            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            video_name = Path(video_path).stem
            output_files = []

            cap = cv2.VideoCapture(video_path)
            try:
                fps = cap.get(cv2.CAP_PROP_FPS)
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

                # Codec selection: mp4v for .mp4 output, XVID otherwise.
                fourcc = cv2.VideoWriter_fourcc(*'mp4v') if format == 'mp4' else cv2.VideoWriter_fourcc(*'XVID')

                # Bracket the detected boundaries with synthetic start/end markers
                # so each consecutive pair defines one segment.
                all_boundaries = [ShotBoundary(0, 0.0, 1.0, boundary_type='start')] + boundaries
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                # Prefer the container's own timestamp for the final frame.
                end_timestamp = self._calculate_precise_timestamp(total_frames, cap, fps)

                all_boundaries.append(ShotBoundary(total_frames, end_timestamp, 1.0, boundary_type='end'))

                for i in range(len(all_boundaries) - 1):
                    start_frame = all_boundaries[i].frame_number
                    end_frame = all_boundaries[i + 1].frame_number

                    # Skip segments that are too short to be meaningful shots.
                    if end_frame - start_frame < 10:
                        continue

                    output_file = output_path / f"{video_name}_shot_{i+1:03d}.{format}"
                    output_files.append(str(output_file))

                    out = cv2.VideoWriter(str(output_file), fourcc, fps, (width, height))
                    try:
                        # Seek to the segment start, then copy frames until the
                        # next boundary (or until the stream ends early).
                        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

                        for frame_idx in range(start_frame, end_frame):
                            ret, frame = cap.read()
                            if not ret:
                                break
                            out.write(frame)
                    finally:
                        out.release()

                    self.logger.info(f"Created segment: {output_file}")
            finally:
                cap.release()

            self.logger.info(f"Video segmentation completed: {len(output_files)} segments created")
            return output_files

        except Exception as e:
            self.logger.error(f"Error in video segmentation: {e}")
            raise

    def get_available_algorithms(self) -> List[str]:
        """Return the names of all usable detection algorithms."""
        algorithms = list(self.detectors.keys()) + ['auto', 'ensemble']
        return algorithms

    def get_detector_info(self, algorithm: str) -> Dict[str, Any]:
        """Return performance metrics for a detector, or {} if unknown."""
        if algorithm in self.detectors:
            detector = self.detectors[algorithm]
            return detector.get_performance_metrics()
        else:
            return {}

    def _calculate_precise_timestamp(self, frame_number: int, cap, fps: float) -> float:
        """Compute a timestamp (seconds) for *frame_number*.

        Tries the container's PTS via OpenCV first, validated against the
        fps-based estimate; falls back to ``frame_number / fps`` and finally
        to an assumed 30 fps. The capture's read position is restored.
        """
        try:
            import cv2

            # Method 1: container PTS (Presentation Time Stamp) via OpenCV.
            original_pos = cap.get(cv2.CAP_PROP_POS_FRAMES)  # remember current position
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
            pts_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
            cap.set(cv2.CAP_PROP_POS_FRAMES, original_pos)  # restore position

            # Sanity-check the PTS against the fps-based expectation.
            expected_timestamp = frame_number / fps if fps > 0 else frame_number / 30.0

            # Accept the PTS only if it is positive and within 10% of the estimate.
            if (pts_timestamp > 0 and
                abs(pts_timestamp - expected_timestamp) / max(expected_timestamp, 0.001) < 0.1):
                return pts_timestamp

            # Method 2: frame rate arithmetic (primary fallback).
            if fps > 0:
                return frame_number / fps

            # Method 3: last resort — assume 30 fps.
            return frame_number / 30.0

        except Exception as e:
            self.logger.warning(f"Failed to get precise timestamp for frame {frame_number}: {e}")
            # Fallback computation mirrors methods 2/3 above.
            return frame_number / (fps if fps > 0 else 30.0)

    def cleanup(self):
        """Release all detectors and ensemble resources (best-effort)."""
        try:
            for detector in self.detectors.values():
                detector.cleanup()

            self.multi_detector.cleanup_all()
            self.detectors.clear()

            self.logger.info("Shot detection service cleaned up")

        except Exception as e:
            self.logger.error(f"Error during cleanup: {e}")

    def __enter__(self):
        """Context-manager entry: return the service itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always release resources."""
        self.cleanup()
