"""
OpenCV视频分析器

基于OpenCV的视频帧分析和场景检测功能。
"""

import asyncio
import cv2
import numpy as np
from typing import List, Dict, Any, Optional, Tuple
import logging
import os
from pathlib import Path

from src.core.di import Injectable, Inject
from .base import VideoAnalyzer, FrameAnalyzer, SceneAnalyzer


@Injectable(scope="singleton")
class OpenCVAnalyzer(VideoAnalyzer):
    """
    OpenCV-based video analyzer.

    Performs frame analysis, scene-change detection and low-level feature
    extraction (color, texture, shape, keypoints, motion) using OpenCV.
    """

    def __init__(self,
                 config: Dict[str, Any] = Inject("config"),
                 logger: logging.Logger = Inject("logger")):
        """
        Args:
            config: Application configuration; the ``opencv`` section is read
                for analyzer settings.
            logger: Injected application logger.
        """
        super().__init__()
        self.config = config
        self.logger = logger

        # OpenCV-specific configuration section.
        self.opencv_config = config.get("opencv", {})

        # Analysis parameters.
        self.scene_threshold = self.opencv_config.get("scene_threshold", 0.3)
        self.sample_rate = self.opencv_config.get("sample_rate", 1.0)  # sampling rate (fraction of frames kept)
        self.max_frames = self.opencv_config.get("max_frames", 1000)  # hard cap on analyzed frames

        # Analyzer components.
        self.frame_analyzer = FrameAnalyzer()
        self.scene_analyzer = SceneAnalyzer(threshold=self.scene_threshold)

        # Feature detectors (SIFT / ORB / corner detection).
        self._init_feature_detectors()

    def _init_feature_detectors(self) -> None:
        """Initialize OpenCV feature detectors; failures are logged, not raised."""
        # Pre-set to None so a partial failure below never leaves the
        # attributes undefined (the original could raise AttributeError
        # later in _extract_keypoint_features if SIFT_create failed first).
        self.sift: Optional[Any] = None
        self.orb: Optional[Any] = None
        self.corner_detector = cv2.goodFeaturesToTrack
        try:
            # SIFT feature detector (absent in some OpenCV builds).
            self.sift = cv2.SIFT_create()

            # ORB feature detector.
            self.orb = cv2.ORB_create()
        except Exception as e:
            self.logger.warning(f"部分特征检测器初始化失败: {e}")

    def _open_video(self, video_path: str) -> cv2.VideoCapture:
        """
        Validate and open a video file.

        Shared by every public entry point so they all fail consistently
        (previously only ``analyze_video`` checked for file existence).

        Args:
            video_path: Path to the video file.

        Returns:
            An opened ``cv2.VideoCapture``; the caller must release it.

        Raises:
            FileNotFoundError: If the file does not exist.
            RuntimeError: If OpenCV cannot open the file.
        """
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"视频文件不存在: {video_path}")
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise RuntimeError(f"无法打开视频文件: {video_path}")
        return cap

    async def analyze_video(self, video_path: str, **kwargs) -> Dict[str, Any]:
        """
        Run the full analysis pipeline on a video.

        Args:
            video_path: Path to the video file.
            **kwargs: Extra options forwarded to frame extraction
                (e.g. ``max_frames``).

        Returns:
            Dict with video info, per-frame analysis, detected scenes,
            extracted features, motion analysis and analysis metadata.

        Raises:
            FileNotFoundError: If the file does not exist.
            RuntimeError: If the video cannot be opened.
        """
        self.logger.info(f"开始分析视频: {video_path}")

        cap = self._open_video(video_path)

        try:
            # Basic container metadata.
            video_info = self._get_video_info(cap)

            # Sub-sampled frames for analysis.
            frames, timestamps = await self._extract_frames(cap, **kwargs)

            # Per-frame analysis.
            frame_analysis = await self._analyze_frames_batch(frames, timestamps)

            # Scene-change detection.
            scenes = self.scene_analyzer.detect_scene_changes(frames, timestamps)

            # Global feature extraction.
            features = await self._extract_video_features(frames)

            # Inter-frame motion analysis.
            motion_analysis = await self._analyze_motion(frames, timestamps)

            analysis_result = {
                "video_info": video_info,
                "frame_count": len(frames),
                "analyzed_frames": len(frame_analysis),
                "scenes": scenes,
                "scene_count": len(scenes),
                "features": features,
                "motion_analysis": motion_analysis,
                "frame_analysis": frame_analysis,
                "analysis_metadata": {
                    "sample_rate": self.sample_rate,
                    "scene_threshold": self.scene_threshold,
                    "analyzer_version": self.version
                }
            }

            self.logger.info(f"视频分析完成: {len(scenes)} 个场景, {len(frames)} 帧")
            return analysis_result

        finally:
            cap.release()

    def _get_video_info(self, cap: cv2.VideoCapture) -> Dict[str, Any]:
        """Return basic metadata: frame count, fps, size, duration (seconds)."""
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        return {
            "frame_count": frame_count,
            "fps": fps,
            "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
            # Broken containers may report fps == 0; avoid ZeroDivisionError.
            "duration": frame_count / fps if fps > 0 else 0.0
        }

    async def _extract_frames(self, cap: cv2.VideoCapture, **kwargs) -> Tuple[List[np.ndarray], List[float]]:
        """
        Sequentially read and sub-sample frames from an opened capture.

        Args:
            cap: Opened video capture, positioned at the first frame.
            **kwargs: ``max_frames`` overrides the configured cap.

        Returns:
            Tuple of (frames, timestamps in seconds; when fps is unknown the
            raw frame index is used as the timestamp).
        """
        frames: List[np.ndarray] = []
        timestamps: List[float] = []

        fps = cap.get(cv2.CAP_PROP_FPS)

        # Sampling interval in source frames; guard against sample_rate <= 0
        # (the original raised ZeroDivisionError for a 0 sample rate).
        if self.sample_rate > 0:
            sample_interval = max(1, int(1.0 / self.sample_rate))
        else:
            sample_interval = 1
        max_frames = kwargs.get("max_frames", self.max_frames)

        frame_index = 0
        # Check the cap BEFORE reading so we never decode one extra frame.
        while len(frames) < max_frames:
            ret, frame = cap.read()
            if not ret:
                break

            # Keep every sample_interval-th frame.
            if frame_index % sample_interval == 0:
                frames.append(frame.copy())
                timestamps.append(frame_index / fps if fps > 0 else float(frame_index))

            frame_index += 1

        self.logger.info(f"提取了 {len(frames)} 帧用于分析")
        return frames, timestamps

    async def _analyze_frames_batch(self, frames: List[np.ndarray], timestamps: List[float]) -> List[Dict[str, Any]]:
        """Analyze each frame and attach its timestamp to the result dict."""
        frame_analysis: List[Dict[str, Any]] = []

        for i, (frame, timestamp) in enumerate(zip(frames, timestamps)):
            analysis = self.frame_analyzer.analyze_frame(frame, i)
            analysis["timestamp"] = timestamp
            frame_analysis.append(analysis)

        return frame_analysis

    async def _extract_video_features(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """Extract color / texture / shape / keypoint features for all frames.

        Returns an empty dict when no frames were extracted.
        """
        if not frames:
            return {}

        return {
            "color_features": self._extract_color_features(frames),
            "texture_features": self._extract_texture_features(frames),
            "shape_features": self._extract_shape_features(frames),
            "keypoint_features": self._extract_keypoint_features(frames)
        }

    def _extract_color_features(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """Aggregate brightness/contrast statistics over all frames.

        Callers guarantee ``frames`` is non-empty (checked upstream).
        """
        brightness_values: List[float] = []
        contrast_values: List[float] = []

        for frame in frames:
            # Histogram is computed for its side statistics inside the
            # frame analyzer; only brightness/contrast are aggregated here.
            self.frame_analyzer.extract_histogram(frame)

            brightness_values.append(self.frame_analyzer.calculate_brightness(frame))
            contrast_values.append(self.frame_analyzer.calculate_contrast(frame))

        return {
            "average_brightness": float(np.mean(brightness_values)),
            "brightness_std": float(np.std(brightness_values)),
            "average_contrast": float(np.mean(contrast_values)),
            "contrast_std": float(np.std(contrast_values)),
            "brightness_range": [float(np.min(brightness_values)), float(np.max(brightness_values))],
            "contrast_range": [float(np.min(contrast_values)), float(np.max(contrast_values))]
        }

    def _extract_texture_features(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """Aggregate edge-density statistics (fraction of edge pixels)."""
        edge_densities: List[float] = []

        for frame in frames:
            edges = self.frame_analyzer.detect_edges(frame)
            edge_density = np.sum(edges > 0) / (edges.shape[0] * edges.shape[1])
            edge_densities.append(edge_density)

        return {
            "average_edge_density": float(np.mean(edge_densities)),
            "edge_density_std": float(np.std(edge_densities)),
            "edge_density_range": [float(np.min(edge_densities)), float(np.max(edge_densities))]
        }

    def _extract_shape_features(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """Aggregate contour-count and contour-area-ratio statistics."""
        contour_counts: List[int] = []
        area_ratios: List[float] = []

        for frame in frames:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            edges = cv2.Canny(gray, 50, 150)
            contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            contour_counts.append(len(contours))

            # Total contour area relative to the frame area.
            total_area = frame.shape[0] * frame.shape[1]
            contour_area = sum(cv2.contourArea(c) for c in contours)
            area_ratios.append(contour_area / total_area)

        return {
            "average_contour_count": float(np.mean(contour_counts)),
            "contour_count_std": float(np.std(contour_counts)),
            "average_area_ratio": float(np.mean(area_ratios)),
            "area_ratio_std": float(np.std(area_ratios))
        }

    def _extract_keypoint_features(self, frames: List[np.ndarray]) -> Dict[str, Any]:
        """Aggregate ORB keypoint counts; frames that fail detection count as 0."""
        keypoint_counts: List[int] = []

        for frame in frames:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # ORB may be unavailable (see _init_feature_detectors).
            if self.orb is None:
                keypoint_counts.append(0)
                continue

            try:
                keypoints = self.orb.detect(gray, None)
                keypoint_counts.append(len(keypoints))
            except Exception:
                # Best-effort: a single bad frame must not abort the run.
                keypoint_counts.append(0)

        return {
            "average_keypoint_count": float(np.mean(keypoint_counts)) if keypoint_counts else 0.0,
            "keypoint_count_std": float(np.std(keypoint_counts)) if keypoint_counts else 0.0,
            "keypoint_count_range": [float(np.min(keypoint_counts)), float(np.max(keypoint_counts))] if keypoint_counts else [0.0, 0.0]
        }

    async def _analyze_motion(self, frames: List[np.ndarray], timestamps: List[float]) -> Dict[str, Any]:
        """
        Analyze inter-frame motion magnitudes.

        Returns ``{"motion_detected": False}`` when fewer than two frames
        are available.
        """
        if len(frames) < 2:
            return {"motion_detected": False}

        motion_magnitudes: List[float] = []

        for i in range(1, len(frames)):
            motion = self.frame_analyzer.calculate_motion_vectors(frames[i - 1], frames[i])
            motion_magnitudes.append(motion.get("mean_magnitude", 0.0))

        return {
            # Threshold of 1.0 mean magnitude marks "motion present".
            "motion_detected": any(m > 1.0 for m in motion_magnitudes),
            "average_motion": float(np.mean(motion_magnitudes)),
            "motion_std": float(np.std(motion_magnitudes)),
            "max_motion": float(np.max(motion_magnitudes)),
            "motion_timeline": motion_magnitudes
        }

    async def detect_scenes(self, video_path: str, **kwargs) -> List[Dict[str, Any]]:
        """
        Detect scene changes in a video.

        Args:
            video_path: Path to the video file.
            **kwargs: Extra options forwarded to frame extraction.

        Returns:
            List of detected scenes.

        Raises:
            FileNotFoundError: If the file does not exist.
            RuntimeError: If the video cannot be opened.
        """
        self.logger.info(f"检测场景变化: {video_path}")

        cap = self._open_video(video_path)

        try:
            frames, timestamps = await self._extract_frames(cap, **kwargs)

            scenes = self.scene_analyzer.detect_scene_changes(frames, timestamps)

            self.logger.info(f"检测到 {len(scenes)} 个场景")
            return scenes

        finally:
            cap.release()

    async def extract_features(self, video_path: str, **kwargs) -> Dict[str, Any]:
        """
        Extract video features.

        Args:
            video_path: Path to the video file.
            **kwargs: Extra options forwarded to frame extraction.

        Returns:
            Feature dict (color / texture / shape / keypoint features).

        Raises:
            FileNotFoundError: If the file does not exist.
            RuntimeError: If the video cannot be opened.
        """
        self.logger.info(f"提取视频特征: {video_path}")

        cap = self._open_video(video_path)

        try:
            frames, timestamps = await self._extract_frames(cap, **kwargs)

            features = await self._extract_video_features(frames)

            self.logger.info("视频特征提取完成")
            return features

        finally:
            cap.release()

    async def analyze_frames(self, video_path: str, **kwargs) -> List[Dict[str, Any]]:
        """
        Analyze individual video frames.

        Args:
            video_path: Path to the video file.
            **kwargs: Extra options forwarded to frame extraction.

        Returns:
            List of per-frame analysis results.

        Raises:
            FileNotFoundError: If the file does not exist.
            RuntimeError: If the video cannot be opened.
        """
        self.logger.info(f"分析视频帧: {video_path}")

        cap = self._open_video(video_path)

        try:
            frames, timestamps = await self._extract_frames(cap, **kwargs)

            frame_analysis = await self._analyze_frames_batch(frames, timestamps)

            self.logger.info(f"分析了 {len(frame_analysis)} 帧")
            return frame_analysis

        finally:
            cap.release()

    # Plugin interface methods (not supported by this analyzer).
    async def classify(self, input_data: Any, **kwargs) -> Any:
        """Classification is not supported by the OpenCV analyzer."""
        raise NotImplementedError("OpenCV分析器不支持分类功能")

    async def output(self, data: Any, **kwargs) -> Any:
        """Output is not supported by the OpenCV analyzer."""
        raise NotImplementedError("OpenCV分析器不支持输出功能")
