"""
视频内容分析模块
负责对视频进行语音转录、文本分析、场景检测等
"""
import logging
import json
import os
from typing import Dict, List, Any, Optional
from datetime import timedelta
import whisper
import cv2
from moviepy.editor import VideoFileClip
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from app.exceptions import TranscriptionError, VideoProcessingError
from app.config import settings

logger = logging.getLogger(__name__)


class ContentAnalyzer:
    """视频内容分析器"""

    def __init__(self):
        """Create the analyzer and eagerly load every enabled AI model.

        Raises:
            VideoProcessingError: if any model fails to load.
        """
        # All models are None until _load_models() populates them; the
        # optional pipelines stay None when disabled in settings.
        self.whisper_model = None
        self.sentiment_analyzer = None
        self.summarizer = None
        self._load_models()

    def _load_models(self):
        """Load the Whisper, sentiment-analysis and summarization models.

        Whisper is always loaded; the sentiment and summarization pipelines
        are only loaded when the corresponding feature flags in ``settings``
        are enabled.

        Raises:
            VideoProcessingError: wraps any underlying loading failure.
        """
        try:
            # Whisper is the transcription backbone and always required.
            logger.info(f"Loading Whisper model: {settings.WHISPER_MODEL}")
            self.whisper_model = whisper.load_model(settings.WHISPER_MODEL)

            # Compute the device index once instead of per pipeline:
            # 0 = first CUDA GPU, -1 = CPU (transformers convention).
            device = 0 if torch.cuda.is_available() else -1

            if settings.ENABLE_SENTIMENT_ANALYSIS:
                logger.info("Loading sentiment analysis model")
                self.sentiment_analyzer = pipeline(
                    "sentiment-analysis",
                    model="cardiffnlp/twitter-roberta-base-sentiment-latest",
                    device=device
                )

            if settings.ENABLE_SUMMARIZATION:
                logger.info("Loading summarization model")
                self.summarizer = pipeline(
                    "summarization",
                    model="facebook/bart-large-cnn",
                    device=device
                )

        except Exception as e:
            logger.error(f"Failed to load AI models: {e}")
            # Chain the cause so the original traceback is preserved.
            raise VideoProcessingError(f"模型加载失败: {e}") from e

    async def analyze_video(self, video_path: str, progress_callback=None) -> Dict[str, Any]:
        """Run the full content-analysis pipeline on a video.

        Stages (each gated by a feature flag in ``settings``): metadata
        extraction, speech transcription, scene detection, text analysis,
        and semantic-unit generation.

        Args:
            video_path: Path to the video file.
            progress_callback: Optional ``callback(stage_label, percent)``
                invoked as each stage starts/finishes.

        Returns:
            Dict with keys ``video_info``, ``transcription``, ``scenes``,
            ``text_analysis`` and ``semantic_units``; stages that are
            disabled (or not applicable) are ``None``.

        Raises:
            VideoProcessingError: if any stage fails.
        """
        def report(stage: str, percent: int) -> None:
            # Progress reporting is optional; no-op when no callback given.
            if progress_callback:
                progress_callback(stage, percent)

        try:
            logger.info(f"Starting video analysis for: {video_path}")

            # 1. Basic video metadata.
            report("获取视频信息", 5)
            video_info = await self._get_video_info(video_path)

            # 2. Speech transcription (optional).
            transcription = None
            if settings.ENABLE_TRANSCRIPTION:
                report("语音转录中", 10)
                transcription = await self._transcribe_audio(video_path, progress_callback)

            # 3. Scene detection (optional).
            scenes = None
            if settings.ENABLE_SCENE_DETECTION:
                report("场景检测中", 60)
                scenes = await self._detect_scenes(video_path)

            # 4. Text analysis — only meaningful when there is a transcript.
            text_analysis = None
            if transcription and (settings.ENABLE_SUMMARIZATION or settings.ENABLE_SENTIMENT_ANALYSIS):
                report("文本分析中", 80)
                text_analysis = await self._analyze_text(transcription)

            # 5. Combine everything into time-aligned semantic units.
            report("生成语义单元", 90)
            semantic_units = await self._generate_semantic_units(
                transcription, scenes, text_analysis, video_info
            )

            report("分析完成", 100)

            logger.info("Video analysis completed successfully")
            return {
                "video_info": video_info,
                "transcription": transcription,
                "scenes": scenes,
                "text_analysis": text_analysis,
                "semantic_units": semantic_units
            }

        except Exception as e:
            logger.error(f"Video analysis failed: {e}")
            # Chain the cause so the original traceback is preserved.
            raise VideoProcessingError(f"视频分析失败: {e}") from e

    async def _get_video_info(self, video_path: str) -> Dict[str, Any]:
        """Read basic metadata (duration, fps, resolution, file size) from a video."""
        try:
            # moviepy's context manager closes the underlying reader for us.
            with VideoFileClip(video_path) as video:
                info = {
                    "duration": video.duration,
                    "fps": video.fps,
                    "width": video.w,
                    "height": video.h,
                    "size": os.path.getsize(video_path),
                }
            return info
        except Exception as e:
            logger.error(f"Failed to get video info: {e}")
            raise VideoProcessingError(f"无法获取视频信息: {e}")

    async def _transcribe_audio(self, video_path: str, progress_callback=None) -> Dict[str, Any]:
        """Transcribe the audio track of a video with Whisper.

        Args:
            video_path: Path to the video file (Whisper extracts the audio).
            progress_callback: Currently unused; kept for interface symmetry.

        Returns:
            Dict with keys ``text`` (full transcript), ``language``,
            ``segments`` (normalized segment dicts) and ``word_timestamps``
            (flat list of per-word timing dicts).

        Raises:
            TranscriptionError: wraps any underlying Whisper failure.
        """
        try:
            logger.info("Starting audio transcription")

            result = self.whisper_model.transcribe(
                video_path,
                language=settings.TRANSCRIPTION_LANGUAGE,
                task="transcribe",
                word_timestamps=True,
                verbose=False
            )

            # Normalize Whisper's segment list and gather word timings.
            segments = []
            words = []
            for segment in result["segments"]:
                segments.append({
                    "start": segment["start"],
                    "end": segment["end"],
                    "text": segment["text"].strip(),
                    "confidence": segment.get("avg_logprob", 0)
                })
                # BUG FIX: with word_timestamps=True Whisper attaches the
                # word list to each *segment*, not to the top-level result,
                # so the old result.get("words", []) was always empty.
                words.extend(segment.get("words", []))

            transcription_data = {
                "text": result["text"],
                "language": result["language"],
                "segments": segments,
                "word_timestamps": words
            }

            logger.info(f"Transcription completed. {len(segments)} segments found")
            return transcription_data

        except Exception as e:
            logger.error(f"Transcription failed: {e}")
            # Chain the cause so the original traceback is preserved.
            raise TranscriptionError(f"语音转录失败: {e}") from e

    async def _detect_scenes(self, video_path: str) -> List[Dict[str, Any]]:
        """Detect scene cuts by comparing color histograms of sampled frames.

        A frame is sampled roughly every 2 seconds; when the histogram
        correlation with the previous sample drops below the threshold, a
        scene boundary is recorded.

        Args:
            video_path: Path to the video file.

        Returns:
            List of scene dicts with ``start_time``/``end_time`` (seconds)
            and ``start_frame``/``end_frame``. Returns an empty list on
            failure — scene detection is best-effort and must not abort
            the overall analysis.
        """
        cap = None
        try:
            logger.info("Starting scene detection")

            cap = cv2.VideoCapture(video_path)
            fps = cap.get(cv2.CAP_PROP_FPS)

            # BUG FIX: some containers report fps == 0, which previously
            # caused a modulo-by-zero crash; bail out gracefully instead.
            if not fps or fps <= 0:
                logger.warning("Invalid FPS reported; skipping scene detection")
                return []

            # Hoisted loop invariants.
            sample_interval = max(1, int(fps * 2))  # one sample every ~2 s
            scene_threshold = 30  # correlation below threshold/100 => cut

            scenes = []
            current_scene_start = 0
            prev_hist = None
            frame_count = 0

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                frame_count += 1

                if frame_count % sample_interval == 0:
                    # 3-channel BGR histogram, 50 bins per channel.
                    hist = cv2.calcHist([frame], [0, 1, 2], None, [50, 50, 50],
                                        [0, 256, 0, 256, 0, 256])

                    if prev_hist is not None:
                        # HISTCMP_CORREL: 1.0 = identical, lower = more different.
                        similarity = cv2.compareHist(prev_hist, hist, cv2.HISTCMP_CORREL)

                        if similarity < scene_threshold / 100:
                            scenes.append({
                                "start_time": current_scene_start / fps,
                                "end_time": frame_count / fps,
                                "start_frame": current_scene_start,
                                "end_frame": frame_count
                            })
                            current_scene_start = frame_count

                    prev_hist = hist

            # Close the trailing scene (from the last cut to the final frame).
            if frame_count > current_scene_start:
                scenes.append({
                    "start_time": current_scene_start / fps,
                    "end_time": frame_count / fps,
                    "start_frame": current_scene_start,
                    "end_frame": frame_count
                })

            logger.info(f"Scene detection completed. {len(scenes)} scenes found")
            return scenes

        except Exception as e:
            logger.error(f"Scene detection failed: {e}")
            # Scene-detection failure must not abort the whole pipeline.
            return []
        finally:
            # BUG FIX: the capture handle previously leaked on any
            # exception path; always release it.
            if cap is not None:
                cap.release()

    async def _analyze_text(self, transcription: Dict[str, Any]) -> Dict[str, Any]:
        """Run sentiment analysis and summarization over a transcript.

        Both sub-analyses are best-effort: per-segment sentiment failures
        are logged and skipped, and a total failure yields an empty dict.

        Returns:
            Dict that may contain ``sentiments`` and/or ``summary``.
        """
        try:
            analysis: Dict[str, Any] = {}
            full_text = transcription["text"]

            # Per-segment sentiment, when enabled and the model is loaded.
            if settings.ENABLE_SENTIMENT_ANALYSIS and self.sentiment_analyzer:
                sentiments = []
                for segment in transcription["segments"]:
                    text = segment["text"]
                    if not text:
                        continue
                    try:
                        # Transformer models have a bounded input window.
                        prediction = self.sentiment_analyzer(text[:512])[0]
                        sentiments.append({
                            "start": segment["start"],
                            "end": segment["end"],
                            "sentiment": prediction["label"],
                            "score": prediction["score"]
                        })
                    except Exception as e:
                        logger.warning(f"Sentiment analysis failed for segment: {e}")
                analysis["sentiments"] = sentiments

            # Whole-transcript summary, when enabled and text is long enough.
            if settings.ENABLE_SUMMARIZATION and self.summarizer and len(full_text) > 100:
                try:
                    truncated = full_text[:1024] if len(full_text) > 1024 else full_text
                    summary_result = self.summarizer(
                        truncated,
                        max_length=150,
                        min_length=30,
                        do_sample=False
                    )
                    analysis["summary"] = summary_result[0]["summary_text"]
                except Exception as e:
                    logger.warning(f"Summarization failed: {e}")

            return analysis

        except Exception as e:
            logger.error(f"Text analysis failed: {e}")
            return {}

    async def _generate_semantic_units(
        self,
        transcription: Optional[Dict[str, Any]],
        scenes: Optional[List[Dict[str, Any]]],
        text_analysis: Optional[Dict[str, Any]],
        video_info: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """Build time-aligned semantic units from the analysis results.

        Transcript segments take priority; scene boundaries are used only
        when no transcript is available. Returns an empty list on failure.
        """
        try:
            units: List[Dict[str, Any]] = []
            sentiments = (text_analysis or {}).get("sentiments", [])

            if transcription and transcription["segments"]:
                # One unit per transcript segment, enriched with sentiment.
                for seg in transcription["segments"]:
                    unit = {
                        "start_time": seg["start"],
                        "end_time": seg["end"],
                        "duration": seg["end"] - seg["start"],
                        "text": seg["text"],
                        "confidence": seg.get("confidence", 0),
                        "type": "speech"
                    }

                    # Attach the sentiment whose span matches within 1 second.
                    for s in sentiments:
                        if abs(s["start"] - seg["start"]) < 1.0 and abs(s["end"] - seg["end"]) < 1.0:
                            unit["sentiment"] = s["sentiment"]
                            unit["sentiment_score"] = s["score"]
                            break

                    units.append(unit)

            elif scenes:
                # No speech available: fall back to visual scene boundaries.
                units = [
                    {
                        "start_time": scene["start_time"],
                        "end_time": scene["end_time"],
                        "duration": scene["end_time"] - scene["start_time"],
                        "text": "",
                        "type": "scene"
                    }
                    for scene in scenes
                ]

            # Merge units that are too short to stand alone.
            units = self._post_process_semantic_units(units)

            logger.info(f"Generated {len(units)} semantic units")
            return units

        except Exception as e:
            logger.error(f"Failed to generate semantic units: {e}")
            return []

    def _post_process_semantic_units(
        self,
        units: List[Dict[str, Any]],
        min_duration: float = 2.0
    ) -> List[Dict[str, Any]]:
        """Merge semantic units shorter than ``min_duration`` seconds.

        A unit below the threshold is merged forward into the following
        unit (end time extended, duration recomputed, texts concatenated)
        until it is long enough. The final unit is always kept, even if
        it remains short.

        Args:
            units: Time-ordered unit dicts with ``start_time``,
                ``end_time``, ``duration`` and ``text`` keys.
            min_duration: Minimum unit length in seconds. Previously a
                hard-coded constant (2.0); parameterized with the same
                default, so existing callers are unaffected.

        Returns:
            A new list of merged units; the input list is not mutated.
        """
        if not units:
            return units

        processed_units: List[Dict[str, Any]] = []
        current_unit = units[0].copy()

        for next_unit in units[1:]:
            if current_unit["duration"] < min_duration:
                # Too short: absorb the next unit into the current one.
                current_unit["end_time"] = next_unit["end_time"]
                current_unit["duration"] = current_unit["end_time"] - current_unit["start_time"]
                current_unit["text"] += " " + next_unit["text"]
            else:
                processed_units.append(current_unit)
                current_unit = next_unit.copy()

        processed_units.append(current_unit)
        return processed_units