import datetime
import os
import shutil
import tempfile
import threading
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Optional, Tuple

import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from tqdm import tqdm

from entity.video_types import (
    BatchProcessingResult,
    SupportedVideoFormat,
    VideoFrame,
    VideoFrameMetadata,
    VideoFramesExtractionResult,
    VideoInfo,
    VideoTextExtractionResult,
)
from insightface_modules.face_feature_extractor import FaceFeatureExtractor
from insightface_modules.face_matching import FaceMatching
from speech_recognition_modules.audio_processor import AudioProcessor
from utils.logger_config import get_logger_config


class VideoProcessor:
    """Video processor: extracts speech text, frames and face key frames from video files."""

    def __init__(self, language: str = "zh-CN", chunk_size: int = 30000) -> None:
        """
        Initialize the video processor.

        Parameters:
            language: language code; defaults to Chinese. Use 'en' for English videos.
            chunk_size: audio chunk size in milliseconds, used when splitting long audio.

        Note: both parameters were previously accepted but silently discarded;
        they are now stored on the instance so downstream code can use them.
        """
        # Store configuration (previously dropped after logging).
        self.language = language
        self.chunk_size = chunk_size
        self.face_detector = FaceFeatureExtractor()
        self.face_matcher = FaceMatching()
        # Container formats accepted by _is_valid_video_file.
        self.supported_formats: List[SupportedVideoFormat] = [
            ".mp4",
            ".avi",
            ".mov",
            ".mkv",
            ".flv",
            ".wmv",
        ]
        self.logger = get_logger_config(name="video_processor").get_logger()
        self.logger.info(f"初始化视频处理器，语言: {language}")

    def _is_valid_video_file(self, file_path: str) -> Tuple[bool, str]:
        """
        检查文件是否存在且是支持的视频格式

        参数:
            file_path: 视频文件路径

        返回:
            (是否有效, 错误信息)
        """
        if not os.path.exists(file_path):
            return False, f"文件不存在: {file_path}"

        _, ext = os.path.splitext(file_path.lower())
        if ext not in self.supported_formats:
            return (
                False,
                f"不支持的视频格式: {ext}，支持的格式: {', '.join(self.supported_formats)}",
            )

        return True, ""

    def _extract_audio_from_video(
        self, video_path: str
    ) -> Tuple[Optional[str], Optional[str]]:
        """
        Extract the audio track from a video file into a temporary WAV file.

        Parameters:
            video_path: path of the video file

        Returns:
            ``(temp_audio_path, error)`` — exactly one of the two is None.
        """
        video = None
        try:
            # Unique temp file name so concurrent extractions don't clash.
            temp_dir = tempfile.gettempdir()
            unique_id = uuid.uuid4()
            temp_audio_path = os.path.join(
                temp_dir, f"temp_audio_{os.path.basename(video_path)}_{unique_id}.wav"
            )

            self.logger.info(f"正在从视频提取音频: {video_path}")
            video = VideoFileClip(video_path)
            audio = video.audio

            if audio is None:
                return None, "视频文件不包含音频轨道"

            # 16-bit PCM keeps the file compatible with speech recognizers.
            audio.write_audiofile(
                temp_audio_path, codec="pcm_s16le", verbose=False, logger=None
            )
            return temp_audio_path, None
        except Exception as e:
            self.logger.error(f"提取音频失败: {str(e)}")
            # ``traceback`` is already imported at module level.
            self.logger.error(traceback.format_exc())
            return None, f"提取音频失败: {str(e)}"
        finally:
            # Release the clip on every path — the "no audio track" and error
            # paths previously leaked the underlying ffmpeg reader.
            if video is not None:
                video.close()

    def _extract_frames(
        self, video_path: str, interval: float = 1.0, max_frames: int = 10
    ) -> List[VideoFrame]:
        """
        Extract evenly spaced frames from a video.

        Parameters:
            video_path: path of the video file
            interval: time gap between extracted frames, in seconds
            max_frames: maximum number of frames to extract

        Returns:
            List of frame records (frame_index, timestamp, image); empty on failure.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                self.logger.error(f"无法打开视频文件: {video_path}")
                return []

            # Basic video properties.
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            duration = total_frames / fps if fps > 0 else 0

            self.logger.info(
                f"视频信息 - FPS: {fps}, 总帧数: {total_frames}, 时长: {duration:.2f}秒"
            )

            # Convert the requested time interval into a frame stride (>= 1).
            frame_interval = int(fps * interval)
            if frame_interval <= 0:
                frame_interval = 1

            # Number of frames we will actually extract.
            num_frames = min(max_frames, int(total_frames / frame_interval))

            frames: List[VideoFrame] = []
            for i in tqdm(range(num_frames), desc="提取视频帧"):
                # Seek to the target frame, then decode it.
                frame_pos = i * frame_interval
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_pos)

                ret, frame = cap.read()
                if not ret:
                    break

                frames.append(
                    {
                        "frame_index": frame_pos,
                        "timestamp": frame_pos / fps if fps > 0 else 0,
                        "image": frame,
                    }
                )

            return frames
        except Exception as e:
            self.logger.error(f"提取视频帧失败: {str(e)}")
            # ``traceback`` is already imported at module level.
            self.logger.error(traceback.format_exc())
            return []
        finally:
            # Release the capture on every path (it previously leaked on error).
            if cap is not None:
                cap.release()

    def _get_video_info(self, video_path: str) -> VideoInfo:
        """
        Read basic properties of a video file.

        Parameters:
            video_path: path of the video file

        Returns:
            Dict with fps, total_frames, width, height and duration_seconds;
            empty dict when the file cannot be opened or probing fails.
        """
        res: VideoInfo = {}
        cap = None
        try:
            cap = cv2.VideoCapture(video_path)
            if cap.isOpened():
                fps = cap.get(cv2.CAP_PROP_FPS)
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                duration = total_frames / fps if fps > 0 else 0

                res = {
                    "fps": fps,
                    "total_frames": total_frames,
                    "width": width,
                    "height": height,
                    "duration_seconds": duration,
                }
        except Exception as e:
            self.logger.error(f"获取视频信息失败: {str(e)}")
        finally:
            # Release the capture on every path — previously it was only
            # released inside the success branch and leaked on exception.
            if cap is not None:
                cap.release()

        return res

    def extract_frames_from_video(
        self, video_path: str, frame_interval: float = 5.0, max_frames: int = 10
    ) -> VideoFramesExtractionResult:
        """
        Extract frame metadata from a video file.

        Quick start:
            processor = VideoProcessor()
            result = processor.extract_frames_from_video('your_video.mp4')

        Parameters:
            video_path: path of the video file
            frame_interval: time gap between extracted frames, in seconds
            max_frames: maximum number of frames to extract

        Returns:
            VideoFramesExtractionResult with per-frame metadata (no image data)
            and basic video info; ``success`` is False and ``error`` set on failure.
        """
        result: VideoFramesExtractionResult = {
            "success": False,
            "file_name": os.path.basename(video_path),
            "file_path": video_path,
            "text": "",
            "error": "",
            "frames": [],
            "video_info": {
                "fps": 0.0,
                "total_frames": 0,
                "width": 0,
                "height": 0,
                "duration_seconds": 0.0,
            },
        }

        # Reject missing files / unsupported formats early.
        is_valid, error = self._is_valid_video_file(video_path)
        if not is_valid:
            result["error"] = error
            return result

        try:
            result["video_info"] = self._get_video_info(video_path)

            self.logger.info(f"正在提取视频帧: {video_path}")
            frames = self._extract_frames(
                video_path, interval=frame_interval, max_frames=max_frames
            )

            # Keep only metadata; the raw images would bloat the result.
            for i, frame in enumerate(frames):
                frame_metadata: VideoFrameMetadata = {
                    "frame_id": i,
                    "frame_index": frame["frame_index"],
                    "timestamp": frame["timestamp"],
                }
                result["frames"].append(frame_metadata)
            self.logger.info(f"提取了 {len(frames)} 帧")

        except Exception as e:
            self.logger.error(f"处理过程中发生错误: {str(e)}")
            # ``traceback`` is already imported at module level.
            self.logger.error(traceback.format_exc())
            result["error"] = f"处理过程中发生错误: {str(e)}"
        finally:
            # Success iff no error was recorded.
            result["success"] = not result["error"]

        return result

    def extract_text_from_video(
        self,
        video_path: str,
        save_audio_path: Optional[str] = None,
        is_segment: bool = False,
        language: str = "zh-CN",
    ) -> VideoTextExtractionResult:
        """
        Extract speech text from a video's audio track via Whisper.

        Quick start:
            processor = VideoProcessor(language='en')
            result = processor.extract_text_from_video('your_video.mp4')

        Parameters:
            video_path: path of the video file
            save_audio_path: where to copy the extracted audio; None = discard it
            is_segment: whether to also produce per-segment transcriptions
            language: recognition language code

        Returns:
            VideoTextExtractionResult with the transcript, optional segments and
            basic video info; on failure ``success`` is False and ``error`` set.
        """
        result: VideoTextExtractionResult = {
            "success": False,
            "file_name": os.path.basename(video_path),
            "file_path": video_path,
            "text": "",
            "error": "",
            "segments": [],
            "frames": [],
            "video_info": {
                "fps": 0.0,
                "total_frames": 0,
                "width": 0,
                "height": 0,
                "duration_seconds": 0.0,
            },
        }

        # Reject missing files / unsupported formats early.
        is_valid, error = self._is_valid_video_file(video_path)
        if not is_valid:
            result["error"] = error
            return result

        # Extract the audio track to a temporary WAV file.
        audio_path, error = self._extract_audio_from_video(video_path)
        if error:
            result["error"] = error
            return result

        if not audio_path:
            result["error"] = "提取音频失败"
            return result

        try:
            # Optionally keep a copy of the extracted audio (best-effort).
            if save_audio_path:
                try:
                    shutil.copy2(audio_path, save_audio_path)
                    self.logger.info(f"已将提取的音频保存到: {save_audio_path}")
                except Exception as e:
                    self.logger.error(f"保存音频文件失败: {str(e)}")

            # Transcribe the temporary audio file with Whisper.
            try:
                audio_processor = AudioProcessor(language=language)
                result_obj = audio_processor.extract_text_with_whisper(
                    audio_path, is_segment=is_segment
                )

                if not result_obj.success:
                    result["error"] = result_obj.error
                    result["success"] = False
                else:
                    result["text"] = result_obj.text
                    result["success"] = True

                    # Flatten AudioSegmentResult objects into plain dicts.
                    for segment in result_obj.segments:
                        result["segments"].append(
                            {
                                "segment_id": segment.segment_id,
                                "start_time": segment.start_time,
                                "end_time": segment.end_time,
                                "text": segment.text,
                                "error": segment.error,
                            }
                        )

            except Exception as e:
                self.logger.error(f"音频处理失败: {str(e)}")
                self.logger.error(traceback.format_exc())
                result["error"] = f"音频处理失败: {str(e)}"
                return result

            result["video_info"] = self._get_video_info(video_path)

        except Exception as e:
            self.logger.error(f"处理过程中发生错误: {str(e)}")
            # ``traceback`` is already imported at module level.
            self.logger.error(traceback.format_exc())
            result["error"] = f"处理过程中发生错误: {str(e)}"
        finally:
            # Best-effort cleanup of the temporary audio file. A targeted
            # ``except OSError`` replaces the previous bare ``except``, which
            # also swallowed KeyboardInterrupt/SystemExit.
            if not save_audio_path and audio_path and os.path.exists(audio_path):
                try:
                    os.remove(audio_path)
                except OSError:
                    pass

        return result

    def batch_process_videos(
        self, directory_path: str, recursive: bool = False
    ) -> BatchProcessingResult:
        """
        Run text extraction over every supported video file in a directory.

        Parameters:
            directory_path: directory to scan
            recursive: also scan subdirectories when True

        Returns:
            BatchProcessingResult summarizing all per-file extraction results.
        """
        if not (os.path.exists(directory_path) and os.path.isdir(directory_path)):
            return {
                "success": False,
                "directory": directory_path,
                "total_files": 0,
                "successful_extractions": 0,
                "results": [],
                "error": f"目录不存在: {directory_path}",
            }

        # Gather candidate paths, then keep only supported video extensions.
        if recursive:
            candidates = (
                os.path.join(root, name)
                for root, _, names in os.walk(directory_path)
                for name in names
            )
        else:
            candidates = (
                os.path.join(directory_path, name)
                for name in os.listdir(directory_path)
                if os.path.isfile(os.path.join(directory_path, name))
            )
        video_files: List[str] = [
            path
            for path in candidates
            if os.path.splitext(path.lower())[1] in self.supported_formats
        ]

        # Transcribe each video in turn.
        results: List[VideoTextExtractionResult] = [
            self.extract_text_from_video(path)
            for path in tqdm(video_files, desc="处理视频文件")
        ]

        return {
            "success": True,
            "directory": directory_path,
            "total_files": len(video_files),
            "successful_extractions": sum(1 for r in results if r["success"]),
            "results": results,
            "error": None,
        }

    def extract_face_frames(
        self,
        video_path: str,
        min_face_size: int = 50,
        similarity_threshold: float = 0.8,
        max_frames: int = 20,
        num_workers: int = 4,
        det_thresh: float = 0.6,
    ) -> Dict[str, Any]:
        """
        Extract key frames containing faces from a video (multi-threaded).

        A sampled frame becomes a key frame when it contains a face that does
        not match (by embedding similarity) any face of the previous key frame.

        Parameters:
            video_path: video file path
            min_face_size: minimum face size in pixels for a face to count
            similarity_threshold: face similarity threshold (0-1)
            max_frames: maximum number of frames to sample from the video
            num_workers: number of parallel detection threads
            det_thresh: detection confidence threshold for the face detector

        Returns:
            Dict with processed-frame count, key frame list, video info and timing.

        Raises:
            ValueError: if the video file cannot be opened
        """
        self.logger.info(f"开始从视频提取人脸关键帧: {video_path} (多线程模式)")

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            self.logger.error(f"无法打开视频文件: {video_path}")
            raise ValueError("无法打开视频文件，请检查视频格式是否正确")

        # Basic video properties.
        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        duration = total_frames / fps if fps > 0 else 0

        self.logger.info(
            f"视频信息: {width}x{height}, {fps} FPS, {total_frames} 帧, {duration:.2f} 秒"
        )

        # GPU availability is informational only; the detector decides usage.
        gpu_available = False
        try:
            import torch

            gpu_available = torch.cuda.is_available()
        except ImportError:
            pass

        self.logger.info(f"GPU加速{'可用' if gpu_available else '不可用'}")

        max_frames_to_process = max_frames
        # Sample roughly 3 frames per second. Clamp to an int >= 1: the old
        # float expression ``fps // 3`` evaluated to 0.0 for fps < 3, which
        # made the sampling loop below advance by zero and spin forever.
        sample_interval = max(1, int(fps // 3))

        self.logger.info(f"动态采样间隔: 每{sample_interval}帧采样一次")

        start_time = datetime.datetime.now()

        # Decode the sampled frames sequentially (VideoCapture is not thread-safe).
        frames_to_process = []
        frame_count = 0
        while (
            frame_count < total_frames
            and len(frames_to_process) < max_frames_to_process
        ):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_count)
            ret, frame = cap.read()
            if not ret:
                break
            frames_to_process.append((frame_count, frame))
            frame_count += sample_interval
        cap.release()

        key_frames = []          # collected key-frame records
        last_face_features = None  # faces of the most recent key frame
        processed_count = 0      # frames processed so far
        # Guards last_face_features / processed_count, which are shared across
        # worker threads. The previous per-call single-worker executor did NOT
        # serialize anything, because every call created its own executor.
        state_lock = threading.Lock()

        def process_frame(frame_data):
            """Detect faces on one frame; return key-frame info or None."""
            nonlocal last_face_features, processed_count
            frame_idx, frame = frame_data
            result = None

            try:
                # The detector expects RGB; OpenCV decodes frames as BGR.
                if frame.ndim == 3 and frame.shape[2] == 3:
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                else:
                    frame_rgb = frame

                face_features = self.face_detector.extract_features_from_image(
                    image_data=frame_rgb,
                    det_thresh=det_thresh,
                )

                if face_features and len(face_features) > 0:
                    # Keep only faces larger than min_face_size on both axes.
                    valid_faces = [
                        face
                        for face in face_features
                        if (face["bbox"][2] - face["bbox"][0]) > min_face_size
                        and (face["bbox"][3] - face["bbox"][1]) > min_face_size
                    ]

                    if valid_faces:
                        # The key-frame decision reads and updates shared
                        # state, so it must run under the lock.
                        with state_lock:
                            is_new_key_frame, last_face_features, frame_info = (
                                self._check_key_frame(
                                    valid_faces,
                                    last_face_features,
                                    similarity_threshold,
                                    frame_idx,
                                    fps,
                                    frame.copy(),
                                    len(valid_faces),
                                )
                            )
                        if is_new_key_frame:
                            result = frame_info
            except Exception as e:
                self.logger.error(f"处理帧 {frame_idx} 时出错: {str(e)}")
            finally:
                with state_lock:
                    processed_count += 1
                    done = processed_count
                if done % 100 == 0:
                    self.logger.info(
                        f"已处理 {done} 帧，找到 {len(key_frames)} 个关键帧"
                    )
            return result

        # Fan the sampled frames out over the worker pool.
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = [
                executor.submit(process_frame, frame_data)
                for frame_data in frames_to_process
            ]
            for future in as_completed(futures):
                frame_result = future.result()
                if frame_result:
                    key_frames.append(frame_result)

        process_time = (datetime.datetime.now() - start_time).total_seconds()
        self.logger.info(
            f"人脸关键帧提取完成，处理了 {processed_count} 帧，提取了 {len(key_frames)} 个关键帧，耗时 {process_time:.2f} 秒"
        )

        return {
            "total_frames_processed": processed_count,
            "key_frames_extracted": len(key_frames),
            "key_frames": key_frames,
            "video_info": {
                "fps": fps,
                "total_frames": total_frames,
                "width": width,
                "height": height,
                "duration_seconds": duration,
            },
            "process_time_seconds": process_time,
        }

    def _check_key_frame(
        self,
        current_faces: List[Dict[str, Any]],
        last_face_features: Optional[List[Dict[str, Any]]],
        similarity_threshold: float,
        frame_idx: int,
        fps: float,
        frame: np.ndarray,
        face_count: int,
    ) -> Tuple[bool, List[Dict[str, Any]], Dict[str, Any]]:
        """
        判断是否为关键帧（线程安全方法）

        参数:
            current_faces: 当前帧检测到的人脸特征
            last_face_features: 上一关键帧的人脸特征
            similarity_threshold: 相似度阈值
            frame_idx: 当前帧索引
            fps: 视频帧率
            frame: 当前帧图像
            face_count: 人脸数量

        返回:
            (是否为关键帧, 更新后的last_face_features, 帧信息)
        """
        # 初始化是否为新关键帧的标志
        is_new_key_frame = False
        # 存储新出现的人脸
        new_faces = []
        # 存储已匹配的人脸索引
        matched_prev_faces = set()

        if last_face_features is None:
            # 如果没有上一关键帧，当前帧自动成为关键帧
            is_new_key_frame = True
            new_faces = current_faces
        else:
            # 遍历当前帧检测到的所有人脸，curr_idx 是当前人脸在 current_faces 列表中的索引，curr_face 是当前人脸的特征信息
            for curr_idx, curr_face in enumerate(current_faces):
                # 初始化匹配标志为 False，表示当前人脸尚未匹配到上一关键帧的人脸
                curr_matched = False
                # 遍历上一关键帧检测到的所有人脸，prev_idx 是上一关键帧人脸在 last_face_features 列表中的索引，prev_face 是上一关键帧人脸的特征信息
                for prev_idx, prev_face in enumerate(last_face_features):
                    # 如果上一关键帧的这个人脸已经匹配过了，跳过本次循环
                    if prev_idx in matched_prev_faces:
                        continue
                    # 计算当前人脸和上一关键帧人脸的相似度，调用 face_matcher 的 compute_similarity 方法
                    similarity = self.face_matcher.compute_similarity(
                        np.array(curr_face["embedding"]),
                        np.array(prev_face["embedding"]),
                    )
                    # 如果相似度大于设定的阈值
                    if similarity > similarity_threshold:
                        # 标记当前人脸已匹配
                        curr_matched = True
                        # 将上一关键帧中匹配的人脸索引添加到 matched_prev_faces 集合中
                        matched_prev_faces.add(prev_idx)
                        # 找到匹配后，跳出内层循环
                        break

                # 如果当前人脸没有匹配到上一关键帧的任何人脸
                if not curr_matched:
                    # 将当前人脸添加到新人脸列表中
                    new_faces.append(curr_face)

            # 如果存在新人脸
            if new_faces:  # or len(matched_prev_faces) < len(last_face_features):
                # 将当前帧标记为新的关键帧
                is_new_key_frame = True

        self.logger.info(
            f"帧 {int(frame_idx)}: 新人脸数={len(new_faces)}, 匹配人脸数={len(matched_prev_faces)}/{len(last_face_features) if last_face_features else 0}, 总人脸数={face_count}"
        )

        frame_info = None
        if is_new_key_frame:
            frame_info = {
                "frame_id": frame_idx,
                "frame_index": frame_idx,
                "timestamp": frame_idx / fps if fps > 0 else 0,
                "frame": frame.copy(),
                "face_count": face_count,
                "new_face_count": len(new_faces),
            }
            # 更新上一关键帧的人脸特征为当前帧所有人脸
            last_face_features = current_faces

        return is_new_key_frame, last_face_features, frame_info
