import cv2
import numpy as np
import whisper
import os
import json
import subprocess
import tempfile
from typing import List, Dict, Any
import torch
import time
from datetime import datetime
import requests

class VideoNarrator:
    """Generate a narration script for a video.

    Pipeline: extract audio with ffmpeg -> transcribe with Whisper ->
    sample key frames with OpenCV for a rough scene description ->
    optionally polish each narration line through the Doubao chat API.
    """

    def __init__(self, model_size="large-v3", use_doubao=True, doubao_api_key=None):
        """Initialize the video narration generator.

        Args:
            model_size: Whisper model identifier to load (e.g. "large-v3").
            use_doubao: Whether to call the Doubao API to polish narration text.
            doubao_api_key: Bearer token for the Doubao API. When absent,
                Doubao usage is disabled and basic text generation is used.
        """
        self.start_time = time.time()
        print(f"[{self._get_timestamp()}] 初始化视频解说生成器...")

        # Load the Whisper speech-recognition model.
        # Bug fix: the original unconditionally moved the model to "cuda",
        # which crashes on CPU-only machines; pick the device dynamically.
        print(f"[{self._get_timestamp()}] 正在加载 Whisper {model_size} 模型...")
        whisper_start = time.time()
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.whisper_model = whisper.load_model(model_size).to(device)
        whisper_time = time.time() - whisper_start
        print(f"[{self._get_timestamp()}] Whisper 模型加载完成，耗时: {whisper_time:.2f}秒")

        # Doubao is only used when both the flag and a key are present.
        self.use_doubao = use_doubao
        self.doubao_api_key = doubao_api_key

        if self.use_doubao:
            if self.doubao_api_key:
                print(f"[{self._get_timestamp()}] 已配置豆包API密钥，将使用豆包API调用")
            else:
                print(f"[{self._get_timestamp()}] 未提供API密钥，将使用基础文案生成")
                self.use_doubao = False
        else:
            print(f"[{self._get_timestamp()}] 未启用豆包模型")

        # Video analysis state, filled in by extract_key_frames().
        self.fps = 0
        self.duration = 0
        self.frame_count = 0

    def _get_timestamp(self):
        """Return the current wall-clock time as HH:MM:SS for log prefixes."""
        return datetime.now().strftime("%H:%M:%S")

    def _format_time(self, seconds):
        """Format a duration in seconds as MM:SS, or HH:MM:SS when >= 1 hour."""
        hours = int(seconds // 3600)
        minutes = int((seconds % 3600) // 60)
        secs = int(seconds % 60)
        if hours > 0:
            return f"{hours:02d}:{minutes:02d}:{secs:02d}"
        else:
            return f"{minutes:02d}:{secs:02d}"

    def extract_audio(self, video_path, audio_path=None):
        """Extract the audio track of *video_path* to a mono 16 kHz WAV file.

        Args:
            video_path: Path of the input video.
            audio_path: Optional output path; a temporary file is created
                when omitted.

        Returns:
            The path of the written WAV file.

        Raises:
            Exception: When ffmpeg is missing or the extraction fails.
        """
        if audio_path is None:
            # Bug fix: tempfile.mktemp is deprecated and racy; mkstemp
            # atomically creates the file. ffmpeg overwrites it via '-y'.
            fd, audio_path = tempfile.mkstemp(suffix='.wav')
            os.close(fd)

        print(f"[{self._get_timestamp()}] 正在提取音频: {video_path} -> {audio_path}")
        audio_start = time.time()

        # ffmpeg parameters tuned for speech recognition:
        # 16-bit PCM, 16 kHz sample rate, single channel.
        cmd = [
            'ffmpeg',
            '-i', video_path,
            '-vn',  # disable video recording
            '-acodec', 'pcm_s16le',  # audio codec
            '-ar', '16000',  # audio sample rate
            '-ac', '1',  # audio channel count
            '-y',  # overwrite the output file
            audio_path
        ]

        try:
            subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            audio_time = time.time() - audio_start
            print(f"[{self._get_timestamp()}] 音频提取完成，耗时: {audio_time:.2f}秒")
            return audio_path
        except subprocess.CalledProcessError:
            raise Exception("无法提取音频，请确保已安装 ffmpeg")
        except FileNotFoundError:
            raise Exception("未找到 ffmpeg，请先安装 ffmpeg")

    def transcribe_audio(self, audio_path):
        """Transcribe a WAV file with Whisper (Chinese language hint).

        Returns:
            A ``(text, segments)`` tuple: the full transcript string and the
            Whisper segment dicts (each with 'start', 'end', 'text').
        """
        print(f"[{self._get_timestamp()}] 正在进行语音识别...")
        transcribe_start = time.time()

        result = self.whisper_model.transcribe(audio_path, language="zh")

        transcribe_time = time.time() - transcribe_start
        segment_count = len(result["segments"])

        # Print a summary of the recognition result.
        print(f"[{self._get_timestamp()}] 语音识别完成，识别到 {segment_count} 个语音片段，耗时: {transcribe_time:.2f}秒")
        print(f"[{self._get_timestamp()}] 完整文本内容:")
        print("=" * 60)
        print(result["text"])
        print("=" * 60)

        # Print the detailed per-segment timeline.
        print(f"[{self._get_timestamp()}] 详细时间轴信息:")
        print("-" * 60)
        for segment in result["segments"]:
            start_time = segment['start']
            end_time = segment['end']
            text = segment['text']
            print(f"[{start_time:.2f}s - {end_time:.2f}s] {text}")
        print("-" * 60)

        return result["text"], result["segments"]

    def extract_key_frames(self, video_path, interval=5):
        """Sample one frame every *interval* seconds for scene analysis.

        Also populates ``self.fps``, ``self.frame_count`` and
        ``self.duration`` as a side effect.

        Returns:
            A list of dicts with 'timestamp' (seconds) and 'frame' (BGR array).

        Raises:
            Exception: When the video cannot be opened or reports no FPS.
        """
        print(f"[{self._get_timestamp()}] 正在提取关键帧 (间隔: {interval}秒)...")
        frames_start = time.time()

        cap = cv2.VideoCapture(video_path)
        # Robustness fix: fail fast on unreadable files instead of
        # dividing by a zero frame rate below.
        if not cap.isOpened():
            raise Exception(f"无法打开视频文件: {video_path}")
        self.fps = int(cap.get(cv2.CAP_PROP_FPS))
        self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if self.fps <= 0:
            cap.release()
            raise Exception(f"无法读取视频帧率: {video_path}")
        self.duration = self.frame_count / self.fps

        print(f"[{self._get_timestamp()}] 视频信息 - 分辨率: {int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))}x{int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))}, "
              f"帧率: {self.fps}, 总帧数: {self.frame_count}, 时长: {self._format_time(self.duration)}")

        key_frames = []
        frame_number = 0
        # Guard against interval*fps == 0 (modulo-by-zero).
        frame_step = max(1, self.fps * interval)

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Keep one frame every `interval` seconds.
            if frame_number % frame_step == 0:
                timestamp = frame_number / self.fps
                key_frames.append({
                    "timestamp": timestamp,
                    "frame": frame.copy()
                })

            frame_number += 1

        cap.release()

        frames_time = time.time() - frames_start
        print(f"[{self._get_timestamp()}] 关键帧提取完成，共提取 {len(key_frames)} 帧，耗时: {frames_time:.2f}秒")
        return key_frames

    def analyze_scene(self, frame):
        """Classify a single BGR frame by average brightness (simplified).

        Returns one of three Chinese scene labels used in the narration.
        """
        # Convert to grayscale for a cheap brightness analysis.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        avg_brightness = np.mean(gray)

        # Simple thresholds on mean brightness.
        if avg_brightness > 180:
            scene_type = "明亮场景"
        elif avg_brightness < 50:
            scene_type = "暗场景"
        else:
            scene_type = "普通场景"

        return scene_type

    def generate_narration_with_doubao_api(self, video_content: str, scene_info: str, timestamp: float, video_duration: float) -> str:
        """Polish one narration line through the Doubao chat-completion API.

        Args:
            video_content: The raw narration text to improve.
            scene_info: Scene description string for the prompt.
            timestamp: Position in the video (0 selects the opening prompt).
            video_duration: Total video length in seconds.

        Returns:
            The generated text, or *video_content* unchanged whenever the
            API is disabled, fails, or returns an empty answer.
        """
        if not self.use_doubao or not self.doubao_api_key:
            return video_content

        try:
            # Build a prompt tailored to the position in the video.
            if timestamp == 0:  # opening line
                prompt = f"""你是一位专业的影视博主，专门剪辑影视，为剪辑的影视添加原创解说文案。请为以下视频生成解说文案：

视频总时长：{int(video_duration)}秒
场景信息：{scene_info}

请生成一段吸引人的开场白，要求：
1. 简要介绍视频内容
2. 吸引观众注意力
3. 语言生动有趣

开场白："""
            else:  # mid-video content
                prompt = f"""你是一位专业的影视博主，专门剪辑影视，为剪辑的影视添加原创解说文案。请为以下视频生成解说文案：

视频内容：{video_content}
场景信息：{scene_info}
时间点：{timestamp:.1f}秒

请生成一段自然流畅、生动有趣的解说词，要求：
1. 语言通俗易懂，富有表现力
2. 结合场景信息丰富解说内容
3. 保持与视频内容的一致性
4. 不要添加与内容无关的信息

解说词："""

            # Call the Doubao API.
            print(f"[{self._get_timestamp()}] 调用豆包API生成文案 (时间点: {timestamp:.1f}s)...")
            api_start = time.time()

            url = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"
            headers = {
                "Authorization": f"Bearer {self.doubao_api_key}",
                "Content-Type": "application/json"
            }

            data = {
                # Bug fix: the model id had a stray leading space
                # (" Doubao-pro"), which the API rejects as unknown.
                "model": "Doubao-pro",
                "messages": [
                    {"role": "system", "content": "你是一位专业的影视博主，专门剪辑影视，为剪辑的影视添加原创解说文案。请根据视频生成解说文案。"},
                    {"role": "user", "content": prompt}
                ],
                "stream": False
            }

            # Bug fix: a timeout keeps a stalled API call from hanging the
            # whole pipeline indefinitely.
            response = requests.post(url, headers=headers, json=data, timeout=60)
            api_time = time.time() - api_start

            if response.status_code == 200:
                result = response.json()
                generated_text = result["choices"][0]["message"]["content"]
                print(f"[{self._get_timestamp()}] API调用成功，耗时: {api_time:.2f}秒")
                return generated_text.strip() if generated_text.strip() else video_content
            else:
                print(f"[{self._get_timestamp()}] API调用失败: {response.status_code} {response.text}")
                # Try to surface a structured error body when present.
                try:
                    error_detail = response.json()
                    print(f"[{self._get_timestamp()}] 错误详情: {error_detail}")
                except ValueError:  # non-JSON error body; nothing more to show
                    pass
                return video_content

        except Exception as e:
            # Any network/parse failure degrades gracefully to the raw text.
            print(f"[{self._get_timestamp()}] 豆包API调用失败: {e}")
            return video_content

    def generate_narration(self, transcription, segments, key_frames):
        """Build the narration script from transcript segments and key frames.

        Args:
            transcription: Full transcript text (currently unused here; the
                per-segment texts drive the narration).
            segments: Whisper segment dicts with 'start', 'end', 'text'.
            key_frames: Output of extract_key_frames().

        Returns:
            A list of dicts with 'timestamp', 'duration' and 'text',
            including an opening and a closing line.
        """
        print(f"[{self._get_timestamp()}] 正在生成解说文案...")
        narration_start = time.time()

        narration_script = []
        processed_segments = 0
        total_segments = len(segments)

        # Opening line.
        print(f"[{self._get_timestamp()}] 生成开场白...")
        opening_text = f"欢迎观看这段{int(self.duration)}秒的视频内容。"
        if self.use_doubao and self.doubao_api_key:
            opening_text = self.generate_narration_with_doubao_api(
                "欢迎观看这段视频内容。",
                f"视频总时长{int(self.duration)}秒",
                0.0,
                self.duration
            )

        narration_script.append({
            "timestamp": 0,
            "duration": 3,
            "text": opening_text
        })

        # One narration entry per recognized speech segment, enriched with
        # scene info from key frames falling inside the segment.
        for segment in segments:
            start_time = segment['start']
            end_time = segment['end']

            processed_segments += 1
            if processed_segments % 10 == 0 or processed_segments == total_segments:
                print(f"[{self._get_timestamp()}] 处理进度: {processed_segments}/{total_segments} ({processed_segments/total_segments*100:.1f}%)")

            # Key frames whose timestamp falls within this segment.
            relevant_frames = [f for f in key_frames
                             if start_time <= f['timestamp'] <= end_time]

            # Collect scene descriptions for the prompt.
            scene_descriptions = []
            for frame_info in relevant_frames:
                scene_type = self.analyze_scene(frame_info['frame'])
                scene_descriptions.append(f"在{int(frame_info['timestamp'])}秒处是{scene_type}")

            # Base text straight from the transcript.
            basic_text = segment['text']
            scene_info = "，".join(scene_descriptions) if scene_descriptions else "无特殊场景变化"

            # Optionally polish the text via Doubao.
            if self.use_doubao and self.doubao_api_key and basic_text.strip():
                enhanced_text = self.generate_narration_with_doubao_api(basic_text, scene_info, start_time, self.duration)
            else:
                enhanced_text = basic_text + (" " + scene_info if scene_info else "")

            narration_script.append({
                "timestamp": start_time,
                "duration": end_time - start_time,
                "text": enhanced_text
            })

        # Closing line.
        print(f"[{self._get_timestamp()}] 生成结尾语...")
        closing_text = "感谢观看这段视频内容。"
        if self.use_doubao and self.doubao_api_key:
            closing_text = self.generate_narration_with_doubao_api(
                "感谢观看这段视频内容。",
                "视频播放结束",
                self.duration,
                self.duration
            )

        narration_script.append({
            "timestamp": self.duration,
            "duration": 3,
            "text": closing_text
        })

        narration_time = time.time() - narration_start
        print(f"[{self._get_timestamp()}] 解说文案生成完成，共生成 {len(narration_script)} 条文案，耗时: {narration_time:.2f}秒")

        return narration_script

    def process_video(self, video_path):
        """Run the full pipeline on *video_path*.

        Returns:
            A dict with 'transcription', 'narration_script',
            'video_duration', 'use_doubao' and 'processing_time'.
        """
        print(f"[{self._get_timestamp()}] 开始处理视频: {video_path}")
        total_start = time.time()

        print("========================================")
        print("           视频处理流程开始")
        print("========================================")

        print("正在提取音频...")
        audio_path = self.extract_audio(video_path)

        # Bug fix: clean up the temporary audio file even when a later
        # stage raises; the original leaked it on failure.
        try:
            print("正在进行语音识别...")
            transcription, segments = self.transcribe_audio(audio_path)

            print("正在分析视频内容...")
            key_frames = self.extract_key_frames(video_path)

            print("正在生成解说文案...")
            if self.use_doubao and self.doubao_api_key:
                print("正在使用豆包API优化解说文案...")
            narration = self.generate_narration(transcription, segments, key_frames)
        finally:
            if os.path.exists(audio_path):
                os.remove(audio_path)
                print(f"[{self._get_timestamp()}] 已清理临时音频文件")

        total_time = time.time() - total_start
        print("========================================")
        print("           视频处理流程完成")
        print("========================================")
        print(f"[{self._get_timestamp()}] 总处理时间: {total_time:.2f}秒")

        return {
            "transcription": transcription,
            "narration_script": narration,
            "video_duration": self.duration,
            "use_doubao": self.use_doubao,
            "processing_time": total_time
        }

# Usage example
def main():
    """Run the narration pipeline on a sample video and save the results."""
    print("=" * 60)
    print("           视频解说文案生成工具")
    print("=" * 60)

    # Security fix: never hard-code API credentials in source control.
    # Supply the key via the DOUBAO_API_KEY environment variable; when it
    # is absent the narrator falls back to basic text generation.
    DOUBAO_API_KEY = os.environ.get("DOUBAO_API_KEY")

    # Video file to process (edit the path as needed).
    video_path = "zww.mp4"

    # Bug fix: validate the input file BEFORE loading the heavy Whisper
    # model so a missing file fails fast instead of after model load.
    if not os.path.exists(video_path):
        print(f"[{datetime.now().strftime('%H:%M:%S')}] 错误：找不到视频文件 {video_path}")
        print("请确保视频文件存在，或修改代码中的文件路径")
        return

    narrator = VideoNarrator(model_size="large-v3", use_doubao=True, doubao_api_key=DOUBAO_API_KEY)

    try:
        result = narrator.process_video(video_path)

        # Print the transcript.
        print("\n" + "="*60)
        print("语音转文字结果:")
        print("="*60)
        print(result["transcription"])

        # Print the narration script with timestamps.
        print("\n" + "="*60)
        print("解说文案:")
        print("="*60)
        for item in result["narration_script"]:
            print(f"[{item['timestamp']:.1f}s] {item['text']}")

        # Persist everything as JSON.
        with open("narration_script.json", "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=2)

        print(f"\n结果已保存到 narration_script.json 文件中")
        if result["use_doubao"]:
            print("已使用豆包API优化解说文案")

        print(f"总处理时间: {result['processing_time']:.2f}秒")
        print(f"视频时长: {narrator._format_time(result['video_duration'])}")
        # Bug fix: guard against zero-length videos before computing the
        # realtime speed ratio (division by zero).
        if result["video_duration"] > 0:
            print(f"处理速度: {result['processing_time']/result['video_duration']:.2f}x 实时")

    except Exception as e:
        print(f"[{datetime.now().strftime('%H:%M:%S')}] 处理过程中出现错误: {str(e)}")

if __name__ == "__main__":
    main()
