# -*- coding: utf-8 -*-
"""
AI 视频价值增强器 (Demo)

本脚本是一个演示项目，旨在展示如何利用 AI 为现有视频内容增加两种核心价值：
1.  **双语字幕**: 提高内容的可及性，触达更广泛的跨语言观众。
2.  **知识卡片**: 提炼视频核心知识点，帮助观众高效学习和回顾。

工作流程:
1.  下载指定的 YouTube 视频。
2.  使用 Whisper 生成原始 SRT 字幕。
3.  调用大语言模型 (LLM) 完成两项任务:
    a. 翻译字幕内容，以生成双语字幕。
    b. 分析字幕内容，提取核心知识点。
4.  使用 FFmpeg 和 MoviePy 将双语字幕和知识卡片合成到新视频中。
"""

import os
import json
import time
import torch
import whisper
import yt_dlp
from openai import OpenAI
import moviepy.editor as mp
import re
import subprocess
import numpy as np
from PIL import Image, ImageDraw


# ====================================================================
# 1. 配置模块 (Configuration)
# ====================================================================

# --- User configuration section: edit the values below ---
# URL of the video to process (short knowledge-style videos under 5 minutes recommended)
VIDEO_URL = "https://www.youtube.com/watch?v=5OGFD9h_yVY&pp=ygUKZ2VtaW5pIGNsaQ%3D%3D" # Example: a popular-science short video

# Target translation language
TARGET_LANGUAGE = "zh"

# Working directory for all intermediate and output files
WORK_DIR = "./video_demo"

# Whether to skip knowledge-card extraction (True = skip, False = run it)
SKIP_KNOWLEDGE_CARD_EXTRACTION = False

# Whether to skip summary and keyword extraction (True = skip, False = run it)
SKIP_SUMMARY_AND_KEYWORDS_EXTRACTION = False

# OpenAI/DeepSeek API configuration (make sure the API key is set in the environment).
# NOTE(review): the OpenAI() constructor rarely raises on a missing key —
# failures typically surface on the first request, inside call_llm().
try:
    client = OpenAI(
        api_key=os.getenv("DEEPSEEK_API_KEY"),
        base_url="https://openrouter.ai/api/v1"
    )
except Exception:
    client = None
    print("[警告] 未能初始化 LLM 客户端。请检查 API Key 配置。")

# ====================================================================
# 2. 核心功能函数 (Core Functions)
# ====================================================================

def setup_environment():
    """Ensure the working directory exists (idempotent)."""
    os.makedirs(WORK_DIR, exist_ok=True)  # no-op when the directory is already there
    print(f"[INFO] 工作目录 '{WORK_DIR}' 已准备就绪。")

def download_video(url: str) -> "tuple[str, str]":
    """Download the video at *url* into WORK_DIR as an mp4.

    Args:
        url: the video page URL understood by yt-dlp.

    Returns:
        (video_path, base_name): path of the downloaded mp4 and its
        filename stem, used to derive sibling artifact names.
    """
    # The download is forced to a fixed output name, so an existing file
    # means a previous run already fetched the video — reuse it (idempotent).
    video_filename = os.path.join(WORK_DIR, 'original_video.mp4')  # yt-dlp is configured below to merge into mp4
    if os.path.exists(video_filename):
        print(f"[INFO] 视频文件已存在: {video_filename}，跳过下载。")
        base_name = os.path.splitext(os.path.basename(video_filename))[0]
        return video_filename, base_name

    print(f"[INFO] 正在从 {url} 下载视频...")
    ydl_opts = {
        'outtmpl': os.path.join(WORK_DIR, 'original_video.%(ext)s'),
        'format': 'best[ext=mp4]/best',   # prefer a ready-made mp4 stream
        'merge_output_format': 'mp4',     # otherwise merge into mp4
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(url, download=True)
        video_path = ydl.prepare_filename(info)
        base_name = os.path.splitext(os.path.basename(video_path))[0]
    print(f"[INFO] 视频下载完成: {video_path}")
    return video_path, base_name

def transcribe_to_srt(video_path: str, srt_path: str) -> (str, str):
    """使用 Whisper 生成 SRT 字幕文件，并返回所有文本内容和检测到的语言。"""
    if os.path.exists(srt_path):
        print(f"[INFO] 原始 SRT 字幕已存在: {srt_path}，跳过转录。")
        full_text = ""
        detected_language = "en" # 假设如果文件存在，默认是英文，后续可以优化为从文件名或元数据中读取
        with open(srt_path, 'r', encoding='utf-8') as f:
            for line in f:
                # 提取字幕文本行，跳过时间戳和序号
                if ' --> ' not in line and line.strip() and not line.strip().isdigit():
                    full_text += line.strip() + " "
        return full_text.strip(), detected_language

    print("[INFO] 正在使用 Whisper 生成字幕... (这可能需要一些时间)")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = whisper.load_model("base", device=device)
    result = model.transcribe(video_path, verbose=False)

    full_text = ""
    detected_language = result["language"]
    print(f"[INFO] 检测到的语言: {detected_language}")

    with open(srt_path, 'w', encoding='utf-8') as f:
        for i, seg in enumerate(result["segments"], start=1):
            start_time = time.strftime("%H:%M:%S", time.gmtime(seg['start'])) + f",{int((seg['start'] % 1) * 1000):03d}"
            end_time = time.strftime("%H:%M:%S", time.gmtime(seg['end'])) + f",{int((seg['end'] % 1) * 1000):03d}"
            text = seg['text'].strip()
            f.write(f"{i}\n{start_time} --> {end_time}\n{text}\n\n")
            full_text += text + " "
    
    print(f"[INFO] 原文 SRT 字幕已生成: {srt_path}")
    return full_text.strip(), detected_language

def call_llm(prompt: str, is_json: bool = False, model: str = "deepseek/deepseek-r1-0528:free") -> str:
    """Send *prompt* as a single-turn chat completion and return the reply.

    Args:
        prompt: user message sent to the model.
        is_json: when True, strip a surrounding markdown code fence so the
            caller can feed the result straight to json.loads.
        model: OpenRouter model identifier; parameterized (with the original
            hard-coded value as default) so callers can override it.

    Returns:
        The (possibly fence-stripped) reply text, or "" when the client is
        unavailable or the request fails.
    """
    if not client:
        return ""
    try:
        messages = [{"role": "user", "content": prompt}]
        response = client.chat.completions.create(model=model, messages=messages)
        content = response.choices[0].message.content.strip()
        if is_json:
            # Models often wrap JSON in a ```json fence, sometimes a bare ```
            # fence with stray whitespace; extract the payload when present,
            # otherwise return the content verbatim.
            match = re.search(r'```(?:json)?[ \t]*\n(.*?)```', content, re.DOTALL)
            return match.group(1) if match else content
        return content
    except Exception as e:
        print(f"[错误] 调用 LLM 时出错: {e}")
        return ""

def generate_chapters(full_text: str, srt_path: str, min_chapters: int, max_chapters: int, min_chapter_duration: int) -> str:
    """Ask the LLM for a YouTube-format chapter list for the video.

    The SRT content is embedded in the prompt purely as a timing reference.
    Returns the raw chapter text from the LLM, or "" when nothing came back.
    """
    print("[INFO] 正在请求 LLM 生成章节信息...")
    with open(srt_path, 'r', encoding='utf-8') as f:
        srt_content = f.read()

    prompt = f"""Analyze the following video transcript and its SRT content to generate a list of video chapters for YouTube. Follow these rules strictly:
1.  Create between {min_chapters} and {max_chapters} chapters.
2.  Each chapter must be at least {min_chapter_duration} seconds long.
3.  The first chapter MUST start at exactly 00:00.
4.  Format each chapter as `HH:MM:SS Chapter Title` or `MM:SS Chapter Title` on a new line.
5.  Do NOT include any introductory, concluding, or explanatory text. Only output the chapter list.

Example:
00:00 Introduction
01:23 Key Feature 1
03:45 Demonstration
05:10 Conclusion

Video Transcript:
{full_text}

SRT Content (for timing reference):
{srt_content}

Chapters:"""

    chapters_text = call_llm(prompt)
    if not chapters_text:
        print("[警告] LLM 未能生成章节信息。")
        return ""
    print("[INFO] 章节信息已从 LLM 成功获取。")
    return chapters_text

def process_subtitles(original_video_path: str, base_name: str, work_dir: str):
    """Build a bilingual SRT for the video and return (full_text, srt_path).

    Transcribes (or reuses) the original-language SRT, asks the LLM to
    translate each subtitle block, and merges both into one bilingual file.
    Returns (None, None) when transcription or translation yields nothing.
    """
    original_srt_path = os.path.join(work_dir, f"{base_name}_original.srt")
    bilingual_srt_path = os.path.join(work_dir, f"{base_name}_bilingual.srt")

    if os.path.exists(bilingual_srt_path):
        print(f"[INFO] 双语 SRT 文件 {bilingual_srt_path} 已存在，跳过转录和翻译。")
        # The transcript text is still needed downstream (knowledge cards,
        # chapters), so re-read the original SRT even when translation is skipped.
        full_text, _ = transcribe_to_srt(original_video_path, original_srt_path)
        return (full_text, bilingual_srt_path) if full_text else (None, None)

    print("[INFO] 生成新的双语 SRT 文件...")
    full_text, detected_language = transcribe_to_srt(original_video_path, original_srt_path)
    if not full_text:
        return None, None

    # Chinese sources get translated to English; everything else to Chinese.
    if detected_language == "zh":
        source_language_for_prompt, target_translation_language = "Chinese", "English"
    else:
        source_language_for_prompt, target_translation_language = "English", "Chinese"

    print("[INFO] 正在请求 LLM 进行逐块翻译...")
    translated_blocks = []
    for block_text in parse_srt_blocks(original_srt_path):
        translation_prompt = f"Provide a direct and semantically equivalent translation of the following {source_language_for_prompt} text into {target_translation_language}. Do NOT add any extra information, interpretations, headings, or introductory/concluding remarks. The output should ONLY contain the translated text.\n\n{block_text}"
        translated_block = call_llm(translation_prompt)
        # Strip parenthesised asides (half- or full-width) the model may add.
        translated_blocks.append(re.sub(r"[\(（][^\)）]*[\)）]", "", translated_block).strip())

    if not translated_blocks:
        return None, None

    create_bilingual_srt(original_srt_path, translated_blocks, bilingual_srt_path)
    return full_text, bilingual_srt_path

def generate_summary_and_keywords(full_text: str) -> dict:
    """Ask the LLM for a summary plus keywords, tolerating malformed replies.

    Returns a dict with "summary" (str) and "keywords" (list); both empty
    when the LLM fails or returns unparseable JSON.
    """
    print("[INFO] 正在请求 LLM 生成摘要和关键词...")
    prompt = f"""Based on the following video transcript, generate a concise summary (around 100-150 words) and 5-10 relevant keywords. Return the result as a valid JSON object in a ```json block. The format should be: 
{{"summary": "Your concise summary here.", "keywords": ["keyword1", "keyword2", "keyword3"]}}.\n\nVideo Transcript:\n{full_text}\n\nJSON:"""

    fallback = {"summary": "", "keywords": []}
    result_json_str = call_llm(prompt, is_json=True)

    # Guard against empty replies before attempting to parse.
    if not result_json_str:
        print("[警告] LLM 未返回有效的摘要和关键词内容。")
        return fallback

    try:
        parsed = json.loads(result_json_str)
    except (json.JSONDecodeError, TypeError) as e:
        print(f"[警告] 未能解析摘要和关键词JSON: {e}")
        return fallback
    print("[INFO] 摘要和关键词已生成。")
    return parsed

def create_bilingual_srt(original_srt_path: str, translated_blocks: list, output_srt_path: str):
    """Merge the original SRT with per-block translations into a bilingual SRT.

    Each output block keeps the original index, timing line, and ALL original
    text lines (the previous version only kept the first text line, silently
    dropping the rest of multi-line subtitles), followed by the matching
    translated text. Blocks with no matching translation get an empty
    translation line; malformed blocks are skipped.
    """
    print("[INFO] 正在创建双语字幕文件...")
    with open(original_srt_path, 'r', encoding='utf-8') as f_orig, \
         open(output_srt_path, 'w', encoding='utf-8') as f_out:

        original_blocks = f_orig.read().strip().split('\n\n')

        for i, block in enumerate(original_blocks):
            parts = block.split('\n')
            if len(parts) < 3:
                continue  # malformed block: missing index, timing or text line
            original_text = '\n'.join(parts[2:])  # preserve multi-line subtitle text
            translated_line = translated_blocks[i] if i < len(translated_blocks) else ""

            f_out.write(f"{parts[0]}\n")
            f_out.write(f"{parts[1]}\n")
            f_out.write(f"{original_text}\n{translated_line}\n\n")
    print(f"[INFO] 双语 SRT 字幕已生成: {output_srt_path}")

def parse_srt_blocks(srt_path: str) -> list:
    """Return the text content of every subtitle block in an SRT file.

    Index and timestamp lines are discarded; multi-line subtitle text is
    kept, re-joined with newlines after trimming blank/whitespace-only lines.
    """
    with open(srt_path, 'r', encoding='utf-8') as f:
        content = f.read()

    # One match per block: an index line, a timestamp line, then the text
    # lines up to the next blank-line separator (or the end of the file).
    block_re = r'\d+\n\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}\n([\s\S]*?)(?:\n\n|\Z)'

    texts = []
    for raw_text in re.findall(block_re, content):
        stripped_lines = (ln.strip() for ln in raw_text.split('\n'))
        texts.append('\n'.join(ln for ln in stripped_lines if ln))
    return texts

def generate_ai_metadata(full_text: str, original_srt_path: str, original_video_path: str):
    """生成所有AI相关的元数据，包括带时间戳的知识卡片，并对章节进行严格验证。"""
    knowledge_cards = []
    if not SKIP_KNOWLEDGE_CARD_EXTRACTION:
        print("[INFO] 正在请求 LLM 提取带时间戳的知识卡片...")
        
        srt_content = ""
        try:
            with open(original_srt_path, 'r', encoding='utf-8') as f:
                srt_content = f.read()
        except FileNotFoundError:
            print(f"[警告] SRT文件未找到: {original_srt_path}，无法为知识卡片提供精确定时。")

        knowledge_prompt = f"""From the following video transcript, extract the 3-5 most important key concepts. For each concept, provide a brief, one-sentence definition and its exact start time in seconds.

Use the provided SRT content for precise timing. The 'start_time' must be a float number representing the moment the concept is first mentioned.

Return the result as a valid JSON object in a ```json block. The format must be an array of objects, like this example:
[
  {{
    "concept": "Statefulness",
    "definition": "The ability of a system to remember previous interactions and events.",
    "start_time": 49.5
  }},
  {{
    "concept": "Toolchain",
    "definition": "The set of programming tools used to create a software product.",
    "start_time": 111.2
  }}
]

Transcript:
{full_text}

SRT Content (for timing reference):
{srt_content}
"""
        knowledge_json_str = call_llm(knowledge_prompt, is_json=True)
        try:
            knowledge_cards = json.loads(knowledge_json_str)
            print("[INFO] 已成功提取带时间戳的知识卡片。")
        except (json.JSONDecodeError, TypeError):
            knowledge_cards = []
            print("[警告] 未能解析知识卡片JSON，将跳过此步骤。")
    else:
        print("[INFO] 已跳过知识卡片提取。")

    # --- 章节生成与验证 ---
    chapters = ""
    video_duration = mp.VideoFileClip(original_video_path).duration
    
    # 根据视频时长动态调整章节要求
    if video_duration < 5 * 60: 
        min_chapters, max_chapters, min_chapter_duration = 3, 5, 20
    elif video_duration < 15 * 60: 
        min_chapters, max_chapters, min_chapter_duration = 5, 10, 30
    else: 
        min_chapters, max_chapters, min_chapter_duration = 8, 15, 45

    chapters_raw = generate_chapters(full_text, original_srt_path, min_chapters, max_chapters, min_chapter_duration)
    if chapters_raw:
        print("\n[INFO] LLM 原始章节信息:\n---\n" + chapters_raw + "\n---")
        # 1. 清理和格式化
        chapter_lines = []
        # 正则表达式现在接受 HH:MM:SS 和 MM:SS 格式
        time_format_re = r'^(?:\d{2}:)?\d{2}:\d{2} .+'
        for line in chapters_raw.split('\n'):
            if re.match(time_format_re, line.strip()):
                chapter_lines.append(line.strip())
        
        # 2. 严格验证
        is_valid = True
        if len(chapter_lines) < 3:
            print("[警告] 章节数量少于3个，不符合YouTube要求。")
            is_valid = False
        if is_valid and not chapter_lines[0].startswith("00:00"):
            print("[警告] 第一个章节不从 00:00 开始，不符合YouTube要求。")
            is_valid = False
        
        if is_valid:
            chapters = "\n".join(chapter_lines)
            print("\n[INFO] 清理并验证后的章节信息:\n---\n" + chapters + "\n---")
        else:
            print("[警告] 生成的章节未通过验证，将被丢弃。")
            chapters = "" # 验证失败，清空章节

    # --- 摘要和关键词生成 ---
    video_summary = ""
    video_keywords = []
    if not SKIP_SUMMARY_AND_KEYWORDS_EXTRACTION:
        summary_and_keywords_result = generate_summary_and_keywords(full_text)
        video_summary = summary_and_keywords_result.get("summary", "")
        video_keywords = summary_and_keywords_result.get("keywords", [])
    else:
        print("[INFO] 已跳过摘要和关键词提取。")

    return knowledge_cards, chapters, video_summary, video_keywords

def create_knowledge_card_clip(knowledge_cards: list, duration: int, size: tuple) -> mp.VideoClip:
    """Build a summary clip listing all knowledge cards, with a 1 s fade-in.

    Lays out a title plus one concept/definition pair per card on a dark
    blue background; every element spans the full *duration*.
    """
    print("[INFO] 正在创建知识卡片摘要视频...")
    # macOS system font covering both Latin and CJK glyphs.
    font_path = "/System/Library/Fonts/PingFang.ttc"

    # Dark professional blue backdrop as the base layer.
    layers = [mp.ColorClip(size=size, color=(20, 20, 80), duration=duration)]

    y_cursor = 70  # vertical layout cursor
    title = mp.TextClip("Key Concepts / 核心知识点", fontsize=50, color='white', font=font_path).set_duration(duration).set_position(('center', y_cursor))
    layers.append(title)
    y_cursor += 100  # gap between the title and the first card

    for card in knowledge_cards:
        bullet = mp.TextClip(f"• {card.get('concept', '')}", fontsize=35, color='#FFD700', font=font_path, size=(size[0]-100, None), align='West').set_duration(duration).set_position((60, y_cursor))
        layers.append(bullet)
        y_cursor += 50

        definition = mp.TextClip(f"  {card.get('definition', '')}", fontsize=28, color='white', font=font_path, size=(size[0]-120, None), align='West').set_duration(duration).set_position((80, y_cursor))
        layers.append(definition)
        y_cursor += 80

    # Compose all layers and fade the whole card in over one second.
    return mp.CompositeVideoClip(layers).fadein(1.0)

def create_gradient_rounded_rect(width, height, radius, color_start, color_end, opacity):
    """Return an RGBA PIL Image: a vertical gradient clipped to a rounded rect.

    The gradient runs from *color_start* at the top to *color_end* at the
    bottom; *opacity* (0-1) sets the alpha of the filled area while the
    clipped corners stay fully transparent.
    """
    img = Image.new('RGBA', (width, height), (0, 0, 0, 0))  # start fully transparent
    draw = ImageDraw.Draw(img)

    alpha = int(255 * opacity)
    # Paint one horizontal line per row, linearly interpolating each channel.
    for row in range(height):
        red = int(color_start[0] + (color_end[0] - color_start[0]) * row / height)
        green = int(color_start[1] + (color_end[1] - color_start[1]) * row / height)
        blue = int(color_start[2] + (color_end[2] - color_start[2]) * row / height)
        draw.line([(0, row), (width, row)], fill=(red, green, blue, alpha))

    # Grayscale mask: white inside the rounded rectangle, black outside.
    mask = Image.new('L', (width, height), 0)
    ImageDraw.Draw(mask).rounded_rectangle([(0, 0), (width, height)], radius=radius, fill=255)

    # Clip the gradient to the rounded shape via the alpha channel.
    img.putalpha(mask)
    return img

def create_card_overlay(card: dict, video_size: tuple, duration: float = 7) -> mp.VideoClip:
    """Build a pop-up overlay clip for a single knowledge card.

    The overlay is a gradient, rounded-rectangle panel (right-aligned and
    vertically centered) showing the card's concept and definition, with a
    0.5 s fade-in and fade-out.

    Args:
        card: dict with optional 'concept' and 'definition' strings.
        video_size: (width, height) of the target video in pixels.
        duration: seconds the overlay stays on screen. Previously hard-coded
            to 7 in two places; now a single parameter with the same default,
            so existing callers are unaffected.
    """
    # 1. Panel geometry: 45% of video width, 70% of height, 30px right margin.
    overlay_width = int(video_size[0] * 0.45)
    overlay_height = int(video_size[1] * 0.70)
    margin = 30

    pos_x = video_size[0] - overlay_width - margin
    pos_y = (video_size[1] - overlay_height) / 2  # vertically centered
    overlay_position = (pos_x, pos_y)

    # 2. Gradient rounded-rectangle background (teal gradient, 90% opaque).
    gradient_start_color = (0, 100, 130)  # dark blue-green
    gradient_end_color = (0, 150, 180)    # lighter blue-green
    corner_radius = 25
    bg_opacity = 0.9

    background_pil = create_gradient_rounded_rect(
        overlay_width, overlay_height, corner_radius,
        gradient_start_color, gradient_end_color, bg_opacity
    )

    # Convert the PIL image into a MoviePy clip.
    bg_clip = mp.ImageClip(np.array(background_pil)).set_duration(duration)

    # 3. Text content and layout.
    concept_text = card.get('concept', '')
    def_text = card.get('definition', '')
    font_path = "/System/Library/Fonts/PingFang.ttc"  # macOS font with CJK coverage

    concept_fontsize = 22
    def_fontsize = 12

    # method='caption' wraps text automatically within the given width.
    concept_clip = mp.TextClip(
        concept_text,
        fontsize=concept_fontsize,
        color='#FFD700',  # gold, to make the concept stand out
        font=font_path,
        size=(overlay_width - 80, None),  # 40px margins on each side
        method='caption',
        align='West'
    )

    def_clip = mp.TextClip(
        def_text,
        fontsize=def_fontsize,
        color='white',
        font=font_path,
        size=(overlay_width - 80, None),  # 40px margins on each side
        method='caption',
        align='West'
    )

    # 4. Compose the text onto the panel.
    text_composite = mp.CompositeVideoClip([
        concept_clip.set_position((40, 40)),               # inner padding
        def_clip.set_position((40, 60 + concept_clip.h))   # 20px below the concept
    ], size=(overlay_width, overlay_height))

    overlay = mp.CompositeVideoClip([bg_clip, text_composite])

    # 5. Animation and final placement.
    return overlay.set_duration(duration).fadein(0.5).fadeout(0.5).set_position(overlay_position)



def burn_subtitles_and_finalize(video_path: str, srt_path: str, knowledge_cards: list, output_path: str):
    """Burn subtitles with FFmpeg, then composite timed knowledge-card overlays.

    Two-stage pipeline: (1) FFmpeg hard-burns *srt_path* into a temporary
    video (fast, handles subtitle styling); (2) MoviePy stacks one pop-up
    overlay per knowledge card at its 'start_time' and renders *output_path*.
    Cards without a 'start_time' are skipped with a warning. Returns early
    (without producing output) if the FFmpeg step fails.
    """
    print("[INFO] 正在烧录字幕并合成最终视频... (这可能需要几分钟)")
    subtitled_video_path = os.path.join(WORK_DIR, "subtitled_video_temp.mp4")

    # 1. FFmpeg subtitle burn-in.
    # NOTE(review): force_style's Fontname normally expects a font *family*
    # name rather than a file path — confirm this styling actually applies.
    font_path = "/System/Library/Fonts/PingFang.ttc"
    style = f"Fontname='{font_path}',Fontsize=14,PrimaryColour=&H00D7FF,SecondaryColour=&H000000FF,OutlineColour=&H00000000,BackColour=&H80000000,Bold=-1,Outline=1,Shadow=0.5,Alignment=2,MarginV=25"
    # List-form argv (shell=False) avoids shell quoting issues; the SRT path
    # is still embedded in the filter string, so paths containing quotes or
    # colons would additionally need ffmpeg filter escaping.
    command = [
        'ffmpeg',
        '-i', video_path,
        '-vf', f"subtitles='{srt_path}':force_style='{style}'",
        '-y',
        subtitled_video_path
    ]
    try:
        subprocess.run(command, check=True, capture_output=True, text=True)
        print("[INFO] 字幕烧录完成。")
    except subprocess.CalledProcessError as e:
        print(f"[错误] FFmpeg 字幕烧录失败。Stdout: {e.stdout}, Stderr: {e.stderr}")
        return

    # 2. Knowledge-card overlays on top of the subtitled video.
    main_clip = mp.VideoFileClip(subtitled_video_path)
    try:
        video_size = main_clip.size
        clips_to_composite = [main_clip]  # the main video is the base layer

        if knowledge_cards:
            print("[INFO] 正在创建并叠加知识卡片...")
            for card in knowledge_cards:
                start_time = card.get('start_time')
                if start_time is None:
                    print(f"[警告] 知识卡片 '{card.get('concept')}' 缺少 'start_time'，已跳过。")
                    continue

                # Build the overlay and anchor it at the card's start time.
                overlay = create_card_overlay(card, video_size).set_start(start_time)
                clips_to_composite.append(overlay)

                # Log the display window for verification.
                end_time = start_time + overlay.duration
                print(f"[VERIFICATION] 知识卡片 '{card.get('concept')}' 将在 {start_time:.1f}s 至 {end_time:.1f}s 显示。")

        # 3. Render the final video.
        final_clip = mp.CompositeVideoClip(clips_to_composite)
        final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac", preset="fast")
    finally:
        # Release the reader's file handle / ffmpeg subprocess (the original
        # version leaked it), even if rendering raised.
        main_clip.close()

    # 4. Remove the intermediate file only after a successful render.
    os.remove(subtitled_video_path)

    print(f"[成功] 最终视频已生成: {output_path}")

from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload


def get_authenticated_service(token_file: str, secret_file: str):
    """Return an authenticated YouTube API client via OAuth 2.0.

    Reuses cached credentials from *token_file* when they are still valid,
    refreshes an expired token when a refresh token is available, and
    otherwise runs the interactive installed-app flow with *secret_file*.
    New or refreshed credentials are written back to *token_file*.

    Raises:
        FileNotFoundError: when a fresh auth flow is required but
            *secret_file* does not exist.
    """
    SCOPES = ['https://www.googleapis.com/auth/youtube.upload', 'https://www.googleapis.com/auth/youtube.force-ssl']

    credentials = Credentials.from_authorized_user_file(token_file, SCOPES) if os.path.exists(token_file) else None

    if not credentials or not credentials.valid:
        can_refresh = bool(credentials and credentials.expired and credentials.refresh_token)
        if can_refresh:
            credentials.refresh(Request())
        else:
            if not os.path.exists(secret_file):
                raise FileNotFoundError(f"[错误] 客户端密钥文件不存在: {secret_file}")
            flow = InstalledAppFlow.from_client_secrets_file(secret_file, SCOPES)
            # run_local_server suits local/desktop environments.
            credentials = flow.run_local_server(port=0)
        # Persist the (new or refreshed) credentials for the next run.
        with open(token_file, "w") as token:
            token.write(credentials.to_json())

    return build("youtube", "v3", credentials=credentials)

def upload_video_to_youtube(**kwargs):
    """
    Upload a video to YouTube; supports scheduled publishing, playlists,
    and custom thumbnails.

    Required kwargs: token_file, secret_file, file_path, title, description,
    tags, category_id, privacy.
    Optional kwargs: publish_at (only applied when privacy is
    private/unlisted), playlist (playlist ID), thumbnail_path.
    """
    print("[INFO] 准备上传视频到 YouTube...")
    youtube = get_authenticated_service(kwargs['token_file'], kwargs['secret_file'])

    request_body = {
        "snippet": {
            "title": kwargs['title'], "description": kwargs['description'],
            "tags": kwargs['tags'], "categoryId": kwargs['category_id']
        },
        "status": {"privacyStatus": kwargs['privacy'], "madeForKids": False}
    }
    # Scheduled publishing only takes effect for non-public uploads.
    if kwargs.get('publish_at') and kwargs['privacy'] in ["private", "unlisted"]:
        request_body["status"]["publishAt"] = kwargs['publish_at']

    media = MediaFileUpload(kwargs['file_path'], mimetype="video/mp4", resumable=True)
    request = youtube.videos().insert(part="snippet,status", body=request_body, media_body=media)
    
    # Resumable upload loop with simple retry (up to 3 attempts, 5 s apart).
    # NOTE(review): the retry counter is never reset after a successful
    # chunk, so 3 failures across the whole upload abort it — confirm intended.
    response = None
    retry = 0
    while response is None:
        try:
            status, response = request.next_chunk()
            if status:
                print(f"上传进度: {int(status.progress() * 100)}%")
        except Exception as e:
            if retry < 3:
                retry += 1
                print(f"[警告] 上传中断，正在重试 ({retry}/3)...\n错误: {e}")
                time.sleep(5)
            else:
                print("[错误] 上传失败，已达最大重试次数。")
                return

    video_id = response.get('id')
    print(f"[成功] 视频已上传: https://www.youtube.com/watch?v={video_id}")

    # Optional post-upload steps: playlist membership and custom thumbnail.
    if kwargs.get('playlist'):
        youtube.playlistItems().insert(part="snippet", body={
            "snippet": {"playlistId": kwargs['playlist'], "resourceId": {"kind": "youtube#video", "videoId": video_id}}
        }).execute()
        print(f"[INFO] 视频已添加到播放列表。")

    if kwargs.get('thumbnail_path'):
        youtube.thumbnails().set(videoId=video_id, media_body=MediaFileUpload(kwargs['thumbnail_path'])).execute()
        print("[INFO] 自定义封面已设置。")

def upload_to_youtube(output_path: str, base_name: str, chapters: str, video_summary: str, video_keywords: list):
    """Upload the enhanced video to YouTube with the AI-generated metadata."""
    print("\n[INFO] 准备上传视频到 YouTube...")

    cwd = os.getcwd()
    token_file = os.path.join(cwd, "qian_token.json")
    secret_file = os.path.join(cwd, "qian_client_secrets.json")

    # Derive a human-readable title from the artifact base name.
    video_title = base_name.replace("_original", "").replace("_", " ").strip()

    # When validated chapters exist they lead the description so YouTube
    # can pick them up; otherwise the summary comes first.
    description_parts = [video_summary, "这是一个由 AI 增强的视频。\n更多精彩内容，敬请关注！"]
    if chapters:
        description_parts.insert(0, chapters)
    video_description = "\n\n".join(description_parts)

    print("\n[INFO] 最终视频描述:\n---\n" + video_description + "\n---")

    # Fall back to generic tags when the LLM produced no keywords.
    final_video_tags = video_keywords or ["AI", "视频增强", "双语字幕", "知识卡片", "YouTube自动化"]

    video_category_id = "27"            # presumably the "Education" category — verify against YouTube's category list
    video_privacy_status = "private"    # keep uploads private by default

    try:
        upload_video_to_youtube(
            file_path=output_path,
            title=video_title,
            description=video_description,
            tags=final_video_tags,
            category_id=video_category_id,
            privacy=video_privacy_status,
            token_file=token_file,
            secret_file=secret_file
        )
    except Exception as e:
        print(f"[错误] 视频上传到 YouTube 失败: {e}")

# ====================================================================
# 3. 主执行流程 (Main Workflow)
# ====================================================================

def main():
    """Run the full demo pipeline end to end."""
    pipeline_start = time.time()
    setup_environment()

    # Step 1: fetch the source video.
    original_video_path, base_name = download_video(VIDEO_URL)
    if not original_video_path:
        return

    # Step 2: transcription + translation into a bilingual SRT.
    full_text, bilingual_srt_path = process_subtitles(original_video_path, base_name, WORK_DIR)
    if not full_text or not bilingual_srt_path:
        return

    # Step 3: AI metadata (timestamped knowledge cards, chapters, summary, tags).
    original_srt_path = os.path.join(WORK_DIR, f"{base_name}_original.srt")
    knowledge_cards, chapters, video_summary, video_keywords = generate_ai_metadata(full_text, original_srt_path, original_video_path)

    # Step 4: burn subtitles and composite the knowledge cards.
    output_path = os.path.join(WORK_DIR, f"{base_name}_enhanced.mp4")
    burn_subtitles_and_finalize(original_video_path, bilingual_srt_path, knowledge_cards, output_path)

    print(f"--- Demo流程结束，总用时: {time.time() - pipeline_start:.2f} 秒 ---")

    # Step 5: publish the result.
    upload_to_youtube(output_path, base_name, chapters, video_summary, video_keywords)

if __name__ == "__main__":
    # Refuse to run without an initialized LLM client — every pipeline
    # stage past transcription depends on it.
    if client:
        main()
    else:
        print("[错误] LLM 客户端未初始化，无法执行。请在脚本顶部配置 API Key。")