"""
负责所有视频/音频/图像处理
"""

import os
import time
import torch
import whisper
import yt_dlp
import moviepy.editor as mp
import re
import subprocess
import numpy as np
from PIL import Image, ImageDraw
import logging

from . import config
from .ai_services import call_llm
from .cache_service import cache_result, CacheKeys
from .tts_services import create_audio_from_text, is_tts_available


def _convert_to_mp4_if_needed(video_path: str) -> str:
    """Ensure the downloaded video is in MP4 format, converting when necessary."""
    base, ext = os.path.splitext(video_path)
    if ext.lower() == ".mp4":
        return video_path

    mp4_path = f"{base}.mp4"
    logging.info(f"正在将下载的视频转换为MP4格式: {video_path} -> {mp4_path}")
    command = [
        "ffmpeg",
        "-i",
        video_path,
        "-c:v",
        "libx264",
        "-c:a",
        "aac",
        "-y",
        mp4_path,
    ]
    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode == 0 and os.path.exists(mp4_path):
        try:
            os.remove(video_path)
        except OSError:
            logging.warning(f"删除原始视频失败: {video_path}")
        logging.info("视频格式转换完成。")
        return mp4_path

    logging.warning(f"MP4 转换失败，继续使用原始视频文件: {video_path}\n错误输出: {result.stderr}")
    return video_path

def setup_environment():
    """Create the working directory (idempotent) so later steps can write into it."""
    os.makedirs(config.WORK_DIR, exist_ok=True)
    logging.info(f"工作目录 '{config.WORK_DIR}' 已准备就绪。")

def get_available_videos(video_type: str) -> list:
    """Return metadata for every usable video of the given category.

    Args:
        video_type: Category sub-directory name, e.g. 'intros', 'openings',
            'endings', 'transitions'.

    Returns:
        list: One dict per readable video with keys 'filename', 'name',
        'path', 'size' (bytes), 'duration' (seconds), 'resolution' and
        'size_mb'. Empty when the directory is missing or unreadable.
    """
    video_dir = os.path.join(config.PROJECT_ROOT, "videos", video_type)

    if not os.path.exists(video_dir):
        logging.warning(f"{video_type}视频目录不存在: {video_dir}")
        return []

    videos = []
    supported_formats = ['.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v']
    supported_suffixes = tuple(supported_formats)

    try:
        for filename in os.listdir(video_dir):
            # Skip hidden files and the directory's README.
            if filename.startswith('.') or filename.lower() == 'readme.md':
                continue

            file_path = os.path.join(video_dir, filename)
            if os.path.isfile(file_path) and filename.lower().endswith(supported_suffixes):
                file_size = os.path.getsize(file_path)

                # Probe the clip for duration/resolution; unreadable files are skipped.
                try:
                    with mp.VideoFileClip(file_path) as clip:
                        duration = clip.duration
                        width, height = clip.size

                    videos.append({
                        'filename': filename,
                        'name': os.path.splitext(filename)[0],
                        'path': file_path,
                        'size': file_size,
                        'duration': duration,
                        'resolution': f"{width}x{height}",
                        'size_mb': round(file_size / (1024 * 1024), 1)
                    })
                except Exception as e:
                    # Bug fix: the warning previously omitted the offending filename.
                    logging.warning(f"无法读取视频文件信息: {filename} - {e}")
                    continue
    except Exception as e:
        logging.error(f"读取{video_type}视频目录失败: {e}")
        return []

    logging.info(f"找到 {len(videos)} 个可用的{video_type}视频")
    return videos

def get_available_intros():
    """Return metadata for all available intro (片头) videos."""
    return get_available_videos('intros')

def get_available_openings():
    """Return metadata for all available opening-narration (开场口播) videos."""
    return get_available_videos('openings')

def download_video(url: str) -> tuple:
    """Download *url* into the working directory, trying several format strategies.

    An already-downloaded ``original_video.mp4`` short-circuits the download.
    Non-MP4 results are transcoded via ``_convert_to_mp4_if_needed``.

    Args:
        url: Video page URL (any platform supported by yt-dlp).

    Returns:
        tuple: ``(video_path, base_name)`` on success, ``(None, None)`` after
        all strategies fail.
    """
    video_filename = os.path.join(config.WORK_DIR, "original_video.mp4")
    if os.path.exists(video_filename):
        logging.info(f"视频文件已存在: {video_filename}，跳过下载。")
        base_name = os.path.splitext(os.path.basename(video_filename))[0]
        return video_filename, base_name

    platform = detect_video_platform(url)
    logging.info(f"正在从 {platform} 下载视频: {url}")

    # Three format selectors, from most to least specific; everything else
    # about the yt-dlp configuration is identical, so build the dicts in-loop
    # instead of repeating them verbatim.
    format_strategies = ["best[ext=mp4]/best", "bv*+ba/best", "best"]

    last_error = None
    for attempt, format_selector in enumerate(format_strategies, start=1):
        opts = {
            "outtmpl": os.path.join(config.WORK_DIR, "original_video.%(ext)s"),
            "format": format_selector,
            "merge_output_format": "mp4",
            "writesubtitles": False,
            "ignoreerrors": False,
        }
        try:
            logging.info(f"使用下载策略 #{attempt}，格式参数: {opts['format']}")
            with yt_dlp.YoutubeDL(opts) as ydl:
                info = ydl.extract_info(url, download=True)
                video_path = ydl.prepare_filename(info)
            video_path = _convert_to_mp4_if_needed(video_path)
            base_name = os.path.splitext(os.path.basename(video_path))[0]
            logging.info(f"视频下载完成: {video_path}")
            return video_path, base_name
        except Exception as exc:
            last_error = exc
            logging.warning(f"下载策略 #{attempt} 失败: {exc}")

    logging.error(f"从 {platform} 下载视频失败: {last_error}")
    return None, None

def detect_video_platform(url: str) -> str:
    """Map a video URL to a human-readable platform label.

    Matching is a case-insensitive substring search, checked in a fixed
    priority order; unknown hosts get a generic fallback label.
    """
    normalized = url.lower()
    platform_markers = [
        (('youtube.com', 'youtu.be'), 'YouTube'),
        (('bilibili.com', 'b23.tv'), 'Bilibili (B站)'),
        (('tiktok.com',), 'TikTok'),
        (('douyin.com',), '抖音'),
        (('twitter.com', 'x.com'), 'Twitter/X'),
        (('facebook.com', 'fb.watch'), 'Facebook'),
        (('instagram.com',), 'Instagram'),
        (('vimeo.com',), 'Vimeo'),
        (('dailymotion.com',), 'Dailymotion'),
    ]
    for markers, label in platform_markers:
        if any(marker in normalized for marker in markers):
            return label
    return '未知平台 (尝试通用下载)'

async def save_uploaded_video(video_file) -> tuple:
    """Persist an uploaded video into the work dir and normalise it to MP4.

    Args:
        video_file: Upload object exposing ``filename`` and an async ``read()``
            (e.g. a FastAPI ``UploadFile``).

    Returns:
        tuple: ``(video_path, base_name)`` on success, ``(None, None)`` when
        the extension is unsupported or saving fails.
    """
    # Validate the extension before writing anything to disk.
    original_filename = video_file.filename
    file_extension = os.path.splitext(original_filename)[1].lower()

    supported_formats = ['.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v']
    if file_extension not in supported_formats:
        logging.error(f"不支持的视频格式: {file_extension}")
        return None, None

    video_filename = os.path.join(config.WORK_DIR, f'original_video{file_extension}')
    base_name = "original_video"

    # Replace any leftover file from a previous run.
    if os.path.exists(video_filename):
        os.remove(video_filename)
        logging.info(f"删除已存在的视频文件: {video_filename}")

    try:
        logging.info(f"正在保存上传的视频文件: {original_filename}")

        with open(video_filename, "wb") as buffer:
            content = await video_file.read()
            buffer.write(content)

        logging.info(f"视频文件保存完成: {video_filename}")

        # Transcode non-MP4 uploads so every downstream step sees one format.
        # (Cleanup: the unused `import shutil` and the redundant function-local
        # `import subprocess` — already imported at module level — were removed.)
        if file_extension != '.mp4':
            mp4_filename = os.path.join(config.WORK_DIR, 'original_video.mp4')
            logging.info(f"正在转换视频格式为MP4: {file_extension} -> .mp4")

            command = [
                'ffmpeg',
                '-i', video_filename,
                '-c:v', 'libx264',
                '-c:a', 'aac',
                '-y',
                mp4_filename
            ]

            result = subprocess.run(command, capture_output=True, text=True)
            if result.returncode == 0:
                # Drop the original upload and continue with the MP4.
                os.remove(video_filename)
                video_filename = mp4_filename
                logging.info(f"视频格式转换完成: {mp4_filename}")
            else:
                logging.warning(f"视频格式转换失败，使用原格式: {result.stderr}")

        return video_filename, base_name

    except Exception as e:
        logging.error(f"保存视频文件失败: {e}")
        return None, None

def transcribe_to_srt(video_path: str, srt_path: str) -> tuple:
    """Generate an SRT subtitle file for *video_path* with Whisper.

    An existing SRT is re-read instead of re-transcribing (its language is
    unknown and defaults to "en"). Transcripts are cached, keyed by a hash of
    the media file's head and tail bytes.

    Args:
        video_path: Media file to transcribe.
        srt_path: Destination SRT path.

    Returns:
        tuple: ``(full_text, detected_language)``.
    """

    def _format_timestamp(seconds: float) -> str:
        # SRT timestamps use the "HH:MM:SS,mmm" form.
        return time.strftime("%H:%M:%S", time.gmtime(seconds)) + f",{int((seconds % 1) * 1000):03d}"

    def _write_srt(segments) -> str:
        # Write numbered cues to srt_path; return the concatenated plain text.
        text_parts = []
        with open(srt_path, 'w', encoding='utf-8') as f:
            for i, seg in enumerate(segments, start=1):
                text = seg['text'].strip()
                f.write(f"{i}\n{_format_timestamp(seg['start'])} --> {_format_timestamp(seg['end'])}\n{text}\n\n")
                text_parts.append(text)
        return " ".join(text_parts)

    if os.path.exists(srt_path):
        logging.info(f"原始 SRT 字幕已存在: {srt_path}，跳过转录。")
        full_text = ""
        detected_language = "en"
        with open(srt_path, 'r', encoding='utf-8') as f:
            for line in f:
                # Keep only text lines (skip indices and timestamp lines).
                if ' --> ' not in line and line.strip() and not line.strip().isdigit():
                    full_text += line.strip() + " "
        return full_text.strip(), detected_language

    # Bug fix: this import was indented into the early-return branch above, so
    # it never ran on the transcription path and caching was silently dead
    # (the resulting NameError was swallowed by the broad except below).
    from .cache_service import cache_service
    import hashlib

    # Hash the first and last 1MB of the file as a cheap cache key. For files
    # smaller than 1MB the tail read simply overlaps the head.
    cache_key = None
    try:
        chunk = 1024 * 1024
        file_size = os.path.getsize(video_path)
        with open(video_path, 'rb') as f:
            start_chunk = f.read(chunk)
            f.seek(max(file_size - chunk, 0))
            end_chunk = f.read(chunk)
        file_hash = hashlib.md5(start_chunk + end_chunk).hexdigest()

        cache_key = f"{CacheKeys.WHISPER_TRANSCRIPT}:{file_hash}"
        cached_result = cache_service.get(cache_key)

        if cached_result:
            logging.info("从缓存获取Whisper转录结果")
            full_text, detected_language, segments = cached_result
            _write_srt(segments)
            logging.info(f"从缓存生成 SRT 字幕文件: {srt_path}")
            return full_text, detected_language

    except Exception as e:
        logging.warning(f"缓存处理失败，继续正常转录: {e}")

    logging.info("正在使用 Whisper 生成字幕... (这可能需要一些时间)")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = whisper.load_model("base", device=device)
    result = model.transcribe(video_path, verbose=False)

    detected_language = result["language"]
    logging.info(f"检测到的语言: {detected_language}")

    full_text = _write_srt(result["segments"])

    # Cache the transcript for 2 hours. Skipped when the key could not be
    # built (bug fix: previously `cache_key` could be referenced unbound, and
    # the local tuple shadowed the imported `cache_result` decorator).
    if cache_key is not None:
        try:
            cache_service.set(cache_key, (full_text, detected_language, result["segments"]), ttl=7200)
            logging.info("Whisper转录结果已缓存")
        except Exception as e:
            logging.warning(f"缓存转录结果失败: {e}")

    logging.info(f"原文 SRT 字幕已生成: {srt_path}")
    return full_text.strip(), detected_language

def process_subtitles(original_video_path: str, base_name: str, work_dir: str):
    """Produce the original-language SRT plus a bilingual SRT via LLM translation.

    When the bilingual SRT already exists, only the transcription step runs
    (to recover the full text). Chinese sources are translated to English;
    everything else to Chinese.

    Returns:
        tuple: ``(full_text, bilingual_srt_path)``, or ``(None, None)`` when
        transcription or translation yields nothing.
    """
    original_srt_path = os.path.join(work_dir, f"{base_name}_original.srt")
    bilingual_srt_path = os.path.join(work_dir, f"{base_name}_bilingual.srt")

    if os.path.exists(bilingual_srt_path):
        logging.info(f"双语 SRT 文件 {bilingual_srt_path} 已存在，跳过转录和翻译。")
        full_text, _ = transcribe_to_srt(original_video_path, original_srt_path)
        if not full_text:
            return None, None
        return full_text, bilingual_srt_path

    logging.info("生成新的双语 SRT 文件...")
    full_text, detected_language = transcribe_to_srt(original_video_path, original_srt_path)
    if not full_text:
        return None, None

    if detected_language == "zh":
        source_language_for_prompt, target_translation_language = "Chinese", "English"
    else:
        source_language_for_prompt, target_translation_language = "English", "Chinese"

    logging.info("正在请求 LLM 进行逐块翻译...")
    translated_blocks = []
    for block_text in parse_srt_blocks(original_srt_path):
        translation_prompt = f"Provide a direct and semantically equivalent translation of the following {source_language_for_prompt} text into {target_translation_language}. Do NOT add any extra information, interpretations, headings, or introductory/concluding remarks. The output should ONLY contain the translated text.\n\n{block_text}"
        raw_translation = call_llm(translation_prompt)
        # Strip any parenthesised asides the model sneaks into the output.
        translated_blocks.append(re.sub(r"[\(（][^\)）]*[\)）]", "", raw_translation).strip())

    if not translated_blocks:
        return None, None

    create_bilingual_srt(original_srt_path, translated_blocks, bilingual_srt_path)
    return full_text, bilingual_srt_path

def create_bilingual_srt(original_srt_path: str, translated_blocks: list, output_srt_path: str):
    """Merge an original SRT with translated lines into a bilingual SRT file.

    Each output cue keeps the original index, timing and first text line,
    followed by the matching translated line (empty when no translation is
    available for that index). Malformed cues (fewer than 3 lines) are skipped.
    """
    logging.info("正在创建双语字幕文件...")
    with open(original_srt_path, 'r', encoding='utf-8') as source_file, \
         open(output_srt_path, 'w', encoding='utf-8') as target_file:

        source_blocks = source_file.read().strip().split('\n\n')

        for index, raw_block in enumerate(source_blocks):
            lines = raw_block.split('\n')
            if len(lines) < 3:
                continue
            translation = translated_blocks[index] if index < len(translated_blocks) else ""
            target_file.write(f"{lines[0]}\n{lines[1]}\n{lines[2]}\n{translation}\n\n")
    logging.info(f"双语 SRT 字幕已生成: {output_srt_path}")

def parse_srt_blocks(srt_path: str) -> list:
    """Extract the text payload of every cue in an SRT file.

    Returns one string per cue; multi-line cue text is re-joined with '\\n'
    after stripping each line and dropping blank lines.
    """
    with open(srt_path, 'r', encoding='utf-8') as handle:
        srt_content = handle.read()

    # Everything between an "index + timestamp" header and the next blank
    # line (or end of file) is the cue text.
    cue_pattern = r'\d+\n\d{2}:\d{2}:\d{2},\d{3} --> \d{2}:\d{2}:\d{2},\d{3}\n([\s\S]*?)(?:\n\n|\Z)'
    return [
        '\n'.join(line.strip() for line in cue_text.split('\n') if line.strip())
        for cue_text in re.findall(cue_pattern, srt_content)
    ]

def create_knowledge_card_clip(knowledge_cards: list, duration: int, size: tuple) -> mp.VideoClip:
    """Build a fade-in summary clip that lists each knowledge card.

    Args:
        knowledge_cards: Dicts with 'concept' and 'definition' keys.
        duration: Clip length in seconds.
        size: (width, height) of the target video frame.
    """
    logging.info("正在创建知识卡片摘要视频...")
    layers = [mp.ColorClip(size=size, color=(20, 20, 80), duration=duration)]
    y_offset = 70

    header = (mp.TextClip("Key Concepts / 核心知识点", fontsize=50, color='white', font=config.FONT_PATH)
              .set_duration(duration)
              .set_position(('center', y_offset)))
    layers.append(header)
    y_offset += 100

    for card in knowledge_cards:
        concept_line = f"• {card.get('concept', '')}"
        definition_line = f"  {card.get('definition', '')}"

        layers.append(
            mp.TextClip(concept_line, fontsize=35, color='#FFD700', font=config.FONT_PATH,
                        size=(size[0] - 100, None), align='West')
            .set_duration(duration)
            .set_position((60, y_offset))
        )
        y_offset += 50

        layers.append(
            mp.TextClip(definition_line, fontsize=28, color='white', font=config.FONT_PATH,
                        size=(size[0] - 120, None), align='West')
            .set_duration(duration)
            .set_position((80, y_offset))
        )
        y_offset += 80

    # Compose all layers and fade the whole card in over one second.
    return mp.CompositeVideoClip(layers).fadein(1.0)

def create_gradient_rounded_rect(width, height, radius, color_start, color_end, opacity):
    """Render a vertically blended, rounded-corner RGBA image with PIL.

    The gradient runs from *color_start* (top) to *color_end* (bottom);
    *opacity* (0..1) sets the uniform alpha before the rounded-rectangle
    mask is applied.
    """
    canvas = Image.new('RGBA', (width, height), (0, 0, 0, 0))
    painter = ImageDraw.Draw(canvas)
    alpha = int(255 * opacity)

    # Paint one horizontal line per row, linearly interpolating R, G and B.
    for row in range(height):
        channels = tuple(
            int(color_start[i] + (color_end[i] - color_start[i]) * row / height)
            for i in range(3)
        )
        painter.line([(0, row), (width, row)], fill=channels + (alpha,))

    # Punch out the rounded-rectangle silhouette via a grayscale mask.
    silhouette = Image.new('L', (width, height), 0)
    ImageDraw.Draw(silhouette).rounded_rectangle([(0, 0), (width, height)], radius=radius, fill=255)
    canvas.putalpha(silhouette)
    return canvas

def create_card_overlay(card: dict, video_size: tuple) -> mp.VideoClip:
    """Build the pop-up overlay clip for one knowledge card.

    The card is a gradient rounded-rectangle panel pinned to the right side
    of the frame, shown for 7 seconds with a 0.5s fade in and out.
    """
    panel_width = int(video_size[0] * 0.45)
    panel_height = int(video_size[1] * 0.70)
    margin = 30

    # Right-aligned horizontally, vertically centred.
    panel_position = (
        video_size[0] - panel_width - margin,
        (video_size[1] - panel_height) / 2,
    )

    panel_image = create_gradient_rounded_rect(
        panel_width, panel_height, 25,
        (0, 100, 130), (0, 150, 180), 0.9,
    )
    panel_clip = mp.ImageClip(np.array(panel_image)).set_duration(7)

    title_clip = mp.TextClip(
        card.get('concept', ''),
        fontsize=26,
        color='#FFD700',
        font=config.FONT_PATH,
        size=(panel_width - 80, None),
        method='caption',
        align='West',
    )

    body_clip = mp.TextClip(
        card.get('definition', ''),
        fontsize=16,
        color='white',
        font=config.FONT_PATH,
        size=(panel_width - 80, None),
        method='caption',
        align='West',
    )

    # Stack the title above the definition inside the panel bounds.
    text_layer = mp.CompositeVideoClip([
        title_clip.set_position((40, 40)),
        body_clip.set_position((40, 60 + title_clip.h)),
    ], size=(panel_width, panel_height))

    composed = mp.CompositeVideoClip([panel_clip, text_layer])
    return composed.set_duration(7).fadein(0.5).fadeout(0.5).set_position(panel_position)

def create_static_background_video(original_video_path: str, background_image_path: str) -> str:
    """Render a video pairing a static image with the original audio track.

    The background image is scaled to cover the original video's frame
    (preserving aspect ratio) and centre-cropped, then combined with the
    original audio for the original duration.

    Args:
        original_video_path: Source video (supplies audio, duration, FPS, size).
        background_image_path: Image used as the static background.

    Returns:
        str: Path of the generated video, or *original_video_path* on failure.
    """
    logging.info("正在创建静态背景视频...")

    original_clip = None
    final_clip = None
    try:
        # Probe the original video for timing/geometry and grab its audio.
        original_clip = mp.VideoFileClip(original_video_path)
        video_duration = original_clip.duration
        video_fps = original_clip.fps
        video_size = original_clip.size
        audio = original_clip.audio

        logging.info(f"原视频信息: 时长={video_duration:.2f}秒, FPS={video_fps}, 尺寸={video_size}")

        background_image = Image.open(background_image_path)

        img_width, img_height = background_image.size
        video_width, video_height = video_size

        # Use the larger scale factor so the image fully covers the frame.
        scale_w = video_width / img_width
        scale_h = video_height / img_height
        scale = max(scale_w, scale_h)

        new_width = int(img_width * scale)
        new_height = int(img_height * scale)

        background_image = background_image.resize((new_width, new_height), Image.Resampling.LANCZOS)

        # Centre-crop any overflow back down to the video frame.
        if new_width > video_width or new_height > video_height:
            left = (new_width - video_width) // 2
            top = (new_height - video_height) // 2
            right = left + video_width
            bottom = top + video_height
            background_image = background_image.crop((left, top, right, bottom))

        img_array = np.array(background_image)

        # Static image for the full duration, with the original audio re-attached.
        image_clip = mp.ImageClip(img_array, duration=video_duration)
        final_clip = image_clip.set_audio(audio)

        static_video_path = os.path.join(config.WORK_DIR, "static_background_video.mp4")

        final_clip.write_videofile(
            static_video_path,
            fps=video_fps,
            codec='libx264',
            audio_codec='aac',
            temp_audiofile='temp-audio.m4a',
            remove_temp=True,
            verbose=False,
            logger=None
        )

        logging.info(f"静态背景视频已创建: {static_video_path}")
        return static_video_path

    except Exception as e:
        logging.error(f"创建静态背景视频失败: {e}")
        # Fall back to the unmodified original video.
        return original_video_path
    finally:
        # Bug fix: clips were previously leaked whenever any step raised;
        # close them on every path (best effort).
        for clip in (original_clip, final_clip):
            if clip is not None:
                try:
                    clip.close()
                except Exception:
                    pass

def burn_subtitles_and_finalize(video_path: str, srt_path: str, knowledge_cards: list, output_path: str, 
                                background_image_path: str = None, selected_intro: str = "", selected_opening: str = ""):
    """Burn subtitles with FFmpeg, then composite timed knowledge-card overlays with MoviePy.

    Supports three-part concatenation: intro -> opening narration -> main content.
    Subtitle and card timestamps are shifted by the combined intro/opening duration.

    Args:
        video_path: Path of the main (original) video.
        srt_path: Subtitle file to burn in; burning is skipped when missing/empty.
        knowledge_cards: Card dicts; each needs a 'start_time' to be shown.
        output_path: Where the final composited video is written.
        background_image_path: Optional static background image (podcast mode).
        selected_intro: Filename of the chosen intro video, or "" for none.
        selected_opening: Filename of the chosen opening-narration video, or "" for none.
    """
    logging.info("正在烧录字幕并合成最终视频... (这可能需要几分钟)")

    # In static-background mode, first replace the visuals with the still image.
    if background_image_path:
        logging.info("正在创建静态背景视频...")
        base_video_path = create_static_background_video(video_path, background_image_path)
    else:
        base_video_path = video_path

    # Probe the main video so prepended segments can be matched to it.
    main_clip = mp.VideoFileClip(base_video_path)
    main_resolution = main_clip.size
    main_fps = main_clip.fps
    main_clip.close()

    logging.info(f"主视频分辨率: {main_resolution}, FPS: {main_fps}")

    # Segments to concatenate, in playback order.
    video_segments = []
    total_intro_duration = 0  # Combined intro+opening length; offsets subtitle/card timestamps.

    # Optional intro segment.
    if selected_intro and selected_intro.strip():
        intro_videos = get_available_intros()
        intro_video = next((v for v in intro_videos if v['filename'] == selected_intro), None)

        if intro_video:
            logging.info(f"添加片头视频: {intro_video['filename']}")

            # Re-encode the intro to the main video's resolution.
            intro_processed_path = os.path.join(config.WORK_DIR, "intro_processed.mp4")
            processed_intro = _process_video_segment(intro_video['path'], main_resolution, intro_processed_path)

            if processed_intro:
                video_segments.append(processed_intro)
                total_intro_duration += intro_video['duration']
                logging.info(f"片头视频已添加，时长: {intro_video['duration']:.2f}秒")
            else:
                logging.warning("片头视频处理失败，将跳过")
        else:
            logging.warning(f"未找到选择的片头视频: {selected_intro}")

    # Optional opening-narration segment.
    if selected_opening and selected_opening.strip():
        opening_videos = get_available_openings()
        opening_video = next((v for v in opening_videos if v['filename'] == selected_opening), None)

        if opening_video:
            logging.info(f"添加开场口播视频: {opening_video['filename']}")

            # Re-encode the opening to the main video's resolution.
            opening_processed_path = os.path.join(config.WORK_DIR, "opening_processed.mp4")
            processed_opening = _process_video_segment(opening_video['path'], main_resolution, opening_processed_path)

            if processed_opening:
                video_segments.append(processed_opening)
                total_intro_duration += opening_video['duration']
                logging.info(f"开场口播视频已添加，时长: {opening_video['duration']:.2f}秒")
            else:
                logging.warning("开场口播视频处理失败，将跳过")
        else:
            logging.warning(f"未找到选择的开场口播视频: {selected_opening}")

    # The (still subtitle-free) main video always goes last.
    video_segments.append(base_video_path)

    # Concatenate only when there is more than one segment.
    if len(video_segments) > 1:
        logging.info(f"开始三段式视频拼接，共 {len(video_segments)} 个片段")
        concatenated_video_path = os.path.join(config.WORK_DIR, "concatenated_video.mp4")

        # Prefer the lossless ffmpeg concat; fall back to MoviePy re-encoding.
        if _concatenate_videos_with_ffmpeg(video_segments, concatenated_video_path):
            video_for_subtitles = concatenated_video_path
        else:
            logging.warning("ffmpeg拼接失败，回退到MoviePy拼接")
            video_for_subtitles = _concatenate_videos_with_moviepy(video_segments, concatenated_video_path)
    else:
        video_for_subtitles = base_video_path

    if not video_for_subtitles:
        logging.error("视频拼接失败")
        return

    subtitled_video_path = None
    has_subtitles = bool(srt_path and os.path.exists(srt_path))

    if has_subtitles:
        # Burn subtitles into the fully concatenated video.
        subtitled_video_path = os.path.join(config.WORK_DIR, "subtitled_video_temp.mp4")

        # Shift subtitle timestamps past the intro/opening segments.
        if total_intro_duration > 0:
            logging.info(f"调整字幕时间戳，偏移: {total_intro_duration:.2f}秒")
            adjusted_srt_path = os.path.join(config.WORK_DIR, "adjusted_subtitles.srt")
            _adjust_srt_timestamps(srt_path, adjusted_srt_path, total_intro_duration)
            srt_for_burning = adjusted_srt_path
        else:
            srt_for_burning = srt_path

        style = f"Fontname='{config.FONT_PATH}',Fontsize=14,PrimaryColour=&H00D7FF,SecondaryColour=&H000000FF,OutlineColour=&H00000000,BackColour=&H80000000,Bold=-1,Outline=1,Shadow=0.5,Alignment=2,MarginV=25"
        command = [
            'ffmpeg',
            '-i', video_for_subtitles,
            '-vf', f"subtitles='{srt_for_burning}':force_style='{style}'",
            '-y',
            subtitled_video_path
        ]
        try:
            subprocess.run(command, check=True, capture_output=True, text=True)
            logging.info("字幕烧录完成。")
        except subprocess.CalledProcessError as e:
            logging.error(f"FFmpeg 字幕烧录失败。Stdout: {e.stdout}, Stderr: {e.stderr}")
            return

        main_video_for_composite = subtitled_video_path
    else:
        logging.info("未提供字幕文件，跳过字幕烧录步骤。")
        main_video_for_composite = video_for_subtitles

    # Load the final base video for knowledge-card overlaying.
    main_clip = mp.VideoFileClip(main_video_for_composite)
    video_size = main_clip.size
    clips_to_composite = [main_clip]

    if knowledge_cards:
        logging.info("正在创建并叠加知识卡片...")
        for card in knowledge_cards:
            start_time = card.get('start_time')
            if start_time is None:
                logging.warning(f"知识卡片 '{card.get('concept')}' 缺少 'start_time'，已跳过。")
                continue

            # Shift the card's timestamp past the intro/opening segments.
            adjusted_start_time = start_time + total_intro_duration

            overlay = create_card_overlay(card, video_size)
            overlay = overlay.set_start(adjusted_start_time)
            clips_to_composite.append(overlay)

            end_time = adjusted_start_time + overlay.duration
            logging.info(f"知识卡片 '{card.get('concept')}' 将在 {adjusted_start_time:.1f}s 至 {end_time:.1f}s 显示。")

    final_clip = mp.CompositeVideoClip(clips_to_composite)
    final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac", preset="fast")

    # Remove the temporary subtitled video.
    if subtitled_video_path and os.path.exists(subtitled_video_path):
        os.remove(subtitled_video_path)

    # Remove the processed intro/opening videos and other intermediates.
    for temp_file in ["intro_processed.mp4", "opening_processed.mp4", "concatenated_video.mp4", "adjusted_subtitles.srt"]:
        temp_path = os.path.join(config.WORK_DIR, temp_file)
        if os.path.exists(temp_path):
            os.remove(temp_path)

    logging.info(f"三段式视频拼接完成: {output_path}")


def _adjust_srt_timestamps(input_srt_path: str, output_srt_path: str, offset_seconds: float):
    """
    调整SRT字幕文件的时间戳
    
    Args:
        input_srt_path: 输入SRT文件路径
        output_srt_path: 输出SRT文件路径
        offset_seconds: 时间偏移量（秒）
    """
    import re
    
    def add_time_offset(time_str: str, offset: float) -> str:
        # 解析时间字符串 "HH:MM:SS,mmm"
        match = re.match(r'(\d{2}):(\d{2}):(\d{2}),(\d{3})', time_str)
        if not match:
            return time_str
        
        hours, minutes, seconds, milliseconds = map(int, match.groups())
        
        # 转换为总毫秒数
        total_ms = (hours * 3600 + minutes * 60 + seconds) * 1000 + milliseconds
        # 添加偏移
        total_ms += int(offset * 1000)
        
        # 转换回时间格式
        new_hours = total_ms // (3600 * 1000)
        new_minutes = (total_ms % (3600 * 1000)) // (60 * 1000)
        new_seconds = (total_ms % (60 * 1000)) // 1000
        new_milliseconds = total_ms % 1000
        
        return f"{new_hours:02d}:{new_minutes:02d}:{new_seconds:02d},{new_milliseconds:03d}"
    
    try:
        with open(input_srt_path, 'r', encoding='utf-8') as f:
            content = f.read()
        
        # 使用正则表达式匹配时间戳行
        timestamp_pattern = r'(\d{2}:\d{2}:\d{2},\d{3}) --> (\d{2}:\d{2}:\d{2},\d{3})'
        
        def replace_timestamp(match):
            start_time = add_time_offset(match.group(1), offset_seconds)
            end_time = add_time_offset(match.group(2), offset_seconds)
            return f"{start_time} --> {end_time}"
        
        adjusted_content = re.sub(timestamp_pattern, replace_timestamp, content)
        
        with open(output_srt_path, 'w', encoding='utf-8') as f:
            f.write(adjusted_content)
            
        logging.info(f"字幕时间戳已调整，偏移 {offset_seconds:.2f} 秒")
        
    except Exception as e:
        logging.error(f"调整字幕时间戳失败: {e}")


def _concatenate_videos_with_ffmpeg(video_list: list, output_path: str) -> bool:
    """Concatenate videos losslessly with ffmpeg's concat demuxer.

    Args:
        video_list: Ordered video file paths.
        output_path: Destination for the joined video.

    Returns:
        bool: True when ffmpeg exits successfully, False otherwise.
    """
    try:
        # Write the demuxer's file list.
        filelist_path = os.path.join(config.WORK_DIR, "filelist.txt")
        with open(filelist_path, 'w', encoding='utf-8') as f:
            for video_path in video_list:
                # Bug fix: inside a single-quoted concat-demuxer string a
                # quote is escaped as '\'' ; the previous shell-style
                # '"'"' sequence is not understood by ffmpeg.
                escaped_path = video_path.replace("'", "'\\''")
                f.write(f"file '{escaped_path}'\n")

        # -safe 0 allows absolute paths; -c copy avoids re-encoding.
        command = [
            'ffmpeg',
            '-f', 'concat',
            '-safe', '0',
            '-i', filelist_path,
            '-c', 'copy',
            '-y',
            output_path
        ]

        result = subprocess.run(command, capture_output=True, text=True)

        # Remove the file list regardless of outcome.
        if os.path.exists(filelist_path):
            os.remove(filelist_path)

        if result.returncode == 0:
            logging.info(f"FFmpeg视频拼接成功: {output_path}")
            return True
        else:
            logging.error(f"FFmpeg视频拼接失败: {result.stderr}")
            return False

    except Exception as e:
        logging.error(f"FFmpeg视频拼接异常: {e}")
        return False


def _concatenate_videos_with_moviepy(video_list: list, output_path: str) -> str:
    """Concatenate videos with MoviePy (fallback when ffmpeg concat fails).

    Args:
        video_list: Ordered video file paths; missing files are skipped.
        output_path: Destination for the joined video.

    Returns:
        str: The output path on success, None otherwise.
    """
    clips = []
    final_clip = None
    try:
        for video_path in video_list:
            if os.path.exists(video_path):
                clips.append(mp.VideoFileClip(video_path))
            else:
                logging.warning(f"视频文件不存在，跳过: {video_path}")

        if not clips:
            logging.error("没有有效的视频片段可以拼接")
            return None

        final_clip = mp.concatenate_videoclips(clips)
        final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac", preset="fast")

        logging.info(f"MoviePy视频拼接成功: {output_path}")
        return output_path

    except Exception as e:
        logging.error(f"MoviePy视频拼接失败: {e}")
        return None
    finally:
        # Bug fix: clips were previously leaked when loading, concatenation
        # or writing raised; close everything on every path (best effort).
        for clip in clips:
            try:
                clip.close()
            except Exception:
                pass
        if final_clip is not None:
            try:
                final_clip.close()
            except Exception:
                pass


def _create_multi_speaker_audio(text: str, language: str = "zh", model_type: str = "bark") -> str:
    """
    Build a multi-speaker dialogue audio track from annotated text.

    Each utterance must be written as ``[speaker]: content``. Distinct
    speakers are assigned voices in order of first appearance, each
    utterance is synthesized separately, and the pieces are concatenated
    with a 0.3 s pause between them.

    Args:
        text: Dialogue text in ``[speaker]: content`` form.
        language: Language code; "zh" vs anything else selects the Bark
            preset pool.
        model_type: TTS model type; only "bark" and "xtts_v2" are accepted
            for multi-speaker synthesis — anything else returns None.

    Returns:
        Path to the merged WAV file, or None on failure. Falls back to
        single-speaker synthesis when no dialogue markers are found.
    """
    import re
    import moviepy.editor as mp
    import hashlib
    
    try:
        logging.info("正在解析多人对话文本...")
        
        # Parse the dialogue format "[speaker]: content"; each content span
        # runs until the next "[" or end of text.
        dialogue_pattern = r'\[([^\]]+)\]:\s*([^\[]*?)(?=\[|$)'
        matches = re.findall(dialogue_pattern, text, re.MULTILINE | re.DOTALL)
        
        logging.info(f"对话格式解析结果: 找到 {len(matches)} 个对话片段")
        for i, (speaker, content) in enumerate(matches):
            logging.info(f"  片段{i+1}: [{speaker}] -> {content.strip()[:50]}...")
        
        if not matches:
            logging.warning("未找到对话格式，回退到单人模式")
            return create_audio_from_text(text, language, "female", model_type)
        
        # Only these models support multi-speaker synthesis here.
        multi_speaker_models = ["bark", "xtts_v2"]
        if model_type not in multi_speaker_models:
            logging.error(f"模型 '{model_type}' 不支持多人对话模式")
            logging.error("支持多人对话的模型: " + ", ".join(multi_speaker_models))
            logging.error("请在页面上选择 Bark 或 XTTS v2 模型，或切换为单人模式")
            return None
        
        # Map each speaker name to a voice, chosen per model type.
        speaker_voice_map = {}
        if model_type == "bark":
            # Bark selects voices through preset identifiers.
            if language == "zh":
                voice_presets = ["v2/zh_speaker_7", "v2/zh_speaker_8", "v2/zh_speaker_6", "v2/zh_speaker_9"]
            else:
                voice_presets = ["v2/en_speaker_6", "v2/en_speaker_9", "v2/en_speaker_3", "v2/en_speaker_1"]
        elif model_type == "xtts_v2":
            # XTTS v2 clones voices from reference audio (speaker_wav); these
            # identifiers select per-speaker reference files during generation.
            voice_presets = ["speaker_A", "speaker_B", "speaker_C", "speaker_D"]
            logging.info("XTTS v2使用speaker_wav参数，将为不同说话人生成音频")
        else:
            # Fallback pool distinguishing speakers by gender only.
            # NOTE(review): unreachable — model_type is already restricted to
            # multi_speaker_models above.
            voice_presets = ["female", "male", "female", "male"]
        
        # Synthesize every utterance and collect the resulting clips.
        audio_clips = []
        speaker_count = 0
        
        for speaker_name, content in matches:
            speaker_name = speaker_name.strip()
            content = content.strip()
            
            if not content:
                continue
            
            # First time we see a speaker: assign the next voice round-robin.
            if speaker_name not in speaker_voice_map:
                voice_preset = voice_presets[speaker_count % len(voice_presets)]
                speaker_voice_map[speaker_name] = voice_preset
                speaker_count += 1
                logging.info(f"说话人 '{speaker_name}' 分配语音: {voice_preset}")
            
            voice_preset = speaker_voice_map[speaker_name]
            
            # Generate this utterance's audio via the model-specific path.
            if model_type == "bark":
                # Bark needs the voice preset passed through a dedicated helper.
                audio_path = _generate_bark_audio(content, language, voice_preset)
            elif model_type == "xtts_v2":
                # XTTS v2 needs the speaker reference handled separately.
                audio_path = _generate_xtts_audio(content, language, voice_preset)
            else:
                # Other models go through the standard TTS interface.
                audio_path = create_audio_from_text(content, language, voice_preset, model_type)
            
            if audio_path and os.path.exists(audio_path):
                audio_clip = mp.AudioFileClip(audio_path)
                audio_clips.append(audio_clip)
                # Insert a short 0.3 s silence between consecutive utterances.
                silence = mp.AudioClip(lambda t: 0, duration=0.3).set_fps(22050)
                audio_clips.append(silence)
                logging.info(f"'{speaker_name}' 的对话音频生成完成")
            else:
                logging.warning(f"'{speaker_name}' 的对话音频生成失败，跳过")
        
        if not audio_clips:
            logging.error("所有对话音频生成都失败")
            return None
        
        # Drop the trailing pause appended after the last utterance.
        if len(audio_clips) > 1:
            audio_clips.pop()
        
        # Join every utterance (and pause) into one continuous track.
        logging.info(f"正在合并 {len(audio_clips)} 个对话音频片段...")
        final_audio = mp.concatenate_audioclips(audio_clips)
        
        # Name the output after a short hash of the full dialogue text.
        text_hash = hashlib.md5(text.encode()).hexdigest()[:8]
        output_path = os.path.join(config.WORK_DIR, f"multi_speaker_audio_{text_hash}.wav")
        
        # Write the merged audio to disk.
        final_audio.write_audiofile(output_path, verbose=False, logger=None)
        
        # Release clip resources.
        for clip in audio_clips:
            if hasattr(clip, 'close'):
                clip.close()
        final_audio.close()
        
        logging.info(f"多人对话音频合成完成: {output_path}")
        return output_path
        
    except Exception as e:
        logging.error(f"多人对话音频创建失败: {e}")
        return None


def _generate_bark_audio(text: str, language: str, voice_preset: str) -> str:
    """
    Generate audio with the Bark model using an explicit voice preset.

    Args:
        text: Text to synthesize.
        language: Language code (only folded into the output-file hash here).
        voice_preset: Bark voice preset identifier, e.g. "v2/zh_speaker_7".

    Returns:
        Path to the generated WAV file, or None on failure.
    """
    from .tts_services import tts_service
    import tempfile
    import hashlib

    temp_audio_path = None
    try:
        # Make sure the Bark model is loaded before synthesis.
        if not tts_service.load_model("bark"):
            return None

        # Hash identifies this (text, language, preset) combo in the file name.
        text_hash = hashlib.md5(f"{text}_{language}_{voice_preset}_bark".encode()).hexdigest()

        # Reserve a temporary output file inside the work directory.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=config.WORK_DIR) as temp_file:
            temp_audio_path = temp_file.name

        # Call the Bark model directly so the voice preset is honored.
        tts_service.current_model.tts_to_file(
            text=text,
            file_path=temp_audio_path,
            voice_preset=voice_preset
        )

        # Reject missing or empty output.
        if not os.path.exists(temp_audio_path) or os.path.getsize(temp_audio_path) == 0:
            return None

        # Move to the final, content-addressed location.
        final_audio_path = os.path.join(config.WORK_DIR, f"bark_audio_{text_hash}.wav")
        os.rename(temp_audio_path, final_audio_path)
        temp_audio_path = None  # ownership transferred; nothing left to clean up

        return final_audio_path

    except Exception as e:
        logging.error(f"Bark音频生成失败: {e}")
        return None
    finally:
        # Bug fix: the delete=False temp file used to be left behind on every
        # failure path; remove it whenever it was not renamed into place.
        if temp_audio_path and os.path.exists(temp_audio_path):
            try:
                os.remove(temp_audio_path)
            except OSError:
                pass


def _get_or_create_reference_audio(speaker_name: str, language: str) -> str:
    """
    Return a reference-audio WAV for XTTS v2 voice cloning, creating it on demand.

    Looks for a per-speaker reference file under WORK_DIR/reference_audio and,
    when it is missing, synthesizes one with Bark from a fixed sample text
    (Bark is used because it is the stable model here).

    NOTE(review): the ``language`` argument is currently unused — sample texts
    and Bark presets are Chinese regardless; confirm whether that is intended.

    Returns:
        Path to a usable reference WAV, or None when creation fails.
    """
    ref_dir = os.path.join(config.WORK_DIR, "reference_audio")
    os.makedirs(ref_dir, exist_ok=True)

    filename_by_speaker = {
        "speaker_A": "reference_A.wav",
        "speaker_B": "reference_B.wav",
        "speaker_C": "reference_C.wav",
        "speaker_D": "reference_D.wav",
    }
    ref_path = os.path.join(ref_dir, filename_by_speaker.get(speaker_name, "reference_A.wav"))

    # Reuse an existing reference file when one is already on disk.
    if os.path.exists(ref_path):
        logging.info(f"使用已存在的参考音频: {ref_path}")
        return ref_path

    logging.info(f"创建参考音频文件: {ref_path}")

    # Longer sample texts (XTTS v2 recommends roughly 10-30 s of reference speech).
    sample_by_speaker = {
        "speaker_A": "你好，我是第一个说话人。我喜欢科技和创新，经常关注人工智能的最新发展。今天很高兴能和大家分享一些有趣的话题。希望我们的对话能给大家带来启发。",
        "speaker_B": "大家好，我是第二个说话人。我对技术充满热情，特别是在机器学习和自然语言处理方面有深入的研究。我相信技术能够改变世界，让我们的生活变得更美好。",
        "speaker_C": "欢迎大家，我是第三个说话人。作为一名资深的技术专家，我见证了互联网和移动技术的快速发展。我喜欢探讨新技术的应用前景和对社会的影响。",
        "speaker_D": "很高兴见到大家，我是第四个说话人。我专注于用户体验设计和产品创新。我认为好的技术应该以人为本，让复杂的功能变得简单易用。",
    }
    sample_text = sample_by_speaker.get(speaker_name, "你好，这是一个测试音频。我是默认的说话人，正在为语音合成系统创建参考音频文件。")

    from .tts_services import tts_service
    if tts_service.load_model("bark"):
        # Distinct Bark presets give each speaker a distinct reference voice.
        preset_by_speaker = {
            "speaker_A": "v2/zh_speaker_7",
            "speaker_B": "v2/zh_speaker_8",
            "speaker_C": "v2/zh_speaker_6",
            "speaker_D": "v2/zh_speaker_9",
        }
        voice_preset = preset_by_speaker.get(speaker_name, "v2/zh_speaker_7")
        try:
            logging.info(f"正在使用Bark创建参考音频: {voice_preset}")
            tts_service.current_model.tts_to_file(
                text=sample_text,
                file_path=ref_path,
                voice_preset=voice_preset,
            )

            # Validate the synthesized file before handing it back.
            if not os.path.exists(ref_path):
                logging.error("参考音频文件未创建")
            else:
                file_size = os.path.getsize(ref_path)
                logging.info(f"参考音频创建成功: {ref_path}, 大小: {file_size} bytes")
                if file_size > 1000:  # at least 1 KB so it isn't an empty shell
                    return ref_path
                logging.error(f"参考音频文件太小: {file_size} bytes")
                os.remove(ref_path)
        except Exception as e:
            logging.error(f"使用Bark创建参考音频失败: {e}")

    # Bark failed too — give up.
    logging.error("无法创建参考音频文件")
    return None


def _generate_xtts_audio(text: str, language: str, speaker_name: str) -> str:
    """
    Generate audio with the XTTS v2 model via speaker_wav voice cloning.

    Args:
        text: Text to synthesize.
        language: Our language code; mapped to an XTTS-supported code
            (defaults to "en" when unmapped).
        speaker_name: Logical speaker id ("speaker_A" ... "speaker_D") used
            to select/create the reference audio.

    Returns:
        Path to the generated WAV file, or None on failure.
    """
    from .tts_services import tts_service
    import tempfile
    import hashlib

    temp_audio_path = None
    try:
        # Make sure the XTTS v2 model is loaded before synthesis.
        if not tts_service.load_model("xtts_v2"):
            return None

        # Hash identifies this (text, language, speaker) combo in the file name.
        text_hash = hashlib.md5(f"{text}_{language}_{speaker_name}_xtts_v2".encode()).hexdigest()

        # Reserve a temporary output file inside the work directory.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False, dir=config.WORK_DIR) as temp_file:
            temp_audio_path = temp_file.name

        # Map our language codes to XTTS-supported codes; default to English.
        language_map = {
            "zh": "zh-cn",
            "en": "en",
            "ja": "ja",
            "ko": "ko",
            "es": "es",
            "fr": "fr",
            "de": "de"
        }
        tts_language = language_map.get(language, "en")

        logging.info(f"XTTS v2生成音频: 语言={tts_language}, speaker_type={speaker_name}, 文本={text[:30]}...")

        # XTTS v2 requires a speaker_wav reference; fetch or create one.
        logging.info("XTTS v2需要speaker_wav参数，尝试创建默认参考音频")
        speaker_wav_path = _get_or_create_reference_audio(speaker_name, language)

        if not speaker_wav_path:
            logging.error("无法获取参考音频文件")
            return None

        try:
            # Voice-clone from the reference audio.
            logging.info(f"XTTS v2使用参考音频: {speaker_wav_path}")
            tts_service.current_model.tts_to_file(
                text=text,
                file_path=temp_audio_path,
                speaker_wav=speaker_wav_path,
                language=tts_language
            )
            logging.info("XTTS v2音频生成成功")
        except Exception as e:
            logging.error(f"XTTS v2 speaker_wav模式失败: {e}")
            return None

        # Reject missing or empty output.
        if not os.path.exists(temp_audio_path) or os.path.getsize(temp_audio_path) == 0:
            return None

        # Move to the final, content-addressed location.
        final_audio_path = os.path.join(config.WORK_DIR, f"xtts_audio_{text_hash}.wav")
        os.rename(temp_audio_path, final_audio_path)
        temp_audio_path = None  # ownership transferred; nothing left to clean up

        return final_audio_path

    except Exception as e:
        logging.error(f"XTTS v2音频生成失败: {e}")
        return None
    finally:
        # Bug fix: the delete=False temp file used to be left behind on every
        # failure path; remove it whenever it was not renamed into place.
        if temp_audio_path and os.path.exists(temp_audio_path):
            try:
                os.remove(temp_audio_path)
            except OSError:
                pass


def _prepare_background_frame(background_image_path: str, video_size: tuple) -> np.ndarray:
    """Load the background image, scale it to cover video_size, center-crop, and return it as an RGB array."""
    background_image = Image.open(background_image_path)
    img_width, img_height = background_image.size
    video_width, video_height = video_size

    # Use the larger scale factor so the image fully covers the frame
    # (one axis may overflow and is cropped below).
    scale = max(video_width / img_width, video_height / img_height)
    new_width = int(img_width * scale)
    new_height = int(img_height * scale)
    background_image = background_image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    # Center-crop any overflow back to the exact frame size.
    if new_width > video_width or new_height > video_height:
        left = (new_width - video_width) // 2
        top = (new_height - video_height) // 2
        background_image = background_image.crop((left, top, left + video_width, top + video_height))

    return np.array(background_image)


def create_video_from_text(text: str, background_image_path: str, language: str = "zh", 
                          speaker: str = "female", model_type: str = "bark", dialogue_mode: str = "single") -> tuple:
    """
    Create a video from text content (TTS mode): a static background image
    plus synthesized narration.

    Args:
        text: Text content to narrate.
        background_image_path: Path to the background image.
        language: TTS language code.
        speaker: TTS speaker type (used in single-speaker mode).
        model_type: TTS model type.
        dialogue_mode: "single" or "multi" speaker dialogue mode.

    Returns:
        (video_path, base_name) on success, (None, None) on failure.
    """
    if not is_tts_available():
        # Lazily load the requested model before giving up.
        from .tts_services import tts_service
        if not tts_service.load_model(model_type):
            logging.error("TTS服务不可用，无法创建文字视频")
            return None, None
        logging.info(f"TTS模型已加载: {model_type}")

    if not text.strip():
        logging.error("文字内容为空")
        return None, None

    if not os.path.exists(background_image_path):
        logging.error(f"背景图片不存在: {background_image_path}")
        return None, None

    audio_clip = image_clip = final_clip = None
    try:
        logging.info("正在从文字生成TTS音频...")

        logging.info(f"对话模式设置为: {dialogue_mode}")
        logging.info(f"输入文本内容: {text[:200]}...")  # log only the first 200 chars

        # Pick the audio-generation path according to dialogue mode.
        if dialogue_mode == "multi":
            logging.info("启用多人对话模式")
            audio_path = _create_multi_speaker_audio(text, language, model_type)
        else:
            logging.info("使用单人模式")
            audio_path = create_audio_from_text(text, language, speaker, model_type)

        if not audio_path or not os.path.exists(audio_path):
            logging.error("TTS音频生成失败")
            return None, None

        logging.info(f"TTS音频生成完成: {audio_path}")

        # The narration length dictates the video length.
        audio_clip = mp.AudioFileClip(audio_path)
        audio_duration = audio_clip.duration
        logging.info(f"音频时长: {audio_duration:.2f}秒")

        video_size = (1920, 1080)  # standard 1080p
        img_array = _prepare_background_frame(background_image_path, video_size)

        # Compose: static background frame + TTS audio track.
        image_clip = mp.ImageClip(img_array, duration=audio_duration)
        final_clip = image_clip.set_audio(audio_clip)

        video_path = os.path.join(config.WORK_DIR, "original_video.mp4")
        base_name = "original_video"

        # Overwrite any stale output from a previous run.
        if os.path.exists(video_path):
            os.remove(video_path)

        final_clip.write_videofile(
            video_path,
            fps=30,  # standard frame rate
            codec='libx264',
            audio_codec='aac',
            temp_audiofile='temp-audio.m4a',
            remove_temp=True,
            verbose=False,
            logger=None
        )

        # The TTS audio is now muxed into the video; drop the intermediate file.
        # Bug fix: was a bare `except: pass`, which also swallowed e.g. KeyboardInterrupt.
        try:
            os.remove(audio_path)
        except OSError:
            pass

        logging.info(f"TTS视频创建完成: {video_path}")
        return video_path, base_name

    except Exception as e:
        logging.error(f"创建TTS视频失败: {e}")
        return None, None
    finally:
        # Bug fix: clips were only closed on the success path before, leaking
        # moviepy readers whenever composition or writing raised.
        for clip in (audio_clip, image_clip, final_clip):
            if clip is not None:
                try:
                    clip.close()
                except Exception:
                    pass


def create_srt_from_text(text: str, audio_duration: float, srt_path: str) -> str:
    """
    Create an SRT subtitle file from plain text (TTS mode).

    The text is split into sentences and each sentence receives a time slice
    proportional to its character count, with a 2-second minimum per cue.
    Bug fix: when the minimums exceeded audio_duration, trailing cues
    collapsed into zero-length entries pinned at the end; durations are now
    rescaled so the cues span exactly [0, audio_duration].

    Args:
        text: Source text content.
        audio_duration: Total audio length in seconds.
        srt_path: Output SRT file path.

    Returns:
        The original text (for downstream processing).
    """
    import re

    # Split on Chinese/Western sentence terminators and drop empty pieces.
    sentences = [s.strip() for s in re.split(r'[。！？.!?]', text) if s.strip()]
    if not sentences:
        return text

    # Proportional allocation by character count, 2 s minimum per sentence.
    total_chars = sum(len(s) for s in sentences)
    durations = [max(audio_duration * len(s) / total_chars, 2.0) for s in sentences]

    # Rescale when the minimums overflow the audio so no cue runs past the
    # audio or degenerates to zero length.
    allotted = sum(durations)
    if allotted > audio_duration > 0:
        factor = audio_duration / allotted
        durations = [d * factor for d in durations]

    def _fmt(t: float) -> str:
        # SRT timestamp format: HH:MM:SS,mmm
        return time.strftime("%H:%M:%S", time.gmtime(t)) + f",{int((t % 1) * 1000):03d}"

    current_time = 0.0
    with open(srt_path, 'w', encoding='utf-8') as f:
        for index, (sentence, duration) in enumerate(zip(sentences, durations), 1):
            start_time = current_time
            end_time = min(current_time + duration, audio_duration)
            f.write(f"{index}\n{_fmt(start_time)} --> {_fmt(end_time)}\n{sentence}\n\n")
            current_time = end_time

    logging.info(f"TTS模式SRT字幕文件已生成: {srt_path}")
    return text


def get_available_videos(video_type: str) -> list:
    """
    Scan videos/<video_type> and return metadata for each readable video file.

    NOTE(review): this redefines a ``get_available_videos`` declared earlier
    in this file with the same behavior; the two should be consolidated.

    Args:
        video_type: Video category ('intros', 'openings', 'endings', 'transitions').

    Returns:
        List of dicts with filename, path, duration, duration_str, resolution,
        fps and size_mb, sorted by filename. Empty list when the directory is
        missing or scanning fails.
    """
    import moviepy.editor as mp

    video_dir = os.path.join(config.PROJECT_ROOT, "videos", video_type)

    if not os.path.exists(video_dir):
        logging.warning(f"视频目录不存在: {video_dir}")
        return []

    videos = []
    # str.endswith accepts a tuple, so one call covers all supported extensions.
    supported_formats = ('.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv', '.webm', '.m4v')

    try:
        for filename in os.listdir(video_dir):
            if not filename.lower().endswith(supported_formats):
                continue
            video_path = os.path.join(video_dir, filename)

            try:
                # Probe duration/resolution/fps via moviepy, then release the handle.
                clip = mp.VideoFileClip(video_path)
                duration = clip.duration
                size = clip.size
                fps = clip.fps
                clip.close()

                file_size_mb = os.path.getsize(video_path) / (1024 * 1024)

                video_info = {
                    'filename': filename,
                    'path': video_path,
                    'duration': duration,
                    'duration_str': f"{int(duration//60):02d}:{int(duration%60):02d}",
                    'resolution': f"{size[0]}x{size[1]}",
                    'fps': fps,
                    'size_mb': f"{file_size_mb:.1f}MB"
                }
                videos.append(video_info)
                # Bug fix: these two log messages previously printed the literal
                # text "(unknown)" instead of interpolating the file name.
                logging.info(f"扫描到{video_type}视频: {filename} ({video_info['duration_str']}, {video_info['resolution']})")

            except Exception as e:
                logging.warning(f"无法读取视频文件 {filename}: {e}")
                continue

    except Exception as e:
        logging.error(f"扫描{video_type}目录失败: {e}")
        return []

    # Deterministic ordering for the UI.
    videos.sort(key=lambda x: x['filename'])
    logging.info(f"共找到 {len(videos)} 个{video_type}视频文件")
    return videos


def get_available_openings() -> list:
    """Return metadata for every available opening-narration video (videos/openings)."""
    return get_available_videos("openings")


def get_available_intros() -> list:
    """Return metadata for every available intro video (new videos/intros layout)."""
    return get_available_videos("intros")


def _process_video_segment(video_path: str, target_resolution: tuple, output_path: str) -> str:
    """
    处理视频片段，调整分辨率以匹配目标视频
    
    Args:
        video_path: 输入视频路径
        target_resolution: 目标分辨率 (width, height)
        output_path: 输出视频路径
        
    Returns:
        处理后的视频路径，失败返回None
    """
    if not os.path.exists(video_path):
        logging.error(f"视频文件不存在: {video_path}")
        return None
    
    try:
        # 使用ffmpeg调整分辨率
        command = [
            'ffmpeg',
            '-i', video_path,
            '-vf', f'scale={target_resolution[0]}:{target_resolution[1]}:force_original_aspect_ratio=decrease,pad={target_resolution[0]}:{target_resolution[1]}:(ow-iw)/2:(oh-ih)/2',
            '-c:v', 'libx264',
            '-c:a', 'aac',
            '-y',
            output_path
        ]
        
        result = subprocess.run(command, capture_output=True, text=True)
        if result.returncode == 0:
            logging.info(f"视频片段分辨率调整完成: {output_path}")
            return output_path
        else:
            logging.error(f"视频片段分辨率调整失败: {result.stderr}")
            return None
            
    except Exception as e:
        logging.error(f"处理视频片段失败: {e}")
        return None
