# from pathlib import Path
# from typing import List, Union
# import random
# import re
# from datetime import timedelta

# from tqdm import trange
# import numpy as np
# import librosa
# import cv2
# from zhon.hanzi import punctuation as zh_punc
# from moviepy.editor import ImageClip, AudioFileClip, CompositeAudioClip, \
#     CompositeVideoClip, ColorClip, VideoFileClip, VideoClip, TextClip, concatenate_audioclips 
# import moviepy.video.compositing.transitions as transfx
# from moviepy.audio.AudioClip import AudioArrayClip
# from moviepy.video.tools.subtitles import SubtitlesClip

# from mm_story_agent.base import register_tool


# def generate_srt(timestamps: List,
#                  captions: List,
#                  save_path: Union[str, Path],
#                  max_single_length: int = 30):
#     """
#     生成SRT字幕文件
    
#     Args:
#         timestamps: 时间戳列表 [[start1, end1], [start2, end2], ...]
#         captions: 字幕文本列表
#         save_path: 保存路径
#         max_single_length: 单行字幕最大长度
#     """
#     def format_time(seconds: float) -> str:
#         """将秒数转换为SRT时间格式 HH:MM:SS,mmm"""
#         td = timedelta(seconds=seconds)
#         total_seconds = int(td.total_seconds())
#         millis = int((td.total_seconds() - total_seconds) * 1000)
#         hours, remainder = divmod(total_seconds, 3600)
#         minutes, seconds = divmod(remainder, 60)
#         return f"{hours:02}:{minutes:02}:{seconds:02},{millis:03}"
    
#     srt_content = []
#     num_caps = len(timestamps)

#     for idx in range(num_caps):
#         start_time, end_time = timestamps[idx]
#         caption_chunks = split_caption(captions[idx], max_single_length).split("\n")
#         num_chunks = len(caption_chunks)
        
#         if num_chunks == 0:
#             continue

#         segment_duration = (end_time - start_time) / num_chunks

#         for chunk_idx, chunk in enumerate(caption_chunks):
#             chunk_start_time = start_time + segment_duration * chunk_idx
#             chunk_end_time = start_time + segment_duration * (chunk_idx + 1)
#             start_time_str = format_time(chunk_start_time)
#             end_time_str = format_time(chunk_end_time)
#             srt_content.append(f"{len(srt_content) // 2 + 1}\n{start_time_str} --> {end_time_str}\n{chunk}\n\n")

#     with open(save_path, 'w', encoding='utf-8') as srt_file:
#         srt_file.writelines(srt_content)


# def add_caption(captions: List,
#                 srt_path: Union[str, Path],
#                 timestamps: List,
#                 video_clip: VideoClip,
#                 max_single_length: int = 30,
#                 **caption_config):
#     """
#     为视频添加字幕
    
#     Args:
#         captions: 字幕文本列表
#         srt_path: SRT文件保存路径
#         timestamps: 时间戳列表
#         video_clip: 视频片段
#         max_single_length: 单行字幕最大长度
#         **caption_config: 字幕样式配置
#     """
#     generate_srt(timestamps, captions, srt_path, max_single_length)

#     generator = lambda txt: TextClip(txt, **caption_config)
#     subtitles = SubtitlesClip(srt_path.__str__(), generator)
#     captioned_clip = CompositeVideoClip([video_clip,
#                                          subtitles.set_position(("center", "bottom"), relative=True)])
#     return captioned_clip


# def split_keep_separator(text, separator):
#     """分割文本但保留分隔符"""
#     pattern = f'([{re.escape(separator)}])'
#     pieces = re.split(pattern, text)
#     return pieces


# def split_caption(caption, max_length=30):
#     """
#     智能分割字幕，支持中英文
    
#     Args:
#         caption: 字幕文本
#         max_length: 单行最大长度
    
#     Returns:
#         分割后的多行字幕（用\n连接）
#     """
#     lines = []
    
#     # 判断是否为英文
#     if ord(caption[0]) >= ord("a") and ord(caption[0]) <= ord("z") or \
#        ord(caption[0]) >= ord("A") and ord(caption[0]) <= ord("Z"):
#         # 英文：按单词分割
#         words = caption.split(" ")
#         current_words = []
#         for word in words:
#             if len(" ".join(current_words + [word])) <= max_length:
#                 current_words += [word]
#             else:
#                 if current_words:
#                     lines.append(" ".join(current_words))
#                     current_words = []

#         if current_words:
#             lines.append(" ".join(current_words))
#     else:
#         # 中文：按标点符号分割
#         sentences = split_keep_separator(caption, zh_punc)
#         current_line = ""
#         for sentence in sentences:
#             if len(current_line + sentence) <= max_length:
#                 current_line += sentence
#             else:
#                 if current_line:
#                     lines.append(current_line)
#                     current_line = ""
#                 if sentence.startswith(tuple(zh_punc)):
#                     if lines:
#                         lines[-1] += sentence[0]
#                     current_line = sentence[1:]
#                 else:
#                     current_line = sentence

#         if current_line:
#             lines.append(current_line.strip())

#     return '\n'.join(lines)


# def add_bottom_black_area(clip: VideoFileClip,
#                           black_area_height: int = 64):
#     """
#     在视频底部添加黑色区域（用于显示字幕）
    
#     Args:
#         clip: 视频片段
#         black_area_height: 黑色区域高度
    
#     Returns:
#         添加黑色区域后的视频片段
#     """
#     black_bar = ColorClip(size=(clip.w, black_area_height), color=(0, 0, 0), duration=clip.duration)
#     extended_clip = CompositeVideoClip([clip, black_bar.set_position(("center", "bottom"))])
#     return extended_clip


# def add_zoom_effect(clip, speed=1.0, mode='in', position='center'):
#     """
#     添加缩放效果（放大或缩小）
    
#     Args:
#         clip: 视频片段
#         speed: 缩放速度
#         mode: 'in'(放大) 或 'out'(缩小)
#         position: 缩放中心位置
    
#     Returns:
#         添加缩放效果后的视频片段
#     """
#     fps = clip.fps
#     duration = clip.duration
#     total_frames = int(duration * fps)
    
#     def main(getframe, t):
#         frame = getframe(t)
#         h, w = frame.shape[:2]
#         i = t * fps
#         if mode == 'out':
#             i = total_frames - i
#         zoom = 1 + (i * ((0.1 * speed) / total_frames))
        
#         positions = {
#             'center':      [(w - (w * zoom)) / 2,  (h - (h * zoom)) / 2],
#             'left':        [0, (h - (h * zoom)) / 2],
#             'right':       [(w - (w * zoom)), (h - (h * zoom)) / 2],
#             'top':         [(w - (w * zoom)) / 2, 0],
#             'topleft':     [0, 0],
#             'topright':    [(w - (w * zoom)), 0],
#             'bottom':      [(w - (w * zoom)) / 2, (h - (h * zoom))],
#             'bottomleft':  [0, (h - (h * zoom))],
#             'bottomright': [(w - (w * zoom)), (h - (h * zoom))]
#         }
#         tx, ty = positions[position]
#         M = np.array([[zoom, 0, tx], [0, zoom, ty]])
#         frame = cv2.warpAffine(frame, M, (w, h))
#         return frame
    
#     return clip.fl(main)


# def add_move_effect(clip, direction="left", move_ratio=0.95):
#     """
#     添加移动效果（左移或右移）
    
#     Args:
#         clip: 视频片段
#         direction: 'left'(左移) 或 'right'(右移)
#         move_ratio: 移动比例（决定放大程度）
    
#     Returns:
#         添加移动效果后的视频片段
#     """
#     orig_width = clip.size[0]
#     orig_height = clip.size[1]

#     # 放大图片以便移动
#     new_width = int(orig_width / move_ratio)
#     new_height = int(orig_height / move_ratio)
#     clip = clip.resize(width=new_width, height=new_height)

#     # 确定起始和结束位置
#     if direction == "left":
#         start_position = (0, 0)
#         end_position = (orig_width - new_width, 0) 
#     elif direction == "right":
#         start_position = (orig_width - new_width, 0)
#         end_position = (0, 0)

#     duration = clip.duration
#     moving_clip = clip.set_position(
#         lambda t: (start_position[0] + (end_position[0] - start_position[0]) / duration * t, 
#                    start_position[1])
#     )

#     final_clip = CompositeVideoClip([moving_clip], size=(orig_width, orig_height))

#     return final_clip


# def add_slide_effect(clips, slide_duration):
#     """
#     添加滑动转场效果
    
#     Args:
#         clips: 视频片段列表
#         slide_duration: 滑动持续时间
    
#     Returns:
#         添加滑动效果后的合成视频
    
#     Note:
#         要求每个片段结尾至少有 slide_duration 长度的静音
#     """
#     durations = [clip.duration for clip in clips]
    
#     # 第一个片段：只需要滑出
#     first_clip = CompositeVideoClip(
#         [clips[0].fx(transfx.slide_out, duration=slide_duration, side="left")]
#     ).set_start(0)

#     slide_out_sides = ["left"]
#     videos = [first_clip]

#     # 滑出方向到滑入方向的映射
#     out_to_in_mapping = {"left": "right", "right": "left"}
    
#     # 中间的片段：需要滑入和滑出
#     for idx, clip in enumerate(clips[1:-1], start=1):
#         # 根据前一个片段的滑出方向确定当前片段的滑入方向
#         slide_in_side = out_to_in_mapping[slide_out_sides[-1]]
        
#         # 随机选择滑出方向
#         slide_out_side = "left" if random.random() <= 0.5 else "right"
#         slide_out_sides.append(slide_out_side)
                
#         videos.append(
#             (
#                 CompositeVideoClip(
#                     [clip.fx(transfx.slide_in, duration=slide_duration, side=slide_in_side)]
#                 )
#                 .set_start(sum(durations[:idx]) - (slide_duration) * idx)
#                 .fx(transfx.slide_out, duration=slide_duration, side=slide_out_side)
#             )
#         )
    
#     # 最后一个片段：只需要滑入
#     last_clip = CompositeVideoClip(
#         [clips[-1].fx(transfx.slide_in, duration=slide_duration, 
#                       side=out_to_in_mapping[slide_out_sides[-1]])]
#     ).set_start(sum(durations[:-1]) - slide_duration * (len(clips) - 1))
#     videos.append(last_clip)

#     video = CompositeVideoClip(videos)
#     return video


# def compose_video(story_dir: Union[str, Path],
#                   save_path: Union[str, Path],
#                   captions: List,
#                   num_pages: int,
#                   fps: int = 10,
#                   audio_sample_rate: int = 16000,
#                   audio_codec: str = "mp3",
#                   caption_config: dict = {},
#                   fade_duration: float = 1.0,
#                   slide_duration: float = 0.4,
#                   zoom_speed: float = 0.5,
#                   move_ratio: float = 0.95):
#     """
#     合成视频（已移除音效和背景音乐）
    
#     Args:
#         story_dir: 故事目录（包含image和speech子目录）
#         save_path: 输出视频路径
#         captions: 字幕文本列表
#         num_pages: 页面数量
#         fps: 视频帧率
#         audio_sample_rate: 音频采样率
#         audio_codec: 音频编码器
#         caption_config: 字幕配置
#         fade_duration: 淡入淡出持续时间
#         slide_duration: 滑动转场持续时间
#         zoom_speed: 缩放速度
#         move_ratio: 移动比例
#     """
#     if not isinstance(story_dir, Path):
#         story_dir = Path(story_dir)

#     # ========== 移除了 sound_dir 和 music 相关代码 ==========
#     image_dir = story_dir / "image"
#     speech_dir = story_dir / "speech"

#     video_clips = []
#     cur_duration = 0
#     timestamps = []

#     print("\n" + "="*60)
#     print("开始合成视频")
#     print("="*60)

#     for page in trange(1, num_pages + 1, desc="处理页面"):
#         # ========== 1. 处理语音音轨 ==========
#         slide_silence = AudioArrayClip(
#             np.zeros((int(audio_sample_rate * slide_duration), 2)), 
#             fps=audio_sample_rate
#         )
#         fade_silence = AudioArrayClip(
#             np.zeros((int(audio_sample_rate * fade_duration), 2)), 
#             fps=audio_sample_rate
#         )

#         if (speech_dir / f"p{page}.wav").exists():  # 单个语音文件
#             single_utterance = True
#             speech_file = (speech_dir / f"./p{page}.wav").__str__()
#             speech_clip = AudioFileClip(speech_file, fps=audio_sample_rate)
            
#             # 添加淡入淡出静音
#             speech_clip = concatenate_audioclips([fade_silence, speech_clip, fade_silence])
            
#         else:  # 多个语音文件（分句）
#             single_utterance = False
#             speech_files = list(speech_dir.glob(f"p{page}_*.wav"))
#             speech_files = sorted(speech_files, key=lambda x: int(x.stem.split("_")[-1]))
#             speech_clips = []
            
#             for utt_idx, speech_file in enumerate(speech_files):
#                 speech_clip = AudioFileClip(speech_file.__str__(), fps=audio_sample_rate)
                
#                 # 记录每句话的时间戳
#                 if utt_idx == 0:
#                     timestamps.append([cur_duration + fade_duration,
#                                      cur_duration + fade_duration + speech_clip.duration])
#                     cur_duration += speech_clip.duration + fade_duration
#                 elif utt_idx == len(speech_files) - 1:
#                     timestamps.append([cur_duration,
#                                      cur_duration + speech_clip.duration])
#                     cur_duration += speech_clip.duration + fade_duration + slide_duration
#                 else:
#                     timestamps.append([cur_duration,
#                                      cur_duration + speech_clip.duration])
#                     cur_duration += speech_clip.duration
                
#                 speech_clips.append(speech_clip)
            
#             speech_clip = concatenate_audioclips([fade_silence] + speech_clips + [fade_silence])
#             speech_file = speech_files[0]  # 用于后续处理
        
#         # 添加滑动静音
#         if page == 1:
#             speech_clip = concatenate_audioclips([speech_clip, slide_silence])
#         else:
#             speech_clip = concatenate_audioclips([slide_silence, speech_clip, slide_silence])
        
#         # 记录整个片段的时间戳
#         if single_utterance:
#             if page == 1:
#                 timestamps.append([cur_duration + fade_duration,
#                                  cur_duration + speech_clip.duration - fade_duration - slide_duration])
#                 cur_duration += speech_clip.duration - slide_duration
#             else:
#                 timestamps.append([cur_duration + fade_duration + slide_duration,
#                                  cur_duration + speech_clip.duration - fade_duration - slide_duration])
#                 cur_duration += speech_clip.duration - slide_duration

#         # ========== 2. 处理图像 ==========
#         image_file = (image_dir / f"./p{page}.png").__str__()        
#         image_clip = ImageClip(image_file)
#         image_clip = image_clip.set_duration(speech_clip.duration).set_fps(fps)
#         image_clip = image_clip.crossfadein(fade_duration).crossfadeout(fade_duration)

#         # 随机添加动画效果
#         if random.random() <= 0.5:  # 50% 概率使用缩放
#             zoom_mode = "in" if random.random() <= 0.5 else "out"
#             image_clip = add_zoom_effect(image_clip, zoom_speed, zoom_mode)
#         else:  # 50% 概率使用移动
#             direction = "left" if random.random() <= 0.5 else "right"
#             image_clip = add_move_effect(image_clip, direction=direction, move_ratio=move_ratio)

#         # ========== 3. 合成视频片段（图像+语音，无音效） ==========
#         video_clip = image_clip.set_audio(speech_clip)        
#         video_clips.append(video_clip)

#     # ========== 4. 添加转场效果 ==========
#     print("\n添加滑动转场效果...")
#     composite_clip = add_slide_effect(video_clips, slide_duration=slide_duration)
    
#     # ========== 5. 添加字幕区域和字幕 ==========
#     print("添加字幕...")
#     composite_clip = add_bottom_black_area(composite_clip, black_area_height=caption_config["area_height"])
#     del caption_config["area_height"]
#     max_caption_length = caption_config["max_length"]
#     del caption_config["max_length"]
    
#     composite_clip = add_caption(
#         captions,
#         story_dir / "captions.srt",
#         timestamps,
#         composite_clip,
#         max_caption_length,
#         **caption_config
#     )

#     # ========== 移除了背景音乐合成的代码 ==========
#     # 现在视频只包含：图像 + 语音 + 字幕
    
#     # ========== 6. 输出视频 ==========
#     print(f"\n正在导出视频到: {save_path}")
#     composite_clip.write_videofile(
#         save_path.__str__(),
#         audio_fps=audio_sample_rate,
#         audio_codec=audio_codec,
#     )
    
#     print("\n" + "="*60)
#     print(f"✅ 视频合成完成！")
#     print(f"   输出路径: {save_path}")
#     print(f"   视频时长: {composite_clip.duration:.2f} 秒")
#     print(f"   包含页面: {num_pages} 页")
#     print("="*60 + "\n")


# @register_tool("slideshow_video_compose")
# class SlideshowVideoComposeAgent:
#     """
#     幻灯片式视频合成代理（已移除音效和背景音乐）
    
#     功能：
#     - 图像序列合成
#     - 语音配音
#     - 字幕添加
#     - 转场动画（滑动、缩放、移动）
#     """

#     def __init__(self, cfg) -> None:
#         self.cfg = cfg
#         print("✅ 视频合成器初始化完成（纯净模式：无音效、无背景音乐）")

#     def adjust_caption_config(self, width, height):
#         """
#         根据视频尺寸自动调整字幕配置
        
#         Args:
#             width: 视频宽度
#             height: 视频高度
        
#         Returns:
#             字幕配置字典
#         """
#         area_height = int(height * 0.06)
#         fontsize = int((width + height) / 2 * 0.025)
#         return {
#             "fontsize": fontsize,
#             "area_height": area_height
#         }

#     def call(self, params):
#         """
#         执行视频合成
        
#         Args:
#             params: 参数字典，包含：
#                 - height: 视频高度
#                 - width: 视频宽度
#                 - pages: 故事页面列表
#                 - story_dir: 故事目录
#                 - fps: 帧率
#                 - audio_sample_rate: 音频采样率
#                 - audio_codec: 音频编码器
#                 - caption: 字幕配置
#                 - slideshow_effect: 幻灯片效果配置
#         """
#         height = params["height"]
#         width = params["width"]
#         pages = params["pages"]
        
#         # 自动调整字幕配置
#         params["caption"].update(self.adjust_caption_config(width, height))
        
#         # 调用视频合成函数
#         compose_video(
#             story_dir=Path(params["story_dir"]),
#             save_path=Path(params["story_dir"]) / "output.mp4",
#             captions=pages,
#             # ========== 移除了 music_path 参数 ==========
#             num_pages=len(pages),
#             fps=params["fps"],
#             audio_sample_rate=params["audio_sample_rate"],
#             audio_codec=params["audio_codec"],
#             caption_config=params["caption"],
#             **params["slideshow_effect"]  # 这里的参数也已经移除了 sound_volume 和 music_volume
#         )

from pathlib import Path
from typing import Union, Optional, List, Dict, Tuple, Set, Any, Callable
import random
import re
from datetime import timedelta
import os
import time
import base64
import requests

from tqdm import trange
import numpy as np
import librosa
import cv2
from zhon.hanzi import punctuation as zh_punc
from moviepy.editor import ImageClip, AudioFileClip, CompositeAudioClip, \
    CompositeVideoClip, ColorClip, VideoFileClip, VideoClip, concatenate_audioclips, \
    concatenate_videoclips
import moviepy.video.compositing.transitions as transfx
from moviepy.audio.AudioClip import AudioArrayClip
from moviepy.audio.fx.all import audio_loop
from PIL import Image, ImageDraw, ImageFont

from mm_story_agent.base import register_tool
# Optional subtitle support: prefer moviepy's SubtitlesClip/TextClip,
# fall back to PIL-drawn captions when they cannot be imported.
try:
    from moviepy.video.tools.subtitles import SubtitlesClip
    from moviepy.video.VideoClip import TextClip
    HAS_SUBTITLES = True
except ImportError:
    HAS_SUBTITLES = False
    print("⚠️  SubtitlesClip 不可用，将使用 PIL 字幕模式")

# Try to configure ImageMagick (required by moviepy's TextClip rendering).
USE_IMAGEMAGICK = False  # True only when ImageMagick is found AND passes the smoke test
HAS_TEXTCLIP = False     # True when TextClip/SubtitlesClip can be used for subtitles

try:
    from mm_story_agent.imagemagick_config import configure_imagemagick, test_imagemagick
    if configure_imagemagick():
        if test_imagemagick():
            USE_IMAGEMAGICK = True
            from moviepy.video.VideoClip import TextClip
            from moviepy.video.tools.subtitles import SubtitlesClip
            HAS_TEXTCLIP = True
            print("✅ 使用 ImageMagick 添加字幕")
        else:
            print("⚠️  ImageMagick 配置失败，将使用 PIL 绘制字幕")
    else:
        print("⚠️  未找到 ImageMagick，将使用 PIL 绘制字幕")
except Exception as e:
    print(f"⚠️  ImageMagick 配置异常: {e}，将使用 PIL 绘制字幕")
    USE_IMAGEMAGICK = False
    HAS_TEXTCLIP = False

# Cinematography presets keyed by shot type. Each preset describes:
#   motion          - camera move applied to the still image (Ken Burns / pan / tilt)
#   focus_cycle     - candidate focus anchors, cycled per page index
#   transition_out  - default outgoing transition for the shot
#   subtitle_anchor - vertical placement hint for the caption layer
#   speed_scale     - multiplier applied to the base zoom speed
#   move_ratio      - crop ratio used by pan/tilt moves (closer to 1 = subtler move)
#   min_duration    - minimum on-screen time for the shot, in seconds
SHOT_PRESETS: Dict[str, Dict[str, Any]] = {
    "establishing": {
        "motion": "pan_right",
        "focus_cycle": ["center", "topright"],
        "transition_out": "crossfade",
        "subtitle_anchor": "lower",
        "speed_scale": 0.7,
        "move_ratio": 0.9,
        "min_duration": 6.0
    },
    "wide": {
        "motion": "kenburns_out",
        "focus_cycle": ["center", "topleft", "topright"],
        "transition_out": "crossfade",
        "subtitle_anchor": "lower",
        "speed_scale": 0.8,
        "move_ratio": 0.92,
        "min_duration": 5.5
    },
    "medium": {
        "motion": "pan_left",
        "focus_cycle": ["center", "left", "right"],
        "transition_out": "crossfade",
        "subtitle_anchor": "lower_third",
        "speed_scale": 1.0,
        "move_ratio": 0.94,
        "min_duration": 5.0
    },
    "close": {
        "motion": "kenburns_in",
        "focus_cycle": ["center", "bottomright", "bottomleft"],
        "transition_out": "cut",
        "subtitle_anchor": "lower_third",
        "speed_scale": 1.2,
        "move_ratio": 0.96,
        "min_duration": 4.5
    },
    "detail": {
        "motion": "tilt_up",
        "focus_cycle": ["center", "top", "bottom"],
        "transition_out": "cut",
        "subtitle_anchor": "upper",
        "speed_scale": 0.9,
        "move_ratio": 0.95,
        "min_duration": 4.0
    },
    "closing": {
        "motion": "kenburns_out",
        "focus_cycle": ["center"],
        "transition_out": "dip_to_black",
        "subtitle_anchor": "lower",
        "speed_scale": 0.6,
        "move_ratio": 0.9,
        "min_duration": 6.0
    }
}

# Transition names implemented as crossfades vs. plain fades
# (consumed by apply_in_transition / apply_out_transition).
CROSSFADE_TRANSITIONS: Set[str] = {"crossfade", "dissolve"}
FADE_TRANSITIONS: Set[str] = {"dip_to_black", "fade"}


def plan_storyboard(pages: List[str]) -> List[Dict[str, Any]]:
    """Build a per-page shot plan following simple storyboard conventions.

    The first page gets an establishing shot, the last a closing shot, and
    the pages in between cycle through wide/medium/close/detail framings.
    A second in-order pass wires each page's incoming transition to the
    previous page's (possibly overridden) outgoing transition and forces a
    "dip to black" on every fifth page and on the final page.
    """
    total = len(pages)
    if not total:
        return []

    rotation = ("wide", "medium", "close", "detail", "medium")

    def pick_shot(page_idx: int) -> str:
        # Dedicated shot types bookend the story.
        if page_idx == 0:
            return "establishing"
        if page_idx == total - 1:
            return "closing"
        return rotation[page_idx % len(rotation)]

    plan: List[Dict[str, Any]] = []
    for page_idx in range(total):
        shot_key = pick_shot(page_idx)
        profile = SHOT_PRESETS.get(shot_key, SHOT_PRESETS["medium"]).copy()
        cycle = profile.pop("focus_cycle", None)
        if cycle:
            profile["focus"] = cycle[page_idx % len(cycle)]
        else:
            profile.setdefault("focus", "center")
        profile["shot_type"] = shot_key
        plan.append(profile)

    # Second pass MUST run in order: a page reads the previous profile's
    # transition_out after that profile may have been overridden below.
    last = len(plan) - 1
    for page_idx, profile in enumerate(plan):
        if page_idx == 0:
            profile["transition_in"] = "fade"
        else:
            profile["transition_in"] = plan[page_idx - 1].get("transition_out", "crossfade")

        if page_idx == last:
            profile["transition_out"] = "dip_to_black"

        if (page_idx + 1) % 5 == 0 and page_idx < last:
            profile["transition_out"] = "dip_to_black"

    return plan


def resolve_subtitle_position(anchor: str,
                              frame_height: int,
                              box_height: int,
                              safe_margin: int) -> Union[str, int]:
    """Translate a subtitle anchor keyword into a vertical pixel offset.

    Supported anchors (case-insensitive): "bottom"/"lower"/"lower_third",
    "top"/"upper", and "center". Any other value — including ``None`` —
    falls back to the bottom placement just inside the safe margin.
    """
    key = (anchor or "").lower()
    if key in {"top", "upper"}:
        return safe_margin
    if key == "center":
        return (frame_height - box_height) // 2
    if key == "lower_third":
        # Raise the box by ~15% of the frame, never closer than the margin.
        lift = int(frame_height * 0.15) + safe_margin
        return frame_height - box_height - max(safe_margin, lift)
    # "bottom", "lower", and unknown anchors: sit just above the safe margin.
    return frame_height - box_height - safe_margin


def apply_camera_motion(base_clip: VideoClip,
                        shot_profile: Dict[str, Any],
                        default_zoom_speed: float,
                        default_move_ratio: float) -> VideoClip:
    """Apply the camera move described by ``shot_profile`` to a clip.

    Recognized motions: "kenburns_in"/"kenburns_out" (zoom towards/away
    from the focus anchor) and "pan_left"/"pan_right"/"tilt_up"/"tilt_down"
    (translation). Anything else (e.g. "static") returns the clip unchanged.
    """
    motion = shot_profile.get("motion", "static")
    focus = shot_profile.get("focus", "center")
    scaled_speed = default_zoom_speed * shot_profile.get("speed_scale", 1.0)
    ratio = shot_profile.get("move_ratio", default_move_ratio)

    zoom_modes = {"kenburns_in": "in", "kenburns_out": "out"}
    pan_directions = {
        "pan_left": "left",
        "pan_right": "right",
        "tilt_up": "up",
        "tilt_down": "down",
    }

    if motion in zoom_modes:
        return add_zoom_effect(
            base_clip,
            speed=scaled_speed,
            mode=zoom_modes[motion],
            position=focus,
        )
    if motion in pan_directions:
        return add_move_effect(
            base_clip,
            direction=pan_directions[motion],
            move_ratio=ratio,
        )
    return base_clip


def apply_in_transition(clip: VideoClip, transition_type: str, duration: float) -> VideoClip:
    """Fade or crossfade a clip in, capping the effect at half its length."""
    if clip.duration:
        effective = min(duration, clip.duration / 2)
    else:
        effective = duration
    if transition_type in CROSSFADE_TRANSITIONS:
        return clip.crossfadein(effective)
    if transition_type in FADE_TRANSITIONS:
        return clip.fadein(effective)
    # Unknown transitions ("cut", etc.) leave the clip untouched.
    return clip


def apply_out_transition(clip: VideoClip, transition_type: str, duration: float) -> VideoClip:
    """Fade or crossfade a clip out, capping the effect at half its length."""
    effective = duration if not clip.duration else min(duration, clip.duration / 2)
    if transition_type in CROSSFADE_TRANSITIONS:
        return clip.crossfadeout(effective)
    # "dip_to_black" and "fade" both resolve to a plain fade-out.
    if transition_type == "dip_to_black" or transition_type == "fade":
        return clip.fadeout(effective)
    # Unknown transitions ("cut", etc.) leave the clip untouched.
    return clip


def assemble_storyboard_sequence(clips: List[VideoClip],
                                 plan: List[Dict[str, Any]],
                                 transition_duration: float,
                                 image_paths: Optional[List[Union[str, Path]]] = None,
                                 transition_method: str = "crossfade",
                                 fps: int = 24) -> VideoClip:
    """
    Assemble the storyboard sequence (transition clips are created in the main loop).

    Args:
        clips: video clips, with any transition clips already interleaved
        plan: storyboard plan, one profile dict per *main* clip
        transition_duration: transition length in seconds
        image_paths: image path list (unused, kept for API compatibility)
        transition_method: only checked for the smooth modes
            ("optical_flow"/"hybrid"); otherwise unused
        fps: frame rate (unused, kept for API compatibility)

    Returns:
        The concatenated composite clip.

    Raises:
        ValueError: if ``clips`` is empty.
    """
    if not clips:
        raise ValueError("没有可合成的视频片段")

    # Simplified version: transition clips were already built and inserted
    # into ``clips`` upstream; here we only concatenate, applying fade
    # in/out on the main clips when no smooth transition mode is in use.
    
    use_smooth_transition = transition_method in ["optical_flow", "hybrid"]
    processed: List[VideoClip] = []
    use_negative_padding = False
    
    # Index of the current *main* (non-transition) clip within ``plan``.
    clip_idx = 0
    
    for i, clip in enumerate(clips):
        # Heuristic: transition clips carry no audio and are short.
        # NOTE(review): a silent main clip shorter than 1.5x the transition
        # duration would be misclassified — confirm upstream guarantees
        # that every main clip has audio attached.
        is_transition = (clip.audio is None and clip.duration <= transition_duration * 1.5)
        
        if is_transition:
            # Pass transition clips through untouched.
            processed.append(clip)
            use_negative_padding = True
        else:
            # Main clip: apply fade in/out unless smooth transitions are used.
            profile = plan[clip_idx] if clip_idx < len(plan) else {}
            adjusted = clip
            
            if not use_smooth_transition:
                if clip_idx > 0:
                    in_transition = profile.get("transition_in", "crossfade")
                    adjusted = apply_in_transition(adjusted, in_transition, transition_duration)
                    if in_transition in CROSSFADE_TRANSITIONS:
                        use_negative_padding = True
                
                if clip_idx < len(plan) - 1:
                    out_transition = profile.get("transition_out", "crossfade")
                    adjusted = apply_out_transition(adjusted, out_transition, transition_duration)
                    if out_transition in CROSSFADE_TRANSITIONS:
                        use_negative_padding = True
            
            processed.append(adjusted)
            clip_idx += 1

    # Negative padding overlaps neighbouring clips so crossfades can blend.
    padding = -transition_duration if use_negative_padding else 0
    return concatenate_videoclips(processed, method="compose", padding=padding)



def generate_srt(timestamps: List,
                 captions: List,
                 save_path: Union[str, Path],
                 max_single_length: int = 30):
    """
    Write an SRT subtitle file.

    Each ``[start, end]`` span in ``timestamps`` is paired with the caption
    of the same index; long captions are wrapped by ``split_caption`` and
    the span is divided evenly among the resulting lines.

    Args:
        timestamps: list of [start, end] pairs in seconds
        captions: caption text, one entry per timestamp pair
        save_path: destination path of the .srt file
        max_single_length: maximum characters per subtitle line
    """
    def format_time(seconds: float) -> str:
        """Convert seconds to the SRT time format HH:MM:SS,mmm."""
        td = timedelta(seconds=seconds)
        total_seconds = int(td.total_seconds())
        millis = int((td.total_seconds() - total_seconds) * 1000)
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{hours:02}:{minutes:02}:{seconds:02},{millis:03}"

    srt_content = []

    for (start_time, end_time), caption in zip(timestamps, captions):
        caption_chunks = split_caption(caption, max_single_length).split("\n")
        if not caption_chunks:
            continue

        # Spread the speech span evenly over the wrapped lines.
        segment_duration = (end_time - start_time) / len(caption_chunks)

        for chunk_idx, chunk in enumerate(caption_chunks):
            start_time_str = format_time(start_time + segment_duration * chunk_idx)
            end_time_str = format_time(start_time + segment_duration * (chunk_idx + 1))
            # BUG FIX: cue numbers must be sequential (1, 2, 3, ...). The
            # previous `len(srt_content) // 2 + 1` emitted one string per
            # cue, so indices were duplicated (1, 1, 2, 2, ...).
            srt_content.append(f"{len(srt_content) + 1}\n{start_time_str} --> {end_time_str}\n{chunk}\n\n")

    with open(save_path, 'w', encoding='utf-8') as srt_file:
        srt_file.writelines(srt_content)


def split_text_to_lines(text: str, max_chars_per_line: int) -> List[str]:
    """
    Split text into display lines without truncating any content.

    English text is wrapped on word boundaries; other text (assumed CJK)
    is wrapped on sentence-ending punctuation (。！？) and newlines, with a
    hard split for any single run longer than ``max_chars_per_line``.

    Fixes over the previous version: the unused in-function import of
    ``zhon.hanzi.punctuation`` and the redundant local ``import re``
    (already imported at module level) are removed.

    Args:
        text: the original text
        max_chars_per_line: maximum characters per line

    Returns:
        List of lines; ``[text]`` when no split applies, ``[]`` for empty input.
    """
    if not text:
        return []

    lines: List[str] = []

    # Heuristic language sniff: any ASCII letter within the first 10
    # characters selects word-based (English) wrapping.
    is_english = any(c.isascii() and c.isalpha() for c in text[:10])

    if is_english:
        # English: greedy word wrap.
        current_words: List[str] = []
        current_length = 0
        for word in text.split():
            word_length = len(word) + 1  # +1 for the joining space
            if current_length + word_length <= max_chars_per_line:
                current_words.append(word)
                current_length += word_length
            else:
                if current_words:
                    lines.append(" ".join(current_words))
                current_words = [word]
                current_length = len(word)
        if current_words:
            lines.append(" ".join(current_words))
    else:
        # CJK: split on sentence-ending punctuation / newlines, keeping the
        # separators as their own list entries.
        parts = re.split(r'([。！？\n])', text)
        current_line = ""
        for part in parts:
            if not part:
                continue
            if part in "。！？\n":
                if part == "\n":
                    # Explicit newline always flushes the current line.
                    if current_line:
                        lines.append(current_line)
                        current_line = ""
                else:
                    # Keep the punctuation attached to its sentence.
                    current_line += part
                    if len(current_line) >= max_chars_per_line:
                        lines.append(current_line)
                        current_line = ""
            else:
                if len(current_line + part) <= max_chars_per_line:
                    current_line += part
                else:
                    if current_line:
                        lines.append(current_line)
                    # Hard-split any single run that exceeds the limit.
                    while len(part) > max_chars_per_line:
                        lines.append(part[:max_chars_per_line])
                        part = part[max_chars_per_line:]
                    current_line = part
        if current_line:
            lines.append(current_line)

    return lines if lines else [text]


def create_subtitle_clip_independent(text: str,
                                    duration: float,
                                    width: int,
                                    height: int,
                                    caption_config: dict,
                                    anchor: str = "lower") -> VideoClip:
    """Build a standalone subtitle layer with multi-line wrapping, dynamic
    placement and a safe edge margin.

    Tries an ImageMagick-backed TextClip first (only when TextClip support
    is available and the configured font file exists); otherwise renders
    the caption with PIL via create_subtitle_clip_with_pil.
    """
    from moviepy.editor import TextClip

    # An empty caption still yields a (1x1, black) clip so callers can
    # composite the layer unconditionally.
    if not text:
        return ImageClip(np.zeros((1, 1, 3), dtype=np.uint8)).set_duration(duration)

    font_size = caption_config.get("fontsize", 32)
    font_color = caption_config.get("color", "white")
    font_path = caption_config.get("font", None)
    width_ratio = caption_config.get("width_ratio", 0.9)
    margin_ratio = caption_config.get("safe_margin_ratio", 0.06)
    safe_margin = caption_config.get("safe_margin", int(height * margin_ratio))
    bg_opacity = caption_config.get("background_opacity", 0.7)
    line_gap = caption_config.get("line_spacing", 0.32)

    # Derive a character budget per line from the frame width when the
    # config does not pin one down (0.6 ~ average glyph width / font size).
    chars_per_line = caption_config.get("max_length") or int(
        width * width_ratio / max(font_size * 0.6, 1))

    wrapped_lines = split_text_to_lines(text, chars_per_line)

    if HAS_TEXTCLIP and font_path and Path(font_path).exists():
        try:
            clip = TextClip(
                "\n".join(wrapped_lines),
                fontsize=font_size,
                color=font_color,
                font=font_path,
                method="caption",
                align="center",
                size=(int(width * width_ratio), None),
            ).set_duration(duration)
            y = resolve_subtitle_position(anchor, height, clip.h, safe_margin)
            return clip.set_position(("center", y))
        except Exception as exc:
            print(f"⚠️  TextClip 创建失败: {exc}，回退到 PIL 模式")

    # Fallback: rasterize the caption with PIL.
    return create_subtitle_clip_with_pil(
        text=text,
        duration=duration,
        width=width,
        height=height,
        caption_config=caption_config,
        lines=wrapped_lines,
        anchor=anchor,
        safe_margin=safe_margin,
        background_opacity=bg_opacity,
        width_ratio=width_ratio,
        line_spacing=line_gap,
    )


def create_subtitle_clip_with_pil(text: str,
                                  duration: float,
                                  width: int,
                                  height: int,
                                  caption_config: dict,
                                  lines: List[str],
                                  anchor: str,
                                  safe_margin: int,
                                  background_opacity: float,
                                  width_ratio: float,
                                  line_spacing: float) -> VideoClip:
    """Render a subtitle clip with PIL (fallback when TextClip is unusable).

    Draws the pre-wrapped *lines* centered on a semi-transparent RGBA box
    with a small drop shadow, wraps the result in an ImageClip, and places
    it according to *anchor* / *safe_margin*.

    Args:
        text: original caption text (not used here; *lines* is already wrapped).
        duration: clip duration in seconds.
        width: target video frame width in pixels.
        height: target video frame height in pixels.
        caption_config: style dict (fontsize / color / font / bg_color keys read).
        lines: caption text pre-split into display lines.
        anchor: vertical placement keyword forwarded to resolve_subtitle_position.
        safe_margin: pixel margin kept from the frame edge.
        background_opacity: backing-box opacity, clamped to [0, 1].
        width_ratio: fraction of the frame width the box occupies.
        line_spacing: extra line height as a fraction of the font size.
    """
    from moviepy.editor import ImageClip

    font_size = caption_config.get("fontsize", 32)
    font_color = caption_config.get("color", "white")
    font_path = caption_config.get("font", None)

    # Normalize the text color to an RGB tuple (named colors limited to the
    # map below; anything unknown falls back to white).
    if isinstance(font_color, str):
        color_map = {
            "white": (255, 255, 255),
            "black": (0, 0, 0),
            "yellow": (255, 255, 0),
        }
        font_color = color_map.get(font_color.lower(), (255, 255, 255))
    elif isinstance(font_color, (list, tuple)):
        font_color = tuple(font_color[:3])
    else:
        font_color = (255, 255, 255)

    box_width = max(1, int(width * width_ratio))

    # Box geometry: uniform padding plus per-line height derived from the
    # font size and the configured line spacing.
    padding = int(font_size * 0.65)
    line_height = int(font_size * (1 + line_spacing))
    total_text_height = len(lines) * line_height
    box_height = total_text_height + padding * 2

    # Transparent canvas; the background rectangle is painted separately below.
    img = Image.new('RGBA', (box_width, box_height), (0, 0, 0, 0))
    draw = ImageDraw.Draw(img)

    # Font resolution order: configured path, then common Windows CJK fonts,
    # then PIL's built-in bitmap font as a last resort.
    try:
        if font_path and Path(font_path).exists():
            font = ImageFont.truetype(font_path, font_size)
        else:
            font_paths = [
                r"C:\Windows\Fonts\msyh.ttc",
                r"C:\Windows\Fonts\simhei.ttf",
                r"C:\Windows\Fonts\simsun.ttc",
            ]
            font = None
            for fp in font_paths:
                if Path(fp).exists():
                    font = ImageFont.truetype(fp, font_size)
                    break
            if font is None:
                font = ImageFont.load_default()
    except Exception as e:
        print(f"⚠️  加载字体失败: {e}，使用默认字体")
        font = ImageFont.load_default()

    # Background color normalization (named colors limited to the map below).
    bg_color = caption_config.get("bg_color", (0, 0, 0))
    if isinstance(bg_color, str):
        color_map = {
            "black": (0, 0, 0),
            "white": (255, 255, 255),
            "navy": (10, 24, 48)
        }
        bg_color = color_map.get(bg_color.lower(), (0, 0, 0))
    bg_opacity = int(max(0, min(1, background_opacity)) * 255)

    # Paint the translucent backing box first so text draws on top of it.
    draw.rectangle([0, 0, box_width, box_height],
                   fill=(*bg_color[:3], bg_opacity))

    # Draw each line horizontally centered: shadow first, then the text,
    # offset proportionally to the font size.
    for idx, line in enumerate(lines):
        bbox = draw.textbbox((0, 0), line, font=font)
        text_width = bbox[2] - bbox[0]
        x = max(0, (box_width - text_width) // 2)
        y = padding + idx * line_height
        shadow_offset = max(1, font_size // 12)
        draw.text((x + shadow_offset, y + shadow_offset),
                  line, font=font, fill=(0, 0, 0, 200))
        draw.text((x, y), line, font=font, fill=font_color + (255,))

    subtitle_clip = ImageClip(np.array(img))
    subtitle_clip = subtitle_clip.set_duration(duration)
    position_y = resolve_subtitle_position(anchor, height, box_height, safe_margin)
    subtitle_clip = subtitle_clip.set_position(("center", position_y))
    return subtitle_clip


def add_caption_with_imagemagick(captions: List,
                                 srt_path: Union[str, Path],
                                 timestamps: List,
                                 video_clip: VideoClip,
                                 max_single_length: int = 30,
                                 **caption_config):
    """Burn subtitles into *video_clip* using ImageMagick-backed TextClips.

    Writes an SRT file first, then overlays a SubtitlesClip anchored to the
    bottom center of the frame.

    Args:
        captions: subtitle texts, one entry per timestamp pair.
        srt_path: path where the generated SRT file is written.
        timestamps: [[start, end], ...] pairs in seconds, parallel to captions.
        video_clip: clip to caption.
        max_single_length: maximum characters per subtitle line.
        **caption_config: optional TextClip styling (font / fontsize / color).

    Returns:
        CompositeVideoClip with the subtitle track layered on top.

    Raises:
        ImportError: when TextClip support is unavailable.
    """
    if not HAS_TEXTCLIP:
        raise ImportError("TextClip 不可用，请使用 PIL 模式")

    generate_srt(timestamps, captions, srt_path, max_single_length)

    # Forward only the recognized styling options to TextClip.
    style = {key: caption_config[key]
             for key in ("font", "fontsize", "color")
             if key in caption_config}

    def make_clip(txt):
        return TextClip(txt, **style)

    subtitle_track = SubtitlesClip(str(srt_path), make_clip)
    return CompositeVideoClip([
        video_clip,
        subtitle_track.set_position(("center", "bottom"), relative=True),
    ])


# def add_caption(
#     captions: List[Tuple[float, float, str]],
#     composite_clip,
#     caption_config: Dict,
#     use_srt: bool = True
# ):
#     """添加字幕到视频"""
#     if use_srt:
#         # 🔥 修改前
#         # subtitles = SubtitlesClip(srt_path.__str__(), generator)
        
#         # 🔥 修改后：先读取文件内容并指定编码
#         with open(srt_path, 'r', encoding='utf-8') as f:
#             srt_content = f.read()
        
#         # 创建临时SRT文件（使用系统默认编码）
#         import tempfile
#         with tempfile.NamedTemporaryFile(mode='w', suffix='.srt', delete=False, encoding='gbk') as tmp:
#             tmp.write(srt_content)
#             tmp_path = tmp.name
        
#         try:
#             subtitles = SubtitlesClip(tmp_path, generator)
#             # ... 后续代码保持不变
#         finally:
#             # 清理临时文件
#             import os
#             if os.path.exists(tmp_path):
#                 os.remove(tmp_path)

# def add_caption(
#     captions: List[Tuple[float, float, str]],
#     composite_clip,
#     font: str,
#     fontsize: int,
#     color: str,
#     max_length: int,
#     use_srt: bool = True
# ):
#     """添加字幕到视频"""
    
#     # 字幕生成器
#     def generator(txt):
#         return TextClip(
#             txt,
#             font=font,           # 使用传入的参数
#             fontsize=fontsize,
#             color=color,
#             method='caption',
#             size=(composite_clip.w * 0.9, None)
#         )
    
#     if use_srt:
#         # 🔥 修改后:先读取文件内容并指定编码
#         with open(srt_path, 'r', encoding='utf-8') as f:
#             srt_content = f.read()
        
#         # 创建临时SRT文件(使用系统默认编码)
#         import tempfile
#         import os
        
#         with tempfile.NamedTemporaryFile(mode='w', suffix='.srt', delete=False, encoding='gbk') as tmp:
#             tmp.write(srt_content)
#             tmp_path = tmp.name
        
#         try:
#             subtitles = SubtitlesClip(tmp_path, generator)
#             result = CompositeVideoClip([composite_clip, subtitles.set_position(('center', 'bottom'))])
#         finally:
#             # 清理临时文件
#             if os.path.exists(tmp_path):
#                 os.remove(tmp_path)
#     else:
#         # 手动创建字幕片段
#         text_clips = []
#         for start, end, text in captions:
#             # 根据 max_length 截断文本
#             if len(text) > max_length:
#                 text = text[:max_length] + "..."
            
#             txt_clip = (generator(text)
#                        .set_start(start)
#                        .set_end(end)
#                        .set_position(('center', 'bottom')))
#             text_clips.append(txt_clip)
        
#         result = CompositeVideoClip([composite_clip] + text_clips)
    
#     return result

def add_caption(
    captions: List,
    srt_path: Union[str, Path],
    timestamps: List,
    composite_clip,
    max_length: int,
    font: str,
    fontsize: int,
    color: str,
    use_srt: bool = True
    
):
    """Overlay captions on *composite_clip*.

    Args:
        captions: caption texts (SRT mode) or (start, end, text) triples
            (manual mode, use_srt=False).
        srt_path: path where the intermediate SRT file is written.
        timestamps: [[start, end], ...] pairs used to build the SRT file.
        composite_clip: clip to caption (its width sizes the text box).
        max_length: maximum characters per subtitle line (SRT mode) or
            truncation length (manual mode).
        font: font name/path forwarded to TextClip.
        fontsize: font size forwarded to TextClip.
        color: text color forwarded to TextClip.
        use_srt: when True, render via a SubtitlesClip built from the SRT
            file; otherwise build one TextClip per caption triple.

    Returns:
        CompositeVideoClip with the captions layered on top.
    """
    

    # Generate the SRT file first.
    generate_srt(timestamps, captions, srt_path, max_length)
    
    # Per-line subtitle TextClip factory, wrapped to 90% of the frame width.
    def generator(txt):
        return TextClip(
            txt,
            font=font,
            fontsize=fontsize,
            color=color,
            method='caption',
            size=(composite_clip.w * 0.9, None)
        )
    
    if use_srt:
        # Read the SRT back with an explicit UTF-8 encoding (the default
        # system codepage would mangle non-ASCII text on Windows).
        with open(srt_path, 'r', encoding='utf-8') as f:
            srt_content = f.read()
        
        # Re-encode the SRT as GBK into a temp file — presumably so
        # SubtitlesClip/ImageMagick reads it with the Chinese-Windows system
        # codepage; TODO confirm. NOTE(review): characters outside GBK will
        # raise UnicodeEncodeError here.
        import tempfile
        import os
        
        with tempfile.NamedTemporaryFile(mode='w', suffix='.srt', delete=False, encoding='gbk') as tmp:
            tmp.write(srt_content)
            tmp_path = tmp.name
        
        try:
            subtitles = SubtitlesClip(tmp_path, generator)
            result = CompositeVideoClip([
                composite_clip, 
                subtitles.set_position(('center', 'bottom'))
            ])
        finally:
            # Always remove the temp file, even if SubtitlesClip fails.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
    else:
        # Manual mode: one positioned TextClip per (start, end, text) triple.
        text_clips = []
        for start, end, text in captions:
            # Truncate over-long captions with an ellipsis.
            if len(text) > max_length:
                text = text[:max_length] + "..."
            
            txt_clip = (generator(text)
                       .set_start(start)
                       .set_end(end)
                       .set_position(('center', 'bottom')))
            text_clips.append(txt_clip)
        
        result = CompositeVideoClip([composite_clip] + text_clips)
    
    return result

def split_keep_separator(text, separator):
    """Split *text* on any character in *separator*, keeping each separator
    as its own list item (via a capturing regex character class)."""
    capturing = "([{}])".format(re.escape(separator))
    return re.split(capturing, text)


def split_caption(caption, max_length=30):
    """Wrap *caption* into lines of at most *max_length* characters.

    Captions starting with an ASCII letter are wrapped on word boundaries;
    anything else is treated as Chinese and wrapped on zhon punctuation
    (a leading punctuation mark is glued onto the previous line).

    Args:
        caption: the caption text; may be empty.
        max_length: maximum characters per output line.

    Returns:
        The wrapped caption with lines joined by "\\n" ("" for empty input,
        which previously raised IndexError).
    """
    if not caption:
        return ""

    lines = []
    first = caption[0]
    if "a" <= first <= "z" or "A" <= first <= "Z":
        # English: greedy word wrap.
        current_words = []
        for word in caption.split(" "):
            if len(" ".join(current_words + [word])) <= max_length:
                current_words.append(word)
            else:
                if current_words:
                    lines.append(" ".join(current_words))
                # Bug fix: carry the overflowing word into the next line
                # instead of silently dropping it (a word longer than
                # max_length gets a line of its own).
                current_words = [word]

        if current_words:
            lines.append(" ".join(current_words))
    else:
        # Chinese: split on punctuation (separators kept), then greedily
        # pack the pieces into lines.
        sentences = split_keep_separator(caption, zh_punc)
        current_line = ""
        for sentence in sentences:
            if len(current_line + sentence) <= max_length:
                current_line += sentence
            else:
                if current_line:
                    lines.append(current_line)
                    current_line = ""
                if sentence.startswith(tuple(zh_punc)):
                    # A leading punctuation mark belongs to the previous line.
                    if lines:
                        lines[-1] += sentence[0]
                    current_line = sentence[1:]
                else:
                    current_line = sentence

        if current_line:
            lines.append(current_line.strip())

    return '\n'.join(lines)


def add_bottom_black_area(clip: VideoFileClip,
                          black_area_height: int = 64):
    """
    Composite a solid black strip (used as a caption background) along the
    bottom edge of *clip*, spanning its full width and duration.
    """
    strip = ColorClip(size=(clip.w, black_area_height),
                      color=(0, 0, 0),
                      duration=clip.duration)
    return CompositeVideoClip([clip, strip.set_position(("center", "bottom"))])


def add_zoom_effect(clip, speed=1.0, mode='in', position='center'):
    """Apply a gradual zoom ('in' grows over time, 'out' shrinks) to *clip*,
    anchored at *position* (one of center/left/right/top/bottom and the four
    corners). The total zoom amount scales with *speed* (0.1 * speed over the
    whole clip)."""
    frame_rate = clip.fps
    frame_count = int(clip.duration * frame_rate)

    def transform(get_frame, t):
        frame = get_frame(t)
        h, w = frame.shape[:2]
        frame_idx = t * frame_rate
        if mode == 'out':
            # Reverse the progression so the zoom relaxes back to 1x.
            frame_idx = frame_count - frame_idx
        scale = 1 + (frame_idx * ((0.1 * speed) / frame_count))
        # Translation offsets that keep the chosen anchor point fixed while
        # the frame scales up.
        dx_center = (w - w * scale) / 2
        dy_center = (h - h * scale) / 2
        dx_right = w - w * scale
        dy_bottom = h - h * scale
        anchors = {
            'center': (dx_center, dy_center),
            'left': (0, dy_center),
            'right': (dx_right, dy_center),
            'top': (dx_center, 0),
            'topleft': (0, 0),
            'topright': (dx_right, 0),
            'bottom': (dx_center, dy_bottom),
            'bottomleft': (0, dy_bottom),
            'bottomright': (dx_right, dy_bottom),
        }
        tx, ty = anchors[position]
        matrix = np.array([[scale, 0, tx], [0, scale, ty]])
        return cv2.warpAffine(frame, matrix, (w, h))

    return clip.fl(transform)


def add_move_effect(clip,
                    direction: str = "left",
                    move_ratio: float = 0.95) -> VideoClip:
    """Pan (Ken Burns move) across an enlarged copy of *clip*.

    The clip is upscaled by 1/move_ratio and slid in *direction*
    ("left"/"right"/"up"/"down"; anything else keeps it centered) inside a
    frame of the original size.
    """
    base_w, base_h = clip.size
    # Clamp the ratio so the enlargement stays between ~1% and 2x.
    ratio = max(0.5, min(0.99, move_ratio))

    big_w = int(base_w / ratio)
    big_h = int(base_h / ratio)
    clip = clip.resize(width=big_w, height=big_h)

    side = (direction or "left").lower()

    # Offsets that center the oversized clip on the original frame.
    center_x = (base_w - big_w) // 2
    center_y = (base_h - big_h) // 2

    if side in {"left", "right"}:
        if side == "left":
            x0, x1 = 0, base_w - big_w
        else:
            x0, x1 = base_w - big_w, 0
        y0 = y1 = center_y
    elif side in {"up", "down"}:
        if side == "up":
            y0, y1 = base_h - big_h, 0
        else:
            y0, y1 = 0, base_h - big_h
        x0 = x1 = center_x
    else:
        # Unknown direction: no movement, just the centered crop.
        x0 = x1 = center_x
        y0 = y1 = center_y

    span = max(clip.duration, 0.1)
    panning = clip.set_position(
        lambda t: (x0 + (x1 - x0) * t / span,
                   y0 + (y1 - y0) * t / span)
    )

    return CompositeVideoClip([panning], size=(base_w, base_h))


def convert_image_to_base64(image_path: Union[str, Path]) -> str:
    """Return the contents of the file at *image_path* as a base64 string
    (ASCII, decoded as UTF-8)."""
    raw_bytes = Path(image_path).read_bytes()
    return base64.b64encode(raw_bytes).decode('utf-8')


def generate_video_from_text_with_runway(prompt_text: str,
                                        api_key: str,  # 🔑🔑🔑 Required: Runway API key
                                        duration: int = 5,
                                        model: str = "gen4",
                                        seed: Optional[int] = None,
                                        watermark: bool = False,
                                        ratio: str = "16:9",
                                        timeout: int = 300,
                                        save_dir: Optional[Union[str, Path]] = None,
                                        filename: Optional[str] = None,
                                        image_url: Optional[str] = None) -> Optional[str]:
    """
    Generate a video directly from a text prompt via the Runway API
    (through a third-party proxy endpoint).

    Flow: submit a text-to-video task, poll its status until it succeeds,
    fails, or times out, then download the resulting file.

    Args:
        prompt_text: text prompt (required).
        api_key: Runway API key (required).
        duration: video length in seconds, default 5 (the current API may
            not support this parameter — it is not sent in the request).
        model: model name, default "gen4"; one of "gen3", "gen4", "gen4_turbo".
        seed: random seed (optional; not sent — the API may not support it).
        watermark: whether to watermark, default False (not sent either).
        ratio: aspect ratio, default "16:9"; one of "16:9", "9:16".
        timeout: maximum seconds to wait for generation, default 300.
        save_dir: directory to save the video into (optional).
        filename: file name to save as (optional).
        image_url: reference image URL (optional).

    Returns:
        Path of the downloaded video file, or None on any failure.
    """
    # Refuse to run with a missing or placeholder API key.
    if not api_key or api_key == "YOUR_RUNWAY_API_KEY" or api_key == "":
        print("⚠️  未配置 Runway API 密钥，跳过视频生成")
        print("   提示：请在配置文件中设置 runway.api_key 或设置环境变量 RUNWAY_API_KEY")
        return None
    
    if not prompt_text or prompt_text.strip() == "":
        print("⚠️  提示词为空，无法生成视频")
        return None
    
    try:
        print(f"🎬 开始使用 Runway API 通过文本生成视频（提示词: {prompt_text[:50]}...）...")
        
        # Step 1: submit the text-to-video task.
        print("  1. 提交文本到视频转换任务...")
        # Third-party proxy endpoint for the Runway service.
        url = "https://api.wuyinkeji.com/api/video/runway"
        headers = {
            "Authorization": api_key,  # raw API key, no "Bearer " prefix
            "Content-Type": "application/json;charset:utf-8;"
        }
        
        # Request body as documented by the provider.
        data = {
            "model": model,  # required; enum: gen3, gen4, gen4_turbo
            "prompt": prompt_text  # required prompt text
        }
        
        # Optional parameters.
        if ratio:
            # Normalize the ratio to one of the two supported values.
            if ratio not in ["16:9", "9:16"]:
                # Best-effort conversion of nonstandard strings.
                if "16" in ratio and "9" in ratio:
                    if ratio.startswith("16") or "16:9" in ratio:
                        ratio = "16:9"
                    else:
                        ratio = "9:16"
                else:
                    ratio = "16:9"  # default fallback
            data["ratio"] = ratio
        
        if image_url:
            data["image"] = image_url
        
        response = requests.post(url, headers=headers, json=data, timeout=30)
        
        if response.status_code != 200:
            print(f"  ❌ 提交任务失败: {response.status_code} - {response.text}")
            return None
        
        result = response.json()
        
        # The provider wraps HTTP 200 around an application-level "code" field.
        if result.get("code") != 200:
            error_msg = result.get("msg", "未知错误")
            print(f"  ❌ API 返回错误: {error_msg}")
            return None
        
        task_id = result.get("data", {}).get("task_id")
        
        if task_id:
            print(f"  ✅ 任务已提交，任务 ID: {task_id}")
        else:
            print(f"  ❌ 未获取到任务 ID: {result}")
            return None
        
        # Step 2: poll the task status.
        print("  2. 等待视频生成完成...")
        # Per the provider docs, status is queried via GET on
        # runwayAlephDetail with `key` (API key) and `task_id` parameters.
        status_url = "https://api.wuyinkeji.com/api/video/runwayAlephDetail"
        
        start_time = time.time()
        max_wait_time = timeout
        poll_interval = 5  # seconds between status queries
        
        with trange(0, max_wait_time, poll_interval, desc="  等待生成") as pbar:
            while time.time() - start_time < max_wait_time:
                # Query the status; transient network errors just skip this
                # polling round and retry.
                try:
                    status_response = requests.get(
                        status_url,
                        headers=headers,
                        params={
                            "key": api_key,  # API key passed as a query parameter
                            "task_id": task_id
                        },
                        timeout=30
                    )
                except Exception as e:
                    print(f"\n  ⚠️  查询状态请求异常: {e}")
                    pbar.update(poll_interval)
                    time.sleep(poll_interval)
                    continue
                
                if status_response.status_code != 200:
                    # Log a snippet of the body for diagnosis, then retry.
                    try:
                        error_detail = status_response.text[:200]
                        print(f"\n  ⚠️  查询状态失败: {status_response.status_code} - {error_detail}")
                    except:
                        print(f"\n  ⚠️  查询状态失败: {status_response.status_code}")
                    pbar.update(poll_interval)
                    time.sleep(poll_interval)
                    continue
                
                # Tolerate non-JSON bodies by retrying on the next round.
                try:
                    status_result = status_response.json()
                except Exception as e:
                    print(f"\n  ⚠️  解析响应失败: {e}，响应内容: {status_response.text[:200]}")
                    pbar.update(poll_interval)
                    time.sleep(poll_interval)
                    continue
                
                # Application-level return-code check.
                if status_result.get("code") != 200:
                    error_msg = status_result.get("msg", "查询失败")
                    print(f"\n  ⚠️  查询状态返回错误: code={status_result.get('code')}, msg={error_msg}")
                    pbar.update(poll_interval)
                    time.sleep(poll_interval)
                    continue
                
                status_data = status_result.get("data", {})
                # Per the provider docs: status is an int — 1=running,
                # 2=failed, 3=succeeded.
                status = status_data.get("status")
                
                # Missing status: dump the payload for debugging and retry.
                if status is None:
                    print(f"\n  ⚠️  状态为空，响应数据: {status_result}")
                    pbar.update(poll_interval)
                    time.sleep(poll_interval)
                    continue
                
                # Human-readable status label for the progress bar.
                status_map = {1: "生成中", 2: "失败", 3: "成功"}
                status_text = status_map.get(status, f"未知({status})")
                pbar.set_postfix({"状态": status_text})
                
                if status == 3:  # succeeded
                    # Per the docs, the video URL is in data.generate_video.
                    video_url = status_data.get("generate_video", "")
                    
                    if video_url:
                        print(f"\n  ✅ 视频生成完成！")
                        
                        # Step 3: download the video.
                        print("  3. 下载生成的视频...")
                        if save_dir:
                            video_dir = Path(save_dir)
                        else:
                            video_dir = Path("generated_videos")
                        video_dir.mkdir(parents=True, exist_ok=True)
                        
                        if filename:
                            video_file = video_dir / filename
                        else:
                            video_file = video_dir / f"runway_video_{int(time.time())}.mp4"
                        
                        video_response = requests.get(video_url, timeout=60)
                        if video_response.status_code == 200:
                            with open(video_file, 'wb') as f:
                                f.write(video_response.content)
                            print(f"  ✅ 视频已保存: {video_file}")
                            return str(video_file)
                        else:
                            print(f"  ❌ 下载视频失败: {video_response.status_code}")
                            return None
                    else:
                        print(f"  ❌ 未找到视频 URL: {status_data}")
                        return None
                
                elif status == 2:  # failed
                    # Per the docs, the failure reason is in data.remark.
                    error_msg = status_data.get("remark", "未知错误")
                    print(f"\n  ❌ 视频生成失败: {error_msg}")
                    return None
                
                # Status 1 (still running): keep waiting.
                pbar.update(poll_interval)
                time.sleep(poll_interval)
        
        # Overall timeout reached without a terminal status.
        print(f"\n  ⚠️  超时：视频生成时间超过 {max_wait_time} 秒")
        return None
        
    except requests.exceptions.Timeout:
        print(f"  ❌ 请求超时")
        return None
    except Exception as e:
        print(f"  ❌ 生成视频时出错: {e}")
        import traceback
        traceback.print_exc()
        return None


def generate_image_with_dashscope(prompt_text: str,
                                  image_dir: Union[str, Path],
                                  page: int,
                                  image_generation_config: Optional[Dict[str, Any]] = None) -> Optional[str]:
    """
    Generate an image with the DashScope API (used as a fallback when
    video generation fails).

    Args:
        prompt_text: text prompt.
        image_dir: directory to save the generated image into.
        page: page index, forwarded to the API call to index the output file.
        image_generation_config: image-generation tool config dict (optional);
            when omitted, the API key is read from the DASHSCOPE_API_KEY
            environment variable and a default config is built.

    Returns:
        Path of the generated image file, or None on failure.
    """
    try:
        import os
        from mm_story_agent.base import init_tool_instance
        
        # Without an explicit config, build a default one from the
        # DASHSCOPE_API_KEY environment variable.
        if not image_generation_config:
            api_key = os.getenv('DASHSCOPE_API_KEY')
            if not api_key:
                print("  ⚠️  未配置 DashScope API key，无法生成回退图片")
                return None
            
            # Default tool configuration.
            image_generation_config = {
                'tool_name': 'story_diffusion_t2i',
                'params': {
                    'api_key': api_key,
                    'model': 'wanx-v1',
                    'style_name': 'Storybook'
                }
            }
        
        # Instantiate the image-generation agent from the config.
        image_agent = init_tool_instance(image_generation_config)
        params = image_generation_config.get('params', {})
        style_name = params.get('style_name', 'Storybook')
        
        # Let the agent rewrite the prompt for the chosen style.
        optimized_prompt = image_agent.optimize_prompt(prompt_text, style_name)
        
        # Make sure the target directory exists.
        if not isinstance(image_dir, Path):
            image_dir = Path(image_dir)
        image_dir.mkdir(parents=True, exist_ok=True)
        
        # Call the DashScope API and wait for the result.
        print(f"  🖼️  使用 DashScope API 生成回退图片...")
        image_file = image_agent.call_dashscope_api_with_wait(
            prompt=optimized_prompt,
            save_path=image_dir,
            index=page
        )
        
        if image_file and Path(image_file).exists():
            print(f"  ✅ DashScope 回退图片生成成功: {Path(image_file).name}")
            return image_file
        else:
            print(f"  ⚠️  DashScope 图片生成失败")
            return None
            
    except Exception as e:
        print(f"  ❌ DashScope 图片生成出错: {e}")
        import traceback
        traceback.print_exc()
        return None


def add_slide_effect(clips, slide_duration):
    """Concatenate *clips* with sliding transitions between consecutive pages.

    Each middle clip slides in from the side the previous clip slid out to,
    and slides out to a randomly chosen side; start times are shifted back
    by one *slide_duration* per preceding transition so the slides overlap.

    NOTE(review): appears to assume len(clips) >= 2 — with a single clip the
    first and last entries would both reference clips[0]; confirm callers
    guarantee this.
    """
    durations = [clip.duration for clip in clips]
    # First clip starts at t=0 and only slides out (to the left).
    first_clip = CompositeVideoClip(
        [clips[0].fx(transfx.slide_out, duration=slide_duration, side="left")]
    ).set_start(0)

    slide_out_sides = ["left"]
    videos = [first_clip]

    # A clip slides in from the side opposite to where the previous one
    # slid out.
    out_to_in_mapping = {"left": "right", "right": "left"}
    
    for idx, clip in enumerate(clips[1: -1], start=1):
        slide_in_side = out_to_in_mapping[slide_out_sides[-1]]
        # Pick the outgoing side at random (50/50) for visual variety.
        slide_out_side = "left" if random.random() <= 0.5 else "right"
        slide_out_sides.append(slide_out_side)
                
        videos.append(
            (
                CompositeVideoClip(
                    [clip.fx(transfx.slide_in, duration=slide_duration, side=slide_in_side)]
                )
                # Shift the start back by one slide_duration per preceding
                # transition so consecutive slides overlap.
                .set_start(sum(durations[:idx]) - (slide_duration) * idx)
                .fx(transfx.slide_out, duration=slide_duration, side=slide_out_side)
            )
        )
    
    # Last clip only slides in; there is no outgoing transition.
    last_clip = CompositeVideoClip(
        [clips[-1].fx(transfx.slide_in, duration=slide_duration, side=out_to_in_mapping[slide_out_sides[-1]])]
    ).set_start(sum(durations[:-1]) - slide_duration * (len(clips) - 1))
    videos.append(last_clip)

    video = CompositeVideoClip(videos)
    return video


def compose_video(story_dir: Union[str, Path],
                  save_path: Union[str, Path],
                  captions: List,
                  num_pages: int,
                  fps: int = 10,
                  audio_sample_rate: int = 16000,
                  audio_codec: str = "mp3",
                  caption_config: Optional[dict] = None,
                  fade_duration: float = 1.0,
                  slide_duration: float = 0.4,
                  zoom_speed: float = 0.5,
                  move_ratio: float = 0.95,
                  transition_method: str = "crossfade",
                  use_runway: bool = False,
                  runway_api_key: Optional[str] = None,
                  runway_config: Optional[Dict[str, Any]] = None,
                  video_paths: Optional[List[Union[str, Path, None]]] = None,
                  image_generation_config: Optional[Dict[str, Any]] = None):
    """Compose the final story video from per-page media and (optional) speech.

    Fixed version: supports composition without speech files, Runway
    text-to-video generation, and stitching of pre-rendered video segments.

    Workflow:
    1. Prefer video segments passed in via ``video_paths`` (if provided).
    2. Otherwise, try generating a video directly from the text prompt
       (Runway text-to-video).
    3. If video generation fails, generate an image via the DashScope API
       (fallback).
    4. If image generation also fails, use existing image files on disk.
    5. If no image exists either, a ``FileNotFoundError`` is raised.

    Args:
        story_dir: Story working directory containing ``image/``, ``speech/``
            and ``video/`` subdirectories.
        save_path: Output video file path.
        captions: Per-page caption texts.
        num_pages: Number of pages to render.
        fps: Output frame rate.
        audio_sample_rate: Sample rate used for all audio clips.
        audio_codec: Audio codec passed to ``write_videofile``.
        caption_config: Subtitle rendering options. Defaults to an empty dict
            (previously a mutable default argument; now normalised internally).
        fade_duration: Audio/visual fade length in seconds.
        slide_duration: Slide-transition length in seconds.
        zoom_speed: Default zoom speed for camera motion.
        move_ratio: Default pan ratio for camera motion.
        transition_method: "crossfade", "optical_flow", "hybrid" or
            "enhanced_optical_flow".
        use_runway: Whether to use the Runway API (requires configuration).
        runway_api_key: Runway API key (required when ``use_runway`` is True).
        runway_config: Runway API configuration dict, containing:
            - prompt_text: text prompt (required for video generation)
            - duration: video length in seconds, default 5 (may be unsupported)
            - model: "gen3", "gen4" (default) or "gen4_turbo"
            - seed: random seed (optional, may be unsupported)
            - watermark: add watermark, default False (may be unsupported)
            - ratio: aspect ratio, "16:9" (default) or "9:16"
            - timeout: generation timeout in seconds, default 300
            - image_url: reference image URL (optional)
        video_paths: Optional list of per-page video segment paths; ``None``
            entries mean "use an image for this page". Length should equal
            ``num_pages``. When provided, these segments take priority so the
            audio stays in sync.
        image_generation_config: Optional image-generation config for the
            DashScope fallback, e.g.
            ``{'tool_name': 'story_diffusion_t2i', 'params': {...}}``.
            If absent, the DASHSCOPE_API_KEY environment variable is used.
    """
    # Fix: avoid the shared mutable-default-argument pitfall; normalise here.
    if caption_config is None:
        caption_config = {}

    if not isinstance(story_dir, Path):
        story_dir = Path(story_dir)

    image_dir = story_dir / "image"
    speech_dir = story_dir / "speech"

    # Check whether the speech directory exists; fall back to silent mode.
    if not speech_dir.exists():
        print(f"\n⚠️  警告：语音目录不存在: {speech_dir}")
        print("将使用默认时长生成无语音视频")
        use_speech = False
    else:
        use_speech = True

    video_clips = []
    image_paths = []  # Keep image paths for smooth transitions later.
    cur_duration = 0
    timestamps = []

    print("\n" + "="*60)
    print("开始合成视频")
    print("="*60)
    print(f"过渡方法: {transition_method}")
    print(f"目标页面数: {num_pages}")

    storyboard_plan = plan_storyboard(captions[:num_pages])
    transition_duration = max(0.3, slide_duration)

    # Pre-collect all image and video files, keyed by page number.
    image_dir = story_dir / "image"
    video_dir = story_dir / "video"

    # Collect image files; tolerate several naming formats.
    all_image_files = {}
    if image_dir.exists():
        for img_file in image_dir.glob("p*.png"):
            # Extract the page number: p1.png -> 1, p0001.png -> 1
            match = re.search(r'p(\d+)', img_file.stem)
            if match:
                page_num = int(match.group(1))
                # Prefer the canonical 4-digit format when both forms exist.
                if page_num not in all_image_files or len(match.group(1)) == 4:
                    all_image_files[page_num] = img_file

    # Collect video files the same way.
    all_video_files = {}
    if video_dir.exists():
        for vid_file in video_dir.glob("p*.mp4"):
            # Extract the page number: p1.mp4 -> 1, p0001.mp4 -> 1
            match = re.search(r'p(\d+)', vid_file.stem)
            if match:
                page_num = int(match.group(1))
                # Prefer the canonical 4-digit format when both forms exist.
                if page_num not in all_video_files or len(match.group(1)) == 4:
                    all_video_files[page_num] = vid_file

    print(f"📁 找到 {len(all_image_files)} 个图片文件，{len(all_video_files)} 个视频文件")

    # Decide whether smooth (optical-flow-based) transitions are requested
    # and whether the optional module is importable.
    use_smooth_transition = transition_method in ["optical_flow", "hybrid", "enhanced_optical_flow"]
    if use_smooth_transition:
        try:
            from mm_story_agent.image_transition import create_smooth_transition_clip
            print(f"✅ 已启用平滑过渡: {transition_method}")
        except ImportError as e:
            print(f"⚠️  无法导入平滑过渡模块: {e}，将使用简单交叉淡化")
            use_smooth_transition = False
            transition_method = "crossfade"

    for page in trange(1, num_pages + 1, desc="处理页面"):
        shot_profile = storyboard_plan[page - 1] if page - 1 < len(storyboard_plan) else {}

        # ========== 1. Build the speech track (fixed version) ==========
        slide_silence = AudioArrayClip(
            np.zeros((int(audio_sample_rate * slide_duration), 2)),
            fps=audio_sample_rate
        )
        fade_silence = AudioArrayClip(
            np.zeros((int(audio_sample_rate * fade_duration), 2)),
            fps=audio_sample_rate
        )

        speech_rms = 1.0  # Default energy value.

        if use_speech and (speech_dir / f"p{page}.wav").exists():  # Single speech file per page.
            single_utterance = True
            speech_file = speech_dir / f"p{page}.wav"
            speech_clip = AudioFileClip(str(speech_file), fps=audio_sample_rate)
            speech_clip = concatenate_audioclips([fade_silence, speech_clip, fade_silence])

            # Compute RMS energy of the utterance.
            speech_array, _ = librosa.core.load(speech_file, sr=None)
            speech_rms = librosa.feature.rms(y=speech_array)[0].mean()

        elif use_speech:  # Try multiple per-utterance speech files.
            single_utterance = False
            speech_files = list(speech_dir.glob(f"p{page}_*.wav"))

            if len(speech_files) > 0:  # Fix: guard against an empty file list.
                speech_files = sorted(speech_files, key=lambda x: int(x.stem.split("_")[-1]))
                speech_clips = []

                for utt_idx, speech_file in enumerate(speech_files):
                    speech_clip = AudioFileClip(str(speech_file), fps=audio_sample_rate)

                    # Track subtitle timestamps; the first and last utterance
                    # also account for the surrounding fade/slide silences.
                    if utt_idx == 0:
                        timestamps.append([cur_duration + fade_duration,
                                         cur_duration + fade_duration + speech_clip.duration])
                        cur_duration += speech_clip.duration + fade_duration
                    elif utt_idx == len(speech_files) - 1:
                        timestamps.append([cur_duration,
                                         cur_duration + speech_clip.duration])
                        cur_duration += speech_clip.duration + fade_duration + slide_duration
                    else:
                        timestamps.append([cur_duration,
                                         cur_duration + speech_clip.duration])
                        cur_duration += speech_clip.duration

                    speech_clips.append(speech_clip)

                speech_clip = concatenate_audioclips([fade_silence] + speech_clips + [fade_silence])

                # Compute RMS energy from the first utterance.
                speech_array, _ = librosa.core.load(speech_files[0], sr=None)
                speech_rms = librosa.feature.rms(y=speech_array)[0].mean()
            else:
                # No speech files found for this page: use the default duration.
                default_duration = shot_profile.get("min_duration", 5.0)
                print(f"\n⚠️  页面 {page} 没有语音文件，使用默认时长 {default_duration:.1f} 秒")
                silence = AudioArrayClip(
                    np.zeros((int(audio_sample_rate * default_duration), 2)),
                    fps=audio_sample_rate
                )
                speech_clip = concatenate_audioclips([fade_silence, silence, fade_silence])
                single_utterance = True
        else:
            # No speech at all: synthesize a silent track of default duration.
            default_duration = shot_profile.get("min_duration", 5.0)
            print(f"\n⚠️  页面 {page} 使用默认时长 {default_duration:.1f} 秒（无语音模式）")
            silence = AudioArrayClip(
                np.zeros((int(audio_sample_rate * default_duration), 2)),
                fps=audio_sample_rate
            )
            speech_clip = concatenate_audioclips([fade_silence, silence, fade_silence])
            single_utterance = True

        # Append slide silence (and prepend it for every page after the first).
        if page == 1:
            speech_clip = concatenate_audioclips([speech_clip, slide_silence])
        else:
            speech_clip = concatenate_audioclips([slide_silence, speech_clip, slide_silence])

        # Record subtitle timestamps for the single-utterance case.
        if single_utterance:
            if page == 1:
                timestamps.append([cur_duration + fade_duration,
                                 cur_duration + speech_clip.duration - fade_duration - slide_duration])
                cur_duration += speech_clip.duration - slide_duration
            else:
                timestamps.append([cur_duration + fade_duration + slide_duration,
                                 cur_duration + speech_clip.duration - fade_duration - slide_duration])
                cur_duration += speech_clip.duration - slide_duration

        # ========== 2. Resolve the visual (video segment or image) ==========
        base_clip = None
        video_file = None

        # Prefer the caller-supplied video_paths (indexed by page order).
        if video_paths and page <= len(video_paths):
            video_path = video_paths[page - 1]
            if video_path is not None and Path(video_path).exists():
                video_file = Path(video_path)
                print(f"📹 页面 {page}: 使用传入的视频片段: {video_file.name}")

        # Otherwise, look up the pre-collected files (numeric page order).
        if video_file is None and page in all_video_files:
            video_file = all_video_files[page]
            print(f"📹 页面 {page}: 使用找到的视频文件: {video_file.name}")

        # Backward compatibility: probe the two legacy naming formats.
        if video_file is None:
            video_dir = story_dir / "video"
            if video_dir.exists():
                candidate_video = video_dir / f"p{page:04d}.mp4"
                if candidate_video.exists():
                    video_file = candidate_video
                else:
                    fallback_video = video_dir / f"p{page}.mp4"
                    if fallback_video.exists():
                        video_file = fallback_video

        if video_file:
            try:
                video_clip = VideoFileClip(str(video_file)).without_audio()
                # Match the video duration to the speech duration (A/V sync).
                video_duration = video_clip.duration
                speech_duration = speech_clip.duration

                if video_duration > speech_duration:
                    # Video longer than speech: trim it.
                    base_clip = video_clip.subclip(0, speech_duration)
                elif video_duration < speech_duration:
                    # Video shorter than speech: loop it (or hold the frame).
                    if video_duration > 0:
                        # Loop until the speech duration is covered, then trim.
                        loops = int(speech_duration / video_duration) + 1
                        clips_to_loop = [video_clip] * loops
                        looped_clip = concatenate_videoclips(clips_to_loop, method="compose")
                        base_clip = looped_clip.subclip(0, speech_duration)
                    else:
                        base_clip = video_clip.set_duration(speech_duration)
                else:
                    # Durations already match.
                    base_clip = video_clip

                print(f"  ✅ 视频片段已加载（时长: {base_clip.duration:.2f}秒，匹配语音: {speech_duration:.2f}秒）")
            except Exception as exc:
                print(f"⚠️  加载视频片段失败（第 {page} 页，{video_file.name}）：{exc}")
                base_clip = None

        if base_clip is None:
            # ===== Prefer Runway text-to-video; fall back to image generation =====
            runway_video_file = None
            generated_image_file = None

            if use_runway and runway_api_key:
                try:
                    # Assemble the Runway request configuration.
                    runway_cfg = runway_config or {}
                    # Prefer the explicit prompt text; fall back to the caption.
                    prompt_text = runway_cfg.get("prompt_text", captions[page - 1] if page <= len(captions) else "")
                    duration = runway_cfg.get("duration", int(speech_clip.duration))
                    # Valid model values per the actual API: gen3, gen4, gen4_turbo.
                    model = runway_cfg.get("model", "gen4")
                    # Sanitize the model value.
                    if model not in ["gen3", "gen4", "gen4_turbo"]:
                        model = "gen4"  # Default.
                    seed = runway_cfg.get("seed", None)
                    watermark = runway_cfg.get("watermark", False)
                    # Valid ratio values per the actual API: 16:9, 9:16.
                    ratio = runway_cfg.get("ratio", "16:9")
                    # Coerce unexpected ratio strings into a supported value.
                    if ratio not in ["16:9", "9:16"]:
                        if "16" in ratio and "9" in ratio:
                            if ratio.startswith("16") or "16:9" in ratio:
                                ratio = "16:9"
                            else:
                                ratio = "9:16"
                        else:
                            ratio = "16:9"  # Default.
                    timeout = runway_cfg.get("timeout", 300)
                    image_url = runway_cfg.get("image_url", None)  # Optional reference image URL.

                    # Step 1: try generating a video directly from the prompt.
                    if prompt_text and prompt_text.strip():
                        print(f"\n🎬 页面 {page}: 优先尝试使用文本提示词生成视频...")
                        video_dir = story_dir / "video"
                        video_dir.mkdir(parents=True, exist_ok=True)

                        # API key comes from config (runway.api_key) or the
                        # RUNWAY_API_KEY environment variable.
                        runway_video_file = generate_video_from_text_with_runway(
                            prompt_text=prompt_text,
                            api_key=runway_api_key,
                            duration=duration,
                            model=model,
                            seed=seed,
                            watermark=watermark,
                            ratio=ratio,
                            timeout=timeout,
                            save_dir=video_dir,
                            filename=f"p{page:04d}.mp4",
                            image_url=image_url
                        )

                        if runway_video_file and Path(runway_video_file).exists():
                            # Use the Runway-generated video.
                            try:
                                video_clip = VideoFileClip(runway_video_file).without_audio()
                                base_clip = video_clip.set_duration(speech_clip.duration)
                                print(f"  ✅ 使用 Runway 文本生成的视频: {Path(runway_video_file).name}")
                            except Exception as exc:
                                print(f"  ⚠️  加载 Runway 生成的视频失败: {exc}，尝试生成图片")
                                runway_video_file = None
                        else:
                            print(f"  ⚠️  文本生成视频失败，尝试生成图片...")
                            runway_video_file = None

                    # Step 2: if video generation failed, fall back to a
                    # DashScope-generated image.
                    if base_clip is None and prompt_text and prompt_text.strip():
                        print(f"\n🖼️  页面 {page}: Runway 视频生成失败，回退到 DashScope 文生图...")
                        generated_image_file = generate_image_with_dashscope(
                            prompt_text=prompt_text,
                            image_dir=image_dir,
                            page=page,
                            image_generation_config=image_generation_config
                        )

                        if generated_image_file and Path(generated_image_file).exists():
                            print(f"  ✅ 使用 DashScope 生成的回退图片: {Path(generated_image_file).name}")
                            # Record the generated image path; used below.
                            image_file = generated_image_file
                        else:
                            print(f"  ⚠️  DashScope 图片生成也失败，将尝试使用现有图片文件")
                            generated_image_file = None

                except Exception as e:
                    print(f"  ⚠️  Runway API 调用出错: {e}，回退到图像处理")
                    import traceback
                    traceback.print_exc()
                    runway_video_file = None
                    generated_image_file = None

            # ===== Runway disabled or failed: locate an existing image =====
            if base_clip is None:
                # Prefer the pre-collected files (numeric page order).
                image_file = None

                # Use the Runway/DashScope-generated image when available.
                if generated_image_file and Path(generated_image_file).exists():
                    image_file = str(generated_image_file)
                    print(f"🖼️  页面 {page}: 使用 Runway 生成的图片: {Path(generated_image_file).name}")
                # Look up the pre-collected images (numeric page order).
                elif page in all_image_files:
                    image_file = str(all_image_files[page])
                    print(f"🖼️  页面 {page}: 使用找到的图片文件: {all_image_files[page].name}")
                else:
                    # Backward compatibility: probe legacy naming formats.
                    image_file_4digit = image_dir / f"p{page:04d}.png"
                    image_file_1digit = image_dir / f"p{page}.png"

                    if image_file_4digit.exists():
                        image_file = str(image_file_4digit)
                        print(f"🖼️  页面 {page}: 使用图片文件: {image_file_4digit.name}")
                    elif image_file_1digit.exists():
                        image_file = str(image_file_1digit)
                        print(f"🖼️  页面 {page}: 使用图片文件: {image_file_1digit.name}")
                    else:
                        # Last resort: any file matching the page prefix.
                        matching_files = list(image_dir.glob(f"p{page}*.png"))
                        if matching_files:
                            # Sort matches by their numeric page component.
                            matching_files.sort(key=lambda x: int(re.search(r'p(\d+)', x.stem).group(1)) if re.search(r'p(\d+)', x.stem) else 0)
                            image_file = str(matching_files[0])
                            print(f"⚠️  页面 {page}: 使用匹配的文件: {matching_files[0].name}")
                        else:
                            error_msg = f"页面 {page} 的图像文件不存在: 期望 p{page:04d}.png 或 p{page}.png"
                            print(f"\n❌ 错误：{error_msg}")
                            raise FileNotFoundError(error_msg)

            # Build an animated clip from the still image.
            if base_clip is None:
                # Read the image (OpenCV loads BGR; convert to RGB).
                image_array = cv2.imread(image_file)
                image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)

                # Keep the path for smooth-transition generation later.
                image_paths.append(image_file)

                image_clip = ImageClip(image_array)
                image_clip = image_clip.set_duration(speech_clip.duration).set_fps(fps)

                # With smooth transitions, crossfades are applied later in
                # assemble_storyboard_sequence, not here.
                if transition_method not in ["optical_flow", "hybrid"]:
                    image_clip = image_clip.crossfadein(fade_duration).crossfadeout(fade_duration)

                image_clip = apply_camera_motion(
                    image_clip,
                    shot_profile=shot_profile,
                    default_zoom_speed=zoom_speed,
                    default_move_ratio=move_ratio
                )

                base_clip = image_clip

        base_clip = base_clip.set_fps(fps)

        # ========== 3. Create an independent subtitle layer ==========
        caption = captions[page - 1] if page <= len(captions) else ""
        if caption:
            # Render the subtitle as its own clip (not drawn onto the image),
            # so it does not move with the camera motion.
            subtitle_clip = create_subtitle_clip_independent(
                caption,
                duration=speech_clip.duration,
                width=base_clip.w,
                height=base_clip.h,
                caption_config=dict(caption_config),
                anchor=shot_profile.get("subtitle_anchor", "lower")
            )
            # Composite: image + subtitle (subtitle layer stays fixed).
            page_clip = CompositeVideoClip([base_clip, subtitle_clip]).set_audio(speech_clip)
        else:
            page_clip = base_clip.set_audio(speech_clip)

        # Before appending the page clip, insert a transition clip (if any)
        # so transitions are generated and placed in order.
        if page > 1 and use_smooth_transition and len(image_paths) >= 2:
            try:
                prev_image_path = image_paths[-2]  # Previous page image.
                curr_image_path = image_paths[-1]  # Current page image.

                # Look up the transition type for this page boundary.
                prev_profile = storyboard_plan[page - 2] if page - 2 < len(storyboard_plan) else {}
                curr_profile = storyboard_plan[page - 1] if page - 1 < len(storyboard_plan) else {}

                in_transition = curr_profile.get("transition_in", "crossfade")

                # Only generate a smooth clip for crossfade-class transitions.
                if in_transition in CROSSFADE_TRANSITIONS:
                    print(f"  生成页面 {page-1} -> {page} 的平滑过渡（{transition_method}）...")

                    # Build the smooth transition clip between the two stills.
                    transition_clip = create_smooth_transition_clip(
                        prev_image_path,
                        curr_image_path,
                        duration=transition_duration,
                        fps=fps,
                        transition_method=transition_method,
                        blend_mode="ease_in_out"
                    )

                    # The transition carries no audio (the page clip does).
                    transition_clip = transition_clip.without_audio()

                    # Shorten the previous clip (drop its fade-out span) while
                    # keeping its audio intact for sync.
                    if len(video_clips) > 0:
                        prev_clip = video_clips[-1]
                        prev_duration = prev_clip.duration

                        # New duration: remove the transition span, but keep at
                        # least transition_duration * 2.
                        new_duration = max(transition_duration * 2, prev_duration - transition_duration)

                        # Keep the audio in sync when the previous clip has one.
                        if prev_clip.audio is not None:
                            # Shorten the video; keep (or trim) the audio.
                            new_video = prev_clip.subclip(0, new_duration)
                            if prev_clip.audio.duration > new_duration:
                                new_audio = prev_clip.audio.subclip(0, new_duration)
                            else:
                                new_audio = prev_clip.audio
                            video_clips[-1] = new_video.set_audio(new_audio)
                        else:
                            video_clips[-1] = prev_clip.subclip(0, new_duration)

                    # Append the transition clip.
                    video_clips.append(transition_clip)
                    print(f"    ✅ 过渡片段已添加（时长: {transition_duration:.2f}秒）")

            except Exception as e:
                print(f"⚠️  创建平滑过渡失败（页面 {page-1} -> {page}）: {e}")
                import traceback
                traceback.print_exc()
                # On failure, fall back to a simple crossfade.
                if transition_method not in ["optical_flow", "hybrid"]:
                    page_clip = page_clip.crossfadein(transition_duration)

        video_clips.append(page_clip)
        print(f"✅ 页面 {page} 处理完成（视频片段总数: {len(video_clips)}）")

    # Sanity check: every page should have produced a clip.
    if len(video_clips) != num_pages:
        print(f"\n⚠️  警告：视频片段数量 ({len(video_clips)}) 与目标页面数 ({num_pages}) 不匹配！")
        print("   这可能导致某些页面被跳过。请检查图片和视频文件是否完整。")
        # If there are fewer clips than pages, report it but continue.
        if len(video_clips) < num_pages:
            missing_pages = num_pages - len(video_clips)
            print(f"   缺少 {missing_pages} 个片段，将尝试补充...")
            # Supplement logic could go here; continue for safety.

    # ========== 4. Apply transitions ==========
    print(f"\n应用镜头转场与衔接（共 {len(video_clips)} 个片段）...")
    composite_clip = assemble_storyboard_sequence(
        video_clips,
        storyboard_plan,
        transition_duration,
        image_paths=image_paths if len(image_paths) == len(video_clips) else None,
        transition_method=transition_method,
        fps=fps
    )

    # ========== 5. Subtitles ==========
    # Subtitles were already added per page as independent layers, so they
    # render fully and do not move with the image.
    print("✅ 字幕已作为独立层添加到视频中（完整显示，不随图像移动）")

    # ========== 6. Export ==========
    print(f"\n正在导出视频到: {save_path}")
    composite_clip.write_videofile(
        str(save_path),
        audio_fps=audio_sample_rate,
        audio_codec=audio_codec,
    )

    print("\n" + "="*60)
    print(f"✅ 视频合成完成！")
    print(f"   输出路径: {save_path}")
    print(f"   视频时长: {composite_clip.duration:.2f} 秒")
    print(f"   包含页面: {num_pages} 页")
    if not use_speech:
        print(f"   ⚠️  注意：此视频不包含语音")
    print("="*60 + "\n")


@register_tool("slideshow_video_compose")
class SlideshowVideoComposeAgent:
    """Tool agent that drives ``compose_video`` from a parameter dict."""

    def __init__(self, cfg) -> None:
        self.cfg = cfg
        print("[OK] 视频合成器初始化完成")

    def adjust_caption_config(self, width, height):
        """Derive subtitle rendering options from the video dimensions."""
        # Font scales with the mean of width and height; margin with height.
        font_px = int((width + height) / 2 * 0.024)
        margin_px = int(height * 0.08)
        return {
            "fontsize": font_px,
            "safe_margin": margin_px,
            "safe_margin_ratio": margin_px / max(height, 1),
            "line_spacing": 0.32,
            "background_opacity": 0.72,
            "width_ratio": 0.88,
            "bg_color": (0, 0, 0)
        }

    def call(self, params):
        """Prepare configuration from ``params`` and run video composition."""
        height = params["height"]
        width = params["width"]
        pages = params["pages"]
        params["caption"].update(self.adjust_caption_config(width, height))

        # Transition method comes from the slideshow_effect config block,
        # defaulting to "crossfade".
        effect_cfg = params.get("slideshow_effect", {})
        chosen_transition = effect_cfg.get("transition_method", "crossfade")

        # Runway API settings. The key may live in the config file
        # (runway.api_key) or in the RUNWAY_API_KEY environment variable.
        runway_cfg = params.get("runway", {})
        runway_enabled = runway_cfg.get("enabled", False)
        api_key = runway_cfg.get("api_key", None) or os.getenv("RUNWAY_API_KEY")

        if runway_enabled and not api_key:
            print("⚠️  警告：已启用 Runway 但未配置 API 密钥")
            print("   请在配置文件中设置 runway.api_key 或设置环境变量 RUNWAY_API_KEY")
            runway_enabled = False

        # Assemble the Runway request parameters.
        runway_request = {
            "prompt_text": runway_cfg.get("prompt_text", ""),
            "duration": runway_cfg.get("duration", 5),
            # Model defaults to gen4; valid values: gen3, gen4, gen4_turbo.
            "model": runway_cfg.get("model", "gen4"),
            "seed": runway_cfg.get("seed", None),
            "watermark": runway_cfg.get("watermark", False),
            # Ratio defaults to 16:9; valid values: 16:9, 9:16.
            "ratio": runway_cfg.get("ratio", "16:9"),
            "timeout": runway_cfg.get("timeout", 300),
            # Optional reference image URL.
            "image_url": runway_cfg.get("image_url", None)
        }

        # Optional per-page video segment paths (None entries mean "image").
        segment_paths = params.get("video_paths", None)
        if segment_paths:
            segment_paths = [Path(p) if p is not None else None for p in segment_paths]
            print(f"📹 检测到 {sum(1 for v in segment_paths if v is not None)} 个视频片段路径")

        # Optional image-generation config for the DashScope fallback.
        img_gen_cfg = params.get("image_generation", None)

        extra_effect_kwargs = {
            key: value for key, value in effect_cfg.items()
            if key != "transition_method"
        }

        compose_video(
            story_dir=Path(params["story_dir"]),
            save_path=Path(params["story_dir"]) / "output.mp4",
            captions=pages,
            num_pages=len(pages),
            fps=params["fps"],
            video_paths=segment_paths,
            audio_sample_rate=params["audio_sample_rate"],
            audio_codec=params["audio_codec"],
            caption_config=params["caption"],
            transition_method=chosen_transition,
            use_runway=runway_enabled,
            runway_api_key=api_key,
            runway_config=runway_request if runway_enabled else None,
            image_generation_config=img_gen_cfg,
            **extra_effect_kwargs
        )