# text2video.py - 花艺AI文生视频模块
# 导入必要的模块
import re
import random
import shutil
from PIL import Image, ImageDraw
import os
import json
import uuid
import time
import logging
import tempfile
import subprocess
import requests
import base64
import numpy as np
from PIL import Image
from io import BytesIO
from typing import Dict, List, Any, Optional, Tuple
from pydantic import BaseModel
from dotenv import load_dotenv

# 导入现有模块
from speech_interaction import speech_manager, SpeechSynthesisRequest

# Load environment configuration (.env) before any keys are read below.
load_dotenv()
# DeepSeek key has no fallback and may be None if the env var is unset.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
# SECURITY NOTE(review): a real-looking API key is hard-coded as the fallback
# below; it should be revoked and supplied via the environment only.
STABILITY_AI_KEY = os.getenv("TEXT_TO_IMAGE_API_KEY", "sk-6681c4a50d444dee8a8a17f3b2ef9913")
# Placeholder fallback; CharacterAnimator treats this sentinel as "no key".
LIVEPORTRAIT_API_KEY = os.getenv("LIVEPORTRAIT_API_KEY", "your_liveportrait_api_key")

# Configure module-wide logging.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Output directories; generated URLs in this module point under /static,
# presumably served by the web app — TODO confirm against the server config.
OUTPUT_DIR = "static/videos"
IMAGES_DIR = "static/images"
AUDIO_DIR = "static/audio"
TEMP_DIR = "static/temp"

# Create the directories at import time so later writes cannot fail on them.
for directory in [OUTPUT_DIR, IMAGES_DIR, AUDIO_DIR, TEMP_DIR]:
    os.makedirs(directory, exist_ok=True)


# Model definitions
class ScriptRequest(BaseModel):
    """Request for generating a flower-arrangement tutorial video script."""
    topic: str  # subject of the tutorial
    style: str = "detailed"  # basic, detailed, step-by-step
    duration: int = 60  # target video duration in seconds
    language: str = "mandarin"  # language of the generated narration


class CharacterRequest(BaseModel):
    """Request for generating a virtual florist character portrait."""
    style: str = "anime"  # anime, realistic, painting
    gender: str = "female"  # male, female
    outfit: str = "professional"  # professional, casual, traditional
    background: str = "studio"  # studio, garden, natural


class VideoRequest(BaseModel):
    """Request for assembling a final video from previously generated assets."""
    script_id: str  # id of a script produced by ScriptGenerator
    character_id: str  # id of a character produced by CharacterGenerator
    voice_id: Optional[str] = None  # optional TTS voice override
    background_music: Optional[str] = None  # background track — not used in this chunk, TODO confirm
    resolution: str = "720p"  # 480p, 720p, 1080p


class TextToVideoRequest(BaseModel):
    """One-shot request covering the whole text-to-video pipeline."""
    topic: str  # subject of the tutorial
    character_style: str = "anime"  # passed through to CharacterRequest.style
    voice_id: Optional[str] = None  # optional TTS voice override
    style: str = "detailed"  # script style: basic, detailed, step-by-step
    duration: int = 60  # target video duration in seconds
    language: str = "mandarin"  # narration language


class VideoResponse(BaseModel):
    """Summary of a finished video returned to the client."""
    video_id: str
    video_url: str  # URL under /static/videos
    thumbnail_url: str
    duration: float  # seconds
    creation_time: str


# Script generation
class ScriptGenerator:
    """Generates timestamped flower-arrangement tutorial scripts via the DeepSeek chat API."""

    # Upper bound (seconds) on the DeepSeek HTTP call; without a timeout a
    # stalled connection would block the pipeline indefinitely.
    REQUEST_TIMEOUT = 60

    def __init__(self, api_key: Optional[str] = None):
        # Resolve the default lazily so the environment is read when the
        # generator is constructed rather than frozen at import time.
        self.api_key = api_key if api_key is not None else DEEPSEEK_API_KEY

    def generate_script(self, request: ScriptRequest, rag_system) -> Dict[str, Any]:
        """Generate a tutorial video script for the requested topic.

        Retrieves supporting context from the RAG system, prompts DeepSeek
        for a timestamped script, parses the reply into segments and saves
        the result as JSON under TEMP_DIR.

        Args:
            request: topic, style, duration and language of the script.
            rag_system: must expose retrieve_context_with_sources(query, top_k).

        Returns:
            The persisted script metadata dict (id, segments, full_text, ...).

        Raises:
            Exception: wrapping any retrieval, API or file-system failure.
        """
        try:
            logger.info(f"正在为主题 '{request.topic}' 生成{request.style}风格的文案")

            # Ground the script in retrieved domain knowledge; sources are
            # not used further here.
            context, _sources = rag_system.retrieve_context_with_sources(request.topic, top_k=5)

            # System-prompt fragment per requested script style.
            style_prompts = {
                "basic": "创建一个简洁的插花教学视频文案，使用友好的语气，避免过于专业的术语。",
                "detailed": "创建一个详细的插花教学视频文案，包含专业术语和技巧，同时保持友好的语气。",
                "step-by-step": "创建一个逐步讲解的插花教学视频文案，每个步骤都要清晰详细，适合初学者。"
            }

            # Pacing estimate used to size the script. round() so the prompt
            # reads "约150字" rather than "约150.0字".
            words_per_minute = 150 if request.language == "mandarin" else 130
            total_words = round(request.duration / 60 * words_per_minute)
            paragraphs = max(3, request.duration // 20)

            system_prompt = f"""你是一位专业的插花艺术视频编剧。请根据提供的参考信息，创建一个{request.duration}秒长的插花教学视频文案。
            {style_prompts.get(request.style, style_prompts["detailed"])}

            文案要求:
            1. 总字数约{total_words}字，分为{paragraphs}个段落，每段配有时间戳
            2. 包含开场白、主体内容和结束语
            3. 语言自然流畅，适合视频配音
            4. 只使用提供的参考信息和基本插花知识
            5. 使用第一人称，像真人教学一样

            最终输出格式:
            ```
            [00:00] 开场白...

            [00:10] 第一部分内容...

            [00:30] 第二部分内容...

            [结束] 总结和再见...
            ```
            """

            user_prompt = f"""主题: {request.topic}

            参考信息:
            {context}

            请根据上述主题和参考信息，创建一个专业的插花教学视频文案。"""

            # Call the DeepSeek chat-completion API.
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.api_key}"
            }

            payload = {
                "model": "deepseek-chat",
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                "temperature": 0.7,
                "max_tokens": 2048
            }

            response = requests.post(
                "https://api.deepseek.com/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()

            result = response.json()
            script_text = result["choices"][0]["message"]["content"]

            # Split the raw model output into timestamped segments.
            script_segments = self._parse_segments(script_text)

            # Persist the script under a unique id.
            script_id = f"script_{int(time.time())}_{uuid.uuid4().hex[:8]}"
            script_path = os.path.join(TEMP_DIR, f"{script_id}.json")

            script_data = {
                "id": script_id,
                "topic": request.topic,
                "style": request.style,
                "language": request.language,
                "duration": request.duration,
                "created_at": time.strftime("%Y-%m-%d %H:%M:%S"),
                "segments": script_segments,
                "full_text": script_text
            }

            with open(script_path, 'w', encoding='utf-8') as f:
                json.dump(script_data, f, ensure_ascii=False, indent=2)

            logger.info(f"文案生成成功，保存至 {script_path}")
            return script_data

        except Exception as e:
            logger.error(f"文案生成失败: {str(e)}")
            raise Exception(f"文案生成失败: {str(e)}") from e

    @staticmethod
    def _parse_segments(script_text: str) -> List[Dict[str, str]]:
        """Split model output into segments keyed by [MM:SS] / [结束] markers.

        Lines without a marker are folded into the current segment; any text
        before the first marker is discarded (matching the prompted format).
        """
        segments = []
        current_timestamp = None
        current_content = []

        for line in script_text.split('\n'):
            line = line.strip()
            if not line:
                continue

            timestamp_match = re.search(r'\[([\d:]+|结束)\]', line)
            if timestamp_match:
                # Flush the previous segment before starting a new one.
                if current_timestamp is not None and current_content:
                    segments.append({
                        "timestamp": current_timestamp,
                        "content": '\n'.join(current_content).strip()
                    })

                current_timestamp = timestamp_match.group(1)
                content_start = line.find(']') + 1
                current_content = [line[content_start:].strip()]
            else:
                current_content.append(line)

        # Flush the trailing segment.
        if current_timestamp is not None and current_content:
            segments.append({
                "timestamp": current_timestamp,
                "content": '\n'.join(current_content).strip()
            })

        return segments


# Virtual florist character generation
class CharacterGenerator:
    """Generates a virtual florist portrait via Stability AI's SDXL text-to-image API."""

    # Upper bound (seconds) on the image-generation HTTP call.
    REQUEST_TIMEOUT = 120

    def __init__(self, api_key: Optional[str] = None):
        # Resolved at construction time so the default is not frozen into
        # the signature at import time.
        self.api_key = api_key if api_key is not None else STABILITY_AI_KEY

    @staticmethod
    def _build_prompts(request: CharacterRequest) -> Tuple[str, str]:
        """Translate a CharacterRequest into (prompt, negative_prompt) strings."""
        gender_desc = "female florist" if request.gender == "female" else "male florist"
        style_desc = {
            "anime": "anime style, 2D illustration, clean lines",
            "realistic": "photorealistic portrait, detailed features",
            "painting": "digital painting style, artistic interpretation"
        }.get(request.style, "anime style")

        # NOTE(review): unknown outfit/background values fall back to the bare
        # key words ("professional"/"studio") rather than a full description —
        # preserved as-is; confirm whether the full description was intended.
        outfit_desc = {
            "professional": "wearing professional florist attire with apron",
            "casual": "wearing casual but neat attire suitable for flower arrangement",
            "traditional": "wearing traditional cultural attire appropriate for flower arrangement"
        }.get(request.outfit, "professional")

        background_desc = {
            "studio": "in a clean flower studio with soft lighting",
            "garden": "in a beautiful flower garden with natural lighting",
            "natural": "with a simple, neutral background"
        }.get(request.background, "studio")

        prompt = f"A professional {gender_desc}, {outfit_desc}, {background_desc}. {style_desc}, high-quality, detailed face with friendly expression, flower arrangement tools visible, vibrant colors, suitable for video tutorial, front-facing pose."

        # Negative prompt suppressing common anatomy/quality failures.
        negative_prompt = "deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation"

        return prompt, negative_prompt

    def generate_character(self, request: CharacterRequest) -> Dict[str, Any]:
        """Generate a character portrait and save it (plus metadata) to disk.

        Args:
            request: style/gender/outfit/background options for the portrait.

        Returns:
            Character metadata dict including the served image URL.

        Raises:
            Exception: wrapping any API or file-system failure.
        """
        try:
            logger.info(f"生成{request.style}风格的虚拟花艺师角色")

            prompt, negative_prompt = self._build_prompts(request)

            # SDXL 1024x1024 text-to-image endpoint.
            api_url = "https://api.stability.ai/v1/generation/stable-diffusion-xl-1024-v1-0/text-to-image"
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
                "Authorization": f"Bearer {self.api_key}"
            }

            payload = {
                "text_prompts": [
                    {
                        "text": prompt,
                        "weight": 1.0
                    },
                    {
                        "text": negative_prompt,
                        "weight": -1.0
                    }
                ],
                "cfg_scale": 7.0,
                "height": 1024,
                "width": 1024,
                "samples": 1,
                "steps": 30
            }

            logger.info(f"发送角色生成请求: {prompt}")
            response = requests.post(api_url, headers=headers, json=payload,
                                     timeout=self.REQUEST_TIMEOUT)
            response.raise_for_status()

            result = response.json()

            # The API returns generated images in the "artifacts" array.
            if "artifacts" in result and len(result["artifacts"]) > 0:
                image_data = result["artifacts"][0]["base64"]

                character_id = f"character_{int(time.time())}_{uuid.uuid4().hex[:8]}"
                image_filename = f"{character_id}.png"
                image_path = os.path.join(IMAGES_DIR, image_filename)

                with open(image_path, "wb") as f:
                    f.write(base64.b64decode(image_data))

                # Pre-render mouth variants so lip-sync can run without an
                # external animation API.
                if request.style == "anime":
                    self._create_mouth_variants(image_path, character_id)

                character_data = {
                    "id": character_id,
                    "style": request.style,
                    "gender": request.gender,
                    "outfit": request.outfit,
                    "background": request.background,
                    "image_url": f"/static/images/{image_filename}",
                    "created_at": time.strftime("%Y-%m-%d %H:%M:%S")
                }

                metadata_path = os.path.join(TEMP_DIR, f"{character_id}.json")
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(character_data, f, ensure_ascii=False, indent=2)

                logger.info(f"角色生成成功，保存至 {image_path}")
                return character_data
            else:
                raise Exception("API未返回有效图像")

        except Exception as e:
            logger.error(f"角色生成失败: {str(e)}")
            raise Exception(f"角色生成失败: {str(e)}") from e

    def _create_mouth_variants(self, image_path: str, character_id: str):
        """Render closed / half-open / open mouth variants of the portrait.

        Mouth placement is a rough heuristic (face assumed centered in the
        upper part of the image); failure is non-fatal since LivePortrait
        can be used as the animation fallback.
        """
        try:
            with Image.open(image_path) as img:
                width, height = img.size

                # Heuristic face position: horizontal center, upper ~40%.
                face_center_x = width // 2
                # int(height / 2.5) rather than height // 2.5: floor-dividing
                # by a float yields a float that leaks into later pixel math.
                face_center_y = int(height / 2.5)

                # Mouth sits a little below the estimated face center.
                mouth_x = face_center_x
                mouth_y = int(face_center_y * 1.2)

                mouth_width = width // 10
                mouth_height = height // 20

                mouth_left = mouth_x - mouth_width // 2
                mouth_right = mouth_x + mouth_width // 2

                # Copy the source so each variant starts from a clean image.
                original = img.copy()

                for variant in ("closed", "half_open", "open"):
                    img_variant = original.copy()
                    draw = ImageDraw.Draw(img_variant)

                    if variant == "closed":
                        # Closed mouth: flat horizontal line.
                        draw.line([(mouth_left, mouth_y), (mouth_right, mouth_y)],
                                  fill=(0, 0, 0), width=2)
                    elif variant == "half_open":
                        # Half open: small ellipse.
                        draw.ellipse([(mouth_left, mouth_y - mouth_height // 4),
                                      (mouth_right, mouth_y + mouth_height // 4)],
                                     outline=(0, 0, 0), width=2)
                    else:
                        # Fully open: tall ellipse.
                        draw.ellipse([(mouth_left, mouth_y - mouth_height // 2),
                                      (mouth_right, mouth_y + mouth_height // 2)],
                                     outline=(0, 0, 0), width=2)

                    variant_path = os.path.join(IMAGES_DIR, f"{character_id}_{variant}.png")
                    img_variant.save(variant_path)
                    logger.info(f"创建口型变体: {variant_path}")

        except Exception as e:
            logger.warning(f"创建口型变体失败 (将使用LivePortrait作为替代): {str(e)}")


# Speech synthesis
class VoiceSynthesizer:
    """Turns a generated script into per-segment speech files plus one merged track."""

    def __init__(self, speech_manager=None):
        # The default manager is resolved lazily rather than being captured
        # in the signature when the module is imported.
        if speech_manager is None:
            from speech_interaction import speech_manager as default_manager
            speech_manager = default_manager
        self.speech_manager = speech_manager

    async def synthesize_voice(self, script_data: Dict[str, Any], voice_id: Optional[str] = None) -> Dict[str, Any]:
        """Synthesize speech for every script segment and write a merged track.

        Args:
            script_data: script metadata dict (needs 'id', 'segments', 'language').
            voice_id: optional TTS voice override.

        Returns:
            Audio metadata dict describing the per-segment files and totals.

        Raises:
            Exception: wrapping any synthesis or file I/O failure.
        """
        try:
            logger.info(f"为脚本 {script_data['id']} 生成语音")

            language = script_data.get('language', 'mandarin')
            audio_id = f"audio_{script_data['id']}"

            # One directory per synthesis job.
            audio_dir = os.path.join(AUDIO_DIR, audio_id)
            os.makedirs(audio_dir, exist_ok=True)

            segment_files = []
            total_duration = 0

            for i, segment in enumerate(script_data['segments']):
                content = segment['content']

                tts_request = SpeechSynthesisRequest(
                    text=content,
                    dialect=language,
                    voice_id=voice_id,
                    speed=1.0
                )

                result = await self.speech_manager.process_text_to_speech(
                    tts_request.text,
                    tts_request.dialect,
                    tts_request.voice_id,
                    tts_request.speed
                )

                # Skip segments that failed to synthesize (best effort).
                if not result.audio_base64:
                    logger.warning(f"段落 {i} 语音合成失败，使用空音频")
                    continue

                segment_bytes = base64.b64decode(result.audio_base64)

                segment_filename = f"segment_{i:03d}.wav"
                segment_path = os.path.join(audio_dir, segment_filename)

                with open(segment_path, "wb") as f:
                    f.write(segment_bytes)

                # duration may be None depending on the TTS backend; treat as 0.
                segment_duration = result.duration or 0
                segment_files.append({
                    "index": i,
                    "path": segment_path,
                    "filename": segment_filename,
                    "duration": segment_duration,
                    "start_time": total_duration
                })

                total_duration += segment_duration

            # Merge the per-segment WAVs into one playable file. Raw byte
            # concatenation would leave stale RIFF headers mid-stream.
            full_audio_path = os.path.join(audio_dir, "full_audio.wav")
            self._concatenate_wavs([s["path"] for s in segment_files], full_audio_path)

            audio_metadata = {
                "id": audio_id,
                "script_id": script_data['id'],
                "language": language,
                "voice_id": voice_id,
                "duration": total_duration,
                "full_audio_path": full_audio_path,
                "segments": segment_files,
                "created_at": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            metadata_path = os.path.join(audio_dir, "metadata.json")
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(audio_metadata, f, ensure_ascii=False, indent=2)

            logger.info(f"语音合成成功，共 {len(segment_files)} 个段落，总时长 {total_duration:.2f} 秒")
            return audio_metadata

        except Exception as e:
            logger.error(f"语音合成失败: {str(e)}")
            raise Exception(f"语音合成失败: {str(e)}") from e

    @staticmethod
    def _concatenate_wavs(segment_paths: List[str], output_path: str) -> None:
        """Concatenate WAV files into a single valid WAV.

        Stitches audio frames with the wave module so only one RIFF header
        is written. If a segment cannot be parsed as WAV (e.g. the TTS
        backend returned a different container), falls back to the original
        best-effort raw byte concatenation.
        """
        import wave

        try:
            params = None
            frames = []
            for path in segment_paths:
                with wave.open(path, "rb") as wav_in:
                    if params is None:
                        # Assume all segments share the first segment's
                        # format (same TTS backend) — TODO confirm.
                        params = wav_in.getparams()
                    frames.append(wav_in.readframes(wav_in.getnframes()))

            if params is None:
                # No segments at all: still create the (empty) output file,
                # matching the previous behavior.
                with open(output_path, "wb") as f:
                    f.write(b'')
                return

            with wave.open(output_path, "wb") as wav_out:
                wav_out.setparams(params)
                for chunk in frames:
                    wav_out.writeframes(chunk)

        except wave.Error:
            with open(output_path, "wb") as f:
                for path in segment_paths:
                    with open(path, "rb") as seg:
                        f.write(seg.read())


# Character animation
class CharacterAnimator:
    """Adds lip-sync animation to a generated character, with graceful fallbacks.

    Backend preference order:
      1. anime characters with pre-rendered mouth variants -> local
         frame-swap animation driven by audio energy (cheap, offline);
      2. the LivePortrait API, when a real key is configured;
      3. a static image + audio video as the last resort.
    """

    # Upper bound (seconds) on LivePortrait HTTP calls so a dead endpoint
    # cannot hang the pipeline.
    REQUEST_TIMEOUT = 120

    def __init__(self, api_key: Optional[str] = None):
        # Resolved at construction time so the key is not frozen into the
        # signature when the module is imported.
        self.api_key = api_key if api_key is not None else LIVEPORTRAIT_API_KEY

    async def animate_character(self, character_data: Dict[str, Any], audio_data: Dict[str, Any]) -> Dict[str, Any]:
        """Create per-segment lip-sync clips for a character.

        Args:
            character_data: metadata from CharacterGenerator ('id', 'style').
            audio_data: metadata from VoiceSynthesizer ('id', 'segments').

        Returns:
            Animation metadata dict listing the generated clip segments.

        Raises:
            Exception: wrapping a missing character image or backend failure.
        """
        try:
            logger.info(f"为角色 {character_data['id']} 生成口型动画")

            character_style = character_data.get('style', 'anime')
            animation_id = f"animation_{character_data['id']}_{audio_data['id']}"

            # Working directory for this animation job.
            animation_dir = os.path.join(TEMP_DIR, animation_id)
            os.makedirs(animation_dir, exist_ok=True)

            # Base portrait produced by CharacterGenerator.
            character_image_path = os.path.join(IMAGES_DIR, f"{character_data['id']}.png")

            if not os.path.exists(character_image_path):
                logger.error(f"角色图像不存在: {character_image_path}")
                raise Exception("角色图像不存在")

            # Pick the cheapest animation backend the inputs support.
            if character_style == "anime" and self._has_mouth_variants(character_data['id']):
                # Frame-swap mouth animation: low resource cost, no API calls.
                animations = await self._create_simple_animation(character_data['id'], audio_data, animation_dir)
            elif self.api_key and self.api_key != "your_liveportrait_api_key":
                # High-quality lip sync through the LivePortrait service.
                animations = await self._create_liveportrait_animation(character_image_path, audio_data,
                                                                       animation_dir)
            else:
                # No variants and no usable API key: static character display.
                logger.warning("LivePortrait API密钥不可用，使用静态角色展示")
                animations = await self._create_static_character_display(character_image_path, audio_data,
                                                                         animation_dir)

            animation_data = {
                "id": animation_id,
                "character_id": character_data['id'],
                "audio_id": audio_data['id'],
                "style": character_style,
                "segments": animations,
                "created_at": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            metadata_path = os.path.join(animation_dir, "metadata.json")
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(animation_data, f, ensure_ascii=False, indent=2)

            logger.info(f"角色动画生成成功，共 {len(animations)} 个片段")
            return animation_data

        except Exception as e:
            logger.error(f"角色动画生成失败: {str(e)}")
            raise Exception(f"角色动画生成失败: {str(e)}") from e

    def _has_mouth_variants(self, character_id: str) -> bool:
        """Return True when all three pre-rendered mouth variants exist on disk."""
        variants = ("closed", "half_open", "open")
        return all(os.path.exists(os.path.join(IMAGES_DIR, f"{character_id}_{v}.png")) for v in variants)

    async def _create_simple_animation(self, character_id: str, audio_data: Dict[str, Any],
                                       output_dir: str) -> List[Dict[str, Any]]:
        """Build one frame-swap clip per audio segment from the mouth variants."""
        animations = []

        variant_paths = {
            "closed": os.path.join(IMAGES_DIR, f"{character_id}_closed.png"),
            "half_open": os.path.join(IMAGES_DIR, f"{character_id}_half_open.png"),
            "open": os.path.join(IMAGES_DIR, f"{character_id}_open.png")
        }

        for i, segment in enumerate(audio_data['segments']):
            segment_path = segment['path']
            segment_duration = segment['duration']

            output_path = os.path.join(output_dir, f"segment_{i:03d}.mp4")

            # The mouth shape per frame is driven by the audio's energy.
            await self._create_simple_mouth_animation(
                variant_paths,
                segment_path,
                output_path,
                segment_duration
            )

            animations.append({
                "index": i,
                "path": output_path,
                "duration": segment_duration,
                "start_time": segment['start_time']
            })

        return animations

    async def _create_simple_mouth_animation(self, variant_paths: Dict[str, str], audio_path: str, output_path: str,
                                             duration: float):
        """Render a lip-sync clip by picking one mouth variant per video frame.

        Frames are written to a temp dir and muxed with the audio by ffmpeg;
        the temp dir is always cleaned up afterwards.
        """
        frame_rate = 24
        total_frames = int(duration * frame_rate)

        frames_dir = tempfile.mkdtemp()

        try:
            # Per-frame normalized energy drives mouth openness.
            audio_energy = self._analyze_audio_energy(audio_path, frame_rate, total_frames)

            for frame_idx in range(total_frames):
                energy = audio_energy[min(frame_idx, len(audio_energy) - 1)]

                # Map energy to one of the three mouth shapes.
                if energy < 0.2:
                    variant = "closed"
                elif energy < 0.6:
                    variant = "half_open"
                else:
                    variant = "open"

                shutil.copy(variant_paths[variant],
                            os.path.join(frames_dir, f"frame_{frame_idx:06d}.png"))

            # Mux the frame sequence with the audio track.
            frames_pattern = os.path.join(frames_dir, "frame_%06d.png")
            cmd = [
                "ffmpeg", "-y",
                "-framerate", str(frame_rate),
                "-i", frames_pattern,
                "-i", audio_path,
                "-c:v", "libx264",
                "-preset", "medium",
                "-c:a", "aac",
                "-b:a", "192k",
                "-shortest",
                "-pix_fmt", "yuv420p",
                output_path
            ]

            subprocess.run(cmd, check=True)
            logger.info(f"简单口型动画生成成功: {output_path}")

        except Exception as e:
            logger.error(f"生成简单口型动画失败: {str(e)}")
            raise
        finally:
            shutil.rmtree(frames_dir, ignore_errors=True)

    def _analyze_audio_energy(self, audio_path: str, frame_rate: int, num_frames: int) -> List[float]:
        """Return one normalized (0..1) energy value per video frame.

        Uses librosa's RMS envelope when available; falls back to random
        values so the mouth still moves without the dependency.
        """
        # Zero-length clips need no analysis (and would otherwise divide by
        # zero in the resampling step).
        if num_frames <= 0:
            return []

        try:
            import librosa

            # RMS energy envelope of the waveform.
            y, sr = librosa.load(audio_path, sr=None)
            energy = librosa.feature.rms(y=y)[0]

            # Resample the envelope onto the video frame grid.
            frames_per_energy = len(energy) / num_frames
            energy_resampled = []

            for i in range(num_frames):
                start_idx = int(i * frames_per_energy)
                end_idx = min(int((i + 1) * frames_per_energy), len(energy))
                if start_idx >= len(energy):
                    energy_resampled.append(0.0)
                else:
                    window = energy[start_idx:end_idx]
                    energy_resampled.append(float(np.mean(window)) if len(window) > 0 else 0.0)

            return self._normalize_energy(energy_resampled)

        except ImportError:
            logger.warning("librosa库未安装，使用随机口型动画")
            # Random energies keep the mouth moving without real analysis.
            return [random.random() for _ in range(num_frames)]

    @staticmethod
    def _normalize_energy(values: List[float]) -> List[float]:
        """Scale energies into [0, 1]; silent (all-zero) input maps to zeros
        instead of raising ZeroDivisionError."""
        peak = max(values) if values else 0.0
        if peak <= 0:
            return [0.0 for _ in values]
        return [v / peak for v in values]

    async def _create_liveportrait_animation(self, character_image_path: str, audio_data: Dict[str, Any],
                                             output_dir: str) -> List[Dict[str, Any]]:
        """Create lip-sync clips through the LivePortrait API.

        Any failed segment degrades to a static image + audio clip, so the
        returned list always has one entry per audio segment.
        """
        animations = []

        for i, segment in enumerate(audio_data['segments']):
            segment_path = segment['path']
            segment_duration = segment['duration']

            output_path = os.path.join(output_dir, f"segment_{i:03d}.mp4")

            # The API takes both inputs as base64 payloads.
            with open(character_image_path, "rb") as f:
                image_data = base64.b64encode(f.read()).decode('utf-8')

            with open(segment_path, "rb") as f:
                audio_data_b64 = base64.b64encode(f.read()).decode('utf-8')

            api_url = "https://api.liveportrait.com/v1/animations"
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.api_key}"
            }

            payload = {
                "image": image_data,
                "audio": audio_data_b64,
                "output_format": "mp4",
                "sync_type": "lip_sync",
                "emotion": "neutral"
            }

            try:
                response = requests.post(api_url, headers=headers, json=payload,
                                         timeout=self.REQUEST_TIMEOUT)
                response.raise_for_status()

                result = response.json()
                animation_url = result.get("animation_url")

                if animation_url:
                    # Download the rendered clip.
                    animation_response = requests.get(animation_url, timeout=self.REQUEST_TIMEOUT)
                    animation_response.raise_for_status()

                    with open(output_path, "wb") as f:
                        f.write(animation_response.content)

                    logger.info(f"LivePortrait动画片段 {i} 生成成功")
                else:
                    logger.error(f"LivePortrait API未返回动画URL: {result}")
                    # Degrade to a static clip for this segment.
                    await self._create_static_segment(character_image_path, segment_path, output_path)

            except Exception as e:
                logger.error(f"LivePortrait API调用失败: {str(e)}")
                # Degrade to a static clip for this segment.
                await self._create_static_segment(character_image_path, segment_path, output_path)

            animations.append({
                "index": i,
                "path": output_path,
                "duration": segment_duration,
                "start_time": segment['start_time']
            })

        return animations

    async def _create_static_character_display(self, character_image_path: str, audio_data: Dict[str, Any],
                                               output_dir: str) -> List[Dict[str, Any]]:
        """Create static (no lip movement) character clips, one per audio segment."""
        animations = []

        for i, segment in enumerate(audio_data['segments']):
            segment_path = segment['path']
            segment_duration = segment['duration']

            output_path = os.path.join(output_dir, f"segment_{i:03d}.mp4")

            await self._create_static_segment(character_image_path, segment_path, output_path)

            animations.append({
                "index": i,
                "path": output_path,
                "duration": segment_duration,
                "start_time": segment['start_time']
            })

        return animations

    async def _create_static_segment(self, image_path: str, audio_path: str, output_path: str):
        """Mux a single still image with an audio track into an mp4 via ffmpeg."""
        try:
            cmd = [
                "ffmpeg", "-y",
                "-loop", "1",
                "-i", image_path,
                "-i", audio_path,
                "-c:v", "libx264",
                "-tune", "stillimage",
                "-c:a", "aac",
                "-b:a", "192k",
                "-pix_fmt", "yuv420p",
                "-shortest",
                output_path
            ]

            subprocess.run(cmd, check=True)
            logger.info(f"静态角色视频片段生成成功: {output_path}")

        except Exception as e:
            logger.error(f"生成静态角色视频片段失败: {str(e)}")
            raise


# 视频合成类 (video synthesis)
class VideoSynthesizer:
    """Assembles the final teaching video from animation clips and audio.

    All heavy lifting is delegated to FFmpeg/ffprobe via subprocess; this
    class orchestrates clip concatenation, thumbnail extraction, duration
    probing and metadata persistence.
    """

    def __init__(self):
        pass

    async def create_video(self, script_data: Dict[str, Any], animation_data: Dict[str, Any],
                           audio_data: Dict[str, Any], request: VideoRequest) -> Dict[str, Any]:
        """Compose the final teaching video and write its metadata to disk.

        Args:
            script_data: script metadata; must contain 'id'.
            animation_data: animation metadata; must contain 'id' and 'segments'.
            audio_data: audio metadata; must contain 'id' and 'full_audio_path'.
            request: output options (resolution, voice, background music).

        Returns:
            Video metadata dict (id, urls, dimensions, duration, timestamps).

        Raises:
            Exception: wraps any failure during merging or metadata output.
        """
        try:
            logger.info(f"合成最终教学视频")

            video_id = f"video_{int(time.time())}_{uuid.uuid4().hex[:8]}"

            # Map the requested resolution label to pixel dimensions;
            # unknown labels fall back to 720p.
            resolution_map = {
                "480p": (854, 480),
                "720p": (1280, 720),
                "1080p": (1920, 1080)
            }
            width, height = resolution_map.get(request.resolution, (1280, 720))

            # Each video gets its own subdirectory under OUTPUT_DIR.
            video_dir = os.path.join(OUTPUT_DIR, video_id)
            os.makedirs(video_dir, exist_ok=True)

            output_path = os.path.join(video_dir, f"{video_id}.mp4")

            # Concatenate all animation clips and mux with the full audio track.
            await self._merge_segments(animation_data['segments'], audio_data['full_audio_path'], output_path, width,
                                       height)

            # Thumbnail failures are non-fatal (handled inside the helper).
            thumbnail_path = os.path.join(video_dir, f"{video_id}_thumbnail.jpg")
            await self._create_thumbnail(output_path, thumbnail_path)

            duration = self._get_video_duration(output_path)

            # Metadata record mirrored to callers and persisted alongside the video.
            video_data = {
                "id": video_id,
                "script_id": script_data['id'],
                "animation_id": animation_data['id'],
                "audio_id": audio_data['id'],
                "resolution": request.resolution,
                "width": width,
                "height": height,
                "duration": duration,
                "video_url": f"/static/videos/{video_id}/{video_id}.mp4",
                "thumbnail_url": f"/static/videos/{video_id}/{video_id}_thumbnail.jpg",
                "created_at": time.strftime("%Y-%m-%d %H:%M:%S")
            }

            metadata_path = os.path.join(video_dir, "metadata.json")
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(video_data, f, ensure_ascii=False, indent=2)

            logger.info(f"视频合成成功: {output_path}")
            return video_data

        except Exception as e:
            logger.error(f"视频合成失败: {str(e)}")
            raise Exception(f"视频合成失败: {str(e)}")

    async def _merge_segments(self, animation_segments: List[Dict[str, Any]], full_audio_path: str, output_path: str,
                              width: int, height: int):
        """Concatenate all animation clips into one video via FFmpeg concat.

        Uses the concat demuxer with a temporary list file; video comes from
        the concatenated clips, audio from the single full-length track so
        segment boundaries stay seamless.

        Raises:
            Exception: re-raised after logging if FFmpeg fails.
        """
        # delete=False so FFmpeg can open the list file by name on all platforms.
        temp_list_file = tempfile.NamedTemporaryFile(delete=False, suffix='.txt', mode='w')

        try:
            # concat demuxer input: one "file '<path>'" line per clip.
            for segment in animation_segments:
                temp_list_file.write(f"file '{segment['path']}'\n")

            temp_list_file.close()

            cmd = [
                "ffmpeg", "-y",
                "-f", "concat",
                "-safe", "0",           # allow absolute paths in the list file
                "-i", temp_list_file.name,
                "-i", full_audio_path,  # 使用完整音频，确保无缝连接
                "-map", "0:v",          # video from the concatenated clips
                "-map", "1:a",          # audio from the full track
                "-c:v", "libx264",
                "-c:a", "aac",
                "-b:a", "192k",
                "-vf", f"scale={width}:{height}",
                "-shortest",
                "-pix_fmt", "yuv420p",
                output_path
            ]

            subprocess.run(cmd, check=True)
            logger.info(f"视频片段合并成功: {output_path}")

        except Exception as e:
            logger.error(f"视频片段合并失败: {str(e)}")
            raise
        finally:
            # Close first (a write failure above leaves the handle open, and
            # an open file cannot be unlinked on Windows), then best-effort
            # delete; only swallow filesystem errors, never everything.
            if not temp_list_file.closed:
                temp_list_file.close()
            try:
                os.unlink(temp_list_file.name)
            except OSError:
                pass

    async def _create_thumbnail(self, video_path: str, thumbnail_path: str):
        """Extract a single frame from the video as a JPEG thumbnail.

        Grabs the frame at 3s. Failure is logged but deliberately not
        raised — a missing thumbnail should not abort video creation.
        """
        try:
            cmd = [
                "ffmpeg", "-y",
                "-i", video_path,
                "-ss", "00:00:03",  # seek to 3s for a representative frame
                "-frames:v", "1",
                "-q:v", "2",        # high JPEG quality
                thumbnail_path
            ]

            subprocess.run(cmd, check=True)
            logger.info(f"缩略图生成成功: {thumbnail_path}")

        except Exception as e:
            logger.error(f"缩略图生成失败: {str(e)}")
            # 缩略图生成失败不阻止整体流程

    def _get_video_duration(self, video_path: str) -> float:
        """Return the container duration in seconds via ffprobe.

        Returns:
            Duration in seconds, or 0.0 if probing fails for any reason.
        """
        try:
            cmd = [
                "ffprobe",
                "-v", "error",
                "-show_entries", "format=duration",
                "-of", "default=noprint_wrappers=1:nokey=1",
                video_path
            ]

            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            duration = float(result.stdout.strip())

            return duration

        except Exception as e:
            logger.error(f"获取视频时长失败: {str(e)}")
            return 0.0


# 文生视频主类 (text-to-video orchestrator)
class TextToVideoGenerator:
    """End-to-end pipeline: script -> character -> voice -> animation -> video."""

    def __init__(self, rag_system):
        # Stage components; each stage consumes the previous stage's output dict.
        self.script_generator = ScriptGenerator()
        self.character_generator = CharacterGenerator()
        self.voice_synthesizer = VoiceSynthesizer()
        self.character_animator = CharacterAnimator()
        self.video_synthesizer = VideoSynthesizer()
        # Retrieval system handed to script generation for grounded content.
        self.rag_system = rag_system

    async def generate_video(self, request: TextToVideoRequest) -> Dict[str, Any]:
        """Run the full text-to-video pipeline for one topic.

        Args:
            request: topic plus style/voice options.

        Returns:
            Summary dict with the video id/urls, duration and the ids of
            every intermediate artifact (script, character, audio).

        Raises:
            Exception: wraps any stage failure with a descriptive message.
        """
        try:
            logger.info(f"开始为主题 '{request.topic}' 生成视频")

            # 1. 生成文案
            # BUG FIX: TextToVideoRequest declares no `duration`/`language`
            # fields, so `request.duration`/`request.language` raised
            # AttributeError on every call. Fall back to ScriptRequest's own
            # defaults when the attributes are absent.
            script_request = ScriptRequest(
                topic=request.topic,
                style=request.style,
                duration=getattr(request, "duration", 60),
                language=getattr(request, "language", "mandarin")
            )
            # NOTE(review): not awaited, unlike the other stages — presumably
            # ScriptGenerator.generate_script is synchronous; confirm.
            script_data = self.script_generator.generate_script(script_request, self.rag_system)
            logger.info(f"文案生成完成: {script_data['id']}")

            # 2. 生成虚拟花艺师
            character_request = CharacterRequest(
                style=request.character_style,
                gender="female",  # 默认女性角色
                outfit="professional",
                background="studio"
            )
            character_data = self.character_generator.generate_character(character_request)
            logger.info(f"角色生成完成: {character_data['id']}")

            # 3. 生成语音
            audio_data = await self.voice_synthesizer.synthesize_voice(script_data, request.voice_id)
            logger.info(f"语音合成完成: {audio_data['id']}")

            # 4. 生成角色动画
            animation_data = await self.character_animator.animate_character(character_data, audio_data)
            logger.info(f"角色动画生成完成: {animation_data['id']}")

            # 5. 合成最终视频
            video_request = VideoRequest(
                script_id=script_data['id'],
                character_id=character_data['id'],
                voice_id=request.voice_id,
                resolution="720p"
            )
            video_data = await self.video_synthesizer.create_video(script_data, animation_data, audio_data,
                                                                   video_request)
            logger.info(f"视频合成完成: {video_data['id']}")

            # Flatten the per-stage ids into the caller-facing summary.
            result = {
                "video_id": video_data['id'],
                "video_url": video_data['video_url'],
                "thumbnail_url": video_data['thumbnail_url'],
                "duration": video_data['duration'],
                "topic": request.topic,
                "script_id": script_data['id'],
                "character_id": character_data['id'],
                "audio_id": audio_data['id'],
                "creation_time": video_data['created_at']
            }

            return result

        except Exception as e:
            logger.error(f"视频生成失败: {str(e)}")
            raise Exception(f"视频生成失败: {str(e)}")


