#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
唇形同步视频生成器
根据文案内容生成唇形同步视频，包括音频和背景图片
"""

import sys
import os
from typing import Dict, Any

from dotenv import load_dotenv
# Add the project root directory to the Python path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))

import json
import uuid
from datetime import datetime
from pathlib import Path
import dashscope
import requests
from http import HTTPStatus
from tools.audio_combine import merge_conversation_audio_timeline
from .voice_manager import voice_manager


class LipSyncGenerator:
    """
    唇形同步视频生成器类
    根据文案生成对应的语音文件，支持单人说话和两人对话场景
    """
    
    def __init__(self):
        """
        Initialize the generator: load environment configuration, prepare the
        date-stamped output directory, and configure API credentials.
        """
        load_dotenv()

        # Base output directory from the environment (defaults to "lip_sync_output")
        self.base_lip_sync_dir = os.getenv("BASE_LIP_SYNC_DIR", "lip_sync_output")
        
        # Create a date-stamped directory structure, e.g. lip_sync_output/2024-01-31
        today = datetime.now().strftime("%Y-%m-%d")
        self.output_dir = Path(self.base_lip_sync_dir) / today
        self.output_dir.mkdir(parents=True, exist_ok=True)
        
        # ElevenLabs API configuration
        self.api_key = os.getenv("ELEVENLABS_API_KEY")
        
        # Voices are managed through the shared VoiceManager instance
        self.voice_manager = voice_manager
        
        # Default voice IDs per speaker role
        self.voice_ids = {
            "unique": "N2lVS1w4EtoT3dr4eOWO",  # default single-speaker voice ID
            "left": "pqHfZKP75CvOlQylNhV4",   # default left-speaker voice ID
            "right": "Xb7hH8MSUJpSbSDYk0k2"  # default right-speaker voice ID
        }
        # DashScope API configuration.
        # Security fix: never print the raw key — only report whether it is set.
        self.dashscope_api_key = os.getenv("DASHSCOPE_API_KEY")
        print(f"DashScope API Key configured: {bool(self.dashscope_api_key)}")
        if self.dashscope_api_key:
            dashscope.api_key = self.dashscope_api_key
    
    def generate_uuid_folder(self):
        """
        生成UUID命名的文件夹
        
        Returns:
            Path: UUID命名的文件夹路径
        """
        folder_name = str(uuid.uuid4())
        folder_path = self.output_dir / folder_name
        folder_path.mkdir(exist_ok=True)
        return folder_path
    
    def save_copywriting(self, folder_path, copywriting):
        """
        保存文案到文件
        
        Args:
            folder_path (Path): 目标文件夹路径
            copywriting (str): 文案内容
            
        Returns:
            Path: 文案文件路径
        """
        copywriting_file = folder_path / "copywriting.txt"
        with open(copywriting_file, "w", encoding="utf-8") as f:
            f.write(copywriting)
        return copywriting_file
    
    def add_audio(self, voice_id, text, output_path):
        """
        Synthesize speech for *text* with the ElevenLabs text-to-speech API
        and write the MP3 bytes to *output_path*.

        Args:
            voice_id (str): ElevenLabs voice identifier.
            text (str): text to convert to speech.
            output_path (Path): destination for the MP3 file.

        Returns:
            bool: True when the audio file was written successfully.
        """
        # Look up descriptive voice metadata for logging
        voice_info = self.voice_manager.get_voice(voice_id)
        voice_name = self.voice_manager.get_voice_name(voice_id)

        print(f"使用声音: {voice_name} ({voice_id})")
        if voice_info and voice_info.get("description"):
            print(f"声音描述: {voice_info['description']}")

        endpoint = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}?output_format=mp3_44100_128"

        request_headers = {
            "Content-Type": "application/json",
            "xi-api-key": self.api_key,
        }

        payload = {
            "text": text,
            "model_id": "eleven_multilingual_v2",
        }

        try:
            resp = requests.post(endpoint, headers=request_headers, json=payload)
            if resp.status_code != 200:
                print(f"请求失败: {resp.status_code} - {resp.text}")
                return False
            with open(output_path, "wb") as audio_file:
                audio_file.write(resp.content)
            return True
        except Exception as exc:
            print(f"生成音频时出错: {exc}")
            return False
    
    def generate_image_prompt(self, dialogue_json):
        """
        根据对话内容生成图片提示词（使用固定提示词）
        
        Args:
            dialogue_json (str): 包含对话内容和声音信息的JSON字符串
            
        Returns:
            tuple: (成功状态, 提示词或错误信息)
        """
        try:
            # 解析JSON
            dialogue_data = json.loads(dialogue_json)
            
            # 提取说话人类型
            speakers_info = []
            for turn in dialogue_data.get("dialogue", []):
                speaker_info = {
                    "speaker": turn.get("speaker", "unknown"),
                }
                speakers_info.append(speaker_info)
            
            # 统计说话人类型
            speaker_types = [speaker.get('speaker', 'unknown') for speaker in speakers_info]
            unique_speakers = set(speaker_types)
            
            # 如果是单人说话场景
            if len(unique_speakers) == 1 and 'unique' in unique_speakers:
                # 固定提示词：50岁中年男人+足球强相关背景
                prompt_text = """
A hyper-realistic portrait of a 50-year-old European male with a strong European football background. Rugged facial features, light wrinkles, short graying hair, and a confident coach-like expression. Wearing a dark football coach jacket with subtle European club insignia.
Set in a football stadium with blurred floodlights, green pitch, and goalposts in the background. Natural stadium lighting creates cinematic shadows and realistic skin texture.
Captured with a Canon EOS R5, 85mm f/1.4 lens — sharp focus on eyes, shallow depth of field, soft bokeh.
Ultra-detailed textures on skin and fabric, HDR lighting, professional sports photography style, cinematic realism, storytelling portrait tied to European football culture.
"""
            else:
                # 固定提示词：26岁欧洲男和26岁非洲女+足球强相关背景
                prompt_text = """
A hyper-realistic portrait of a 26-year-old European male and a 26-year-old African female, both with strong European football backgrounds, engaged in conversation on the football pitch. They stand naturally facing each other, expressive and animated, showing friendly interaction and teamwork.
Wearing modern European football club training kits or athletic jackets with subtle club logos. A football rests naturally nearby on the green pitch.
Background depicts a European football stadium — lush green pitch, goalposts, stadium seating, and soft floodlights with shallow depth of field, cinematic and realistic.
Shot with professional studio-style lighting enhanced by natural cinematic light, emphasizing realistic skin tones, hair, and fabric textures.
Captured with a Canon EOS R5, 85mm f/1.4 lens — ultra-sharp focus on their faces, soft bokeh background.
Style: photorealistic, cinematic sports portrait, storytelling focused on European football culture, showing dialogue and teamwork, without props or training equipment.
"""
            
            print(f"使用固定图片提示词: {prompt_text}")
            return True, prompt_text
                
        except Exception as e:
            error_msg = f"生成图片提示词时出错: {str(e)}"
            print(error_msg)
            return False, error_msg
    
    def generate_image_from_prompt(self, prompt_text, output_path):
        """
        Generate a background image from a text prompt via the DashScope
        image-synthesis API and download the result to *output_path*.

        Args:
            prompt_text (str): image generation prompt.
            output_path (str): destination path for the downloaded image.

        Returns:
            tuple: (success flag, saved path on success or error message)
        """
        try:
            print(f"正在生成图片...")
            print(f"提示词: {prompt_text}")
            
            # Cap the prompt length.
            # NOTE(review): the realism suffix below is appended AFTER this
            # cap, so the final prompt can exceed 500 characters — confirm
            # against the API's actual limit.
            if len(prompt_text) > 500:
                prompt_text = prompt_text[:500]
            
            # Reinforce photorealistic style and detail in the prompt
            enhanced_prompt = prompt_text + ", photorealistic, high detail, professional photography, sharp focus, realistic textures, detailed facial features, natural lighting"
            
            # Call the DashScope text-to-image API with an allowed size
            response = dashscope.ImageSynthesis.call(
                model="qwen-image-plus",  # model name
                prompt=enhanced_prompt,
                n=1,
                size='1664*928'  # an allowed output size
            )
            
            # Dump the full response for debugging
            print(f"图片生成API完整响应: {response}")
            
            # Bail out unless the HTTP layer reports success
            if response.status_code != HTTPStatus.OK:
                error_msg = f"图片生成失败，状态码: {response.status_code}, 错误信息: {response.message}"
                print(error_msg)
                return False, error_msg
            
            # Validate the payload step by step: output -> results -> results[0]
            if not response.output:
                error_msg = "图片生成失败，响应中没有output字段"
                print(error_msg)
                return False, error_msg
                
            if not response.output.results:
                error_msg = "图片生成失败，响应中没有results字段"
                print(error_msg)
                return False, error_msg
                
            if len(response.output.results) == 0:
                error_msg = "图片生成失败，响应中results数组为空"
                print(error_msg)
                return False, error_msg
                
            # URL of the generated image
            image_url = response.output.results[0].url
            print(f"图片生成成功，URL: {image_url}")
            
            # Download the image and persist it locally
            image_response = requests.get(image_url)
            if image_response.status_code == 200:
                with open(output_path, "wb") as f:
                    f.write(image_response.content)
                print(f"图片已保存到: {output_path}")
                return True, str(output_path)
            else:
                error_msg = f"下载图片失败，状态码: {image_response.status_code}"
                print(error_msg)
                return False, error_msg
                
        except Exception as e:
            error_msg = f"生成图片时出错: {str(e)}"
            print(error_msg)
            return False, error_msg
    
    def _extract_topic_keywords(self, text_content):
        """
        Extract English topic keywords from Chinese dialogue content.

        Args:
            text_content (str): dialogue text to scan.

        Returns:
            str: up to five matched English keywords joined by ", ",
                in mapping order.
        """
        # Chinese keyword -> English keyword mapping
        keyword_mapping = {
            # Football-related keywords
            '足球': 'football',
            '足球赛': 'football match',
            '比赛': 'match',
            '球队': 'team',
            '球员': 'player',
            '进球': 'goal',
            '射门': 'shot',
            '曼联': 'Manchester United',
            '利物浦': 'Liverpool',
            '冠军': 'champion',
            '联赛': 'league',
            
            # Sports-related keywords
            '体育': 'sports',
            '运动员': 'athlete',
            '训练': 'training',
            '教练': 'coach',
            
            # News/analysis keywords
            '分析': 'analysis',
            '节目': 'show',
            '报道': 'report',
            '新闻': 'news',
            '主持人': 'host',
            '演播室': 'studio',
            
            # General keywords
            '欢迎': 'welcome',
            '今天': 'today',
            '我们': 'we',
            
            # Extra detail-oriented keywords
            '专业': 'professional',
            '高清': 'high definition',
            '现场': 'live',
            '访谈': 'interview',
            '讨论': 'discussion',
            '评论': 'commentary'
        }
        
        # Keep the English keyword for every Chinese keyword found in the
        # text, de-duplicated while preserving mapping order.
        matches = (
            english
            for chinese, english in keyword_mapping.items()
            if chinese in text_content
        )
        found_keywords = list(dict.fromkeys(matches))
        
        # Cap at five keywords for a richer but bounded description
        return ', '.join(found_keywords[:5])
    
    def process_single_speaker(self, copywriting, background_image_path=None, voice_id=None):
        """
        处理单人说话场景
        
        Args:
            copywriting (str): 文案内容
            background_image_path (str, optional): 背景图片路径
            voice_id (str, optional): 声音ID
            
        Returns:
            dict: 处理结果信息
        """
        # 创建UUID文件夹
        folder_path = self.generate_uuid_folder()
        
        # 保存文案
        self.save_copywriting(folder_path, copywriting)
        
        # 获取声音信息
        if not voice_id:
            voice_id = self.voice_ids.get("unique", "N2lVS1w4EtoT3dr4eOWO")
        voice_info = self.voice_manager.get_voice(voice_id)
        
        # 复制背景图片（如果提供）或生成新图片
        background_dest = folder_path / "background.png"
        if background_image_path and os.path.exists(background_image_path):
            with open(background_image_path, "rb") as src, open(background_dest, "wb") as dst:
                dst.write(src.read())
        else:
            # 生成图片提示词，包含声音信息
            success, result = self.generate_image_prompt(copywriting, voice_info)
            if success:
                # 生成图片
                image_success, image_result = self.generate_image_from_prompt(result, str(background_dest))
                if not image_success:
                    print(f"图片生成失败: {image_result}")
            else:
                print(f"图片提示词生成失败: {result}")
        
        # 生成语音文件
        audio_path = folder_path / "unique.mp3"
        success = self.add_audio(voice_id, copywriting, audio_path)
        
        return {
            "success": success,
            "folder_path": str(folder_path),
            "audio_file": str(audio_path) if success else None
        }
    
    def process_conversation(self, left_text, right_text, copywriting, background_image_path=None, left_voice_id=None, right_voice_id=None):
        """
        处理两人对话场景
        
        Args:
            left_text (str): 左侧说话人文本
            right_text (str): 右侧说话人文本
            copywriting (str): 完整文案内容
            background_image_path (str, optional): 背景图片路径
            left_voice_id (str, optional): 左侧说话人声音ID
            right_voice_id (str, optional): 右侧说话人声音ID
            
        Returns:
            dict: 处理结果信息
        """
        # 创建UUID文件夹
        folder_path = self.generate_uuid_folder()
        
        # 保存文案
        self.save_copywriting(folder_path, copywriting)
        
        # 获取声音信息
        if not left_voice_id:
            left_voice_id = self.voice_ids.get("left", "pqHfZKP75CvOlQylNhV4")
        if not right_voice_id:
            right_voice_id = self.voice_ids.get("right", "Xb7hH8MSUJpSbSDYk0k2")
            
        left_voice_info = self.voice_manager.get_voice(left_voice_id)
        right_voice_info = self.voice_manager.get_voice(right_voice_id)
        
        # 复制背景图片（如果提供）或生成新图片
        background_dest = folder_path / "background.png"
        if background_image_path and os.path.exists(background_image_path):
            with open(background_image_path, "rb") as src, open(background_dest, "wb") as dst:
                dst.write(src.read())
        else:
            # 生成图片提示词，包含声音信息
            # 合并两个声音的信息
            combined_voice_info = {
                "name": f"{left_voice_info.get('name', '左说话人')} 和 {right_voice_info.get('name', '右说话人')}" if left_voice_info and right_voice_info else "对话场景",
                "description": f"左说话人: {left_voice_info.get('description', '无描述')}; 右说话人: {right_voice_info.get('description', '无描述')}" if left_voice_info and right_voice_info else "对话场景",
                "gender": f"{left_voice_info.get('gender', '未知')} 和 {right_voice_info.get('gender', '未知')}" if left_voice_info and right_voice_info else "未知",
                "language": left_voice_info.get('language', '多语言') if left_voice_info else '多语言'
            }
            
            success, result = self.generate_image_prompt(copywriting, combined_voice_info)
            if success:
                # 生成图片
                image_success, image_result = self.generate_image_from_prompt(result, str(background_dest))
                if not image_success:
                    print(f"图片生成失败: {image_result}")
            else:
                print(f"图片提示词生成失败: {result}")
        
        # 生成左侧说话人语音
        left_audio_path = folder_path / "left.mp3"
        left_success = self.add_audio(left_voice_id, left_text, left_audio_path)
        
        # 生成右侧说话人语音
        right_audio_path = folder_path / "right.mp3"
        right_success = self.add_audio(right_voice_id, right_text, right_audio_path)
        
        # 合并左右音频为一个对话时间线音频
        combined_audio_path = folder_path / "conversation.mp3"
        combined_success = False
        if left_success and right_success:
            try:
                merge_conversation_audio_timeline(str(left_audio_path), str(right_audio_path), str(combined_audio_path))
                combined_success = True
            except Exception as e:
                print(f"合并对话音频时出错: {e}")
                combined_success = False
        
        return {
            "success": left_success and right_success,
            "folder_path": str(folder_path),
            "left_audio_file": str(left_audio_path) if left_success else None,
            "right_audio_file": str(right_audio_path) if right_success else None,
            "combined_audio_file": str(combined_audio_path) if combined_success else None
        }
    
    def process_json_input(self, json_input):
        """
        Process JSON-formatted input supporting multi-turn dialogue scenes.

        Expected JSON shape:
        {
            "background_image": "path/to/background.png",  // optional
            "dialogue": [
                {
                    "speaker": "left" | "right" | "unique",
                    "voice_id": "custom voice ID (optional; used when provided)",
                    "text": "utterance text"
                },
                ...
            ]
        }

        Multi-turn behaviour:
        - a single "unique" speaker produces unique.mp3
        - a "left"/"right" pair produces left.mp3 and right.mp3 in turn order

        Args:
            json_input (dict or str): input data, as a dict or a JSON string.

        Returns:
            dict: processing result info.
        """
        # Parse string input into a dict
        if isinstance(json_input, str):
            try:
                data = json.loads(json_input)
            except json.JSONDecodeError as e:
                return {
                    "success": False,
                    "error": f"JSON解析失败: {str(e)}"
                }
        else:
            data = json_input
        
        # Validate the required field
        if "dialogue" not in data:
            return {
                "success": False,
                "error": "缺少必需的'dialogue'字段"
            }
        
        dialogue = data.get("dialogue", [])
        background_image = data.get("background_image")
        
        # Reject empty dialogue lists
        if not dialogue:
            return {
                "success": False,
                "error": "对话内容不能为空"
            }
        
        # Decide between the single-speaker and two-speaker flows
        speakers = set(turn["speaker"] for turn in dialogue if "speaker" in turn)
        
        # Single-speaker scene
        if speakers == {"unique"} or len(speakers) == 1 and "unique" in speakers:
            # Concatenate all turns into one narration text
            text = "\n".join(turn["text"] for turn in dialogue if "text" in turn)
            
            # Use the first custom voice_id found, if any
            voice_id = None
            for turn in dialogue:
                if "voice_id" in turn and turn["voice_id"]:
                    voice_id = turn["voice_id"]
                    break
            
            # Fall back to the default single-speaker voice
            if not voice_id:
                voice_id = self.voice_ids.get("unique", "N2lVS1w4EtoT3dr4eOWO")
            
            # Create a UUID-named working folder
            folder_path = self.generate_uuid_folder()
            
            # Persist the copy text
            self.save_copywriting(folder_path, text)
            
            # Copy the provided background image, or generate one
            background_dest = folder_path / "background.png"
            if background_image and os.path.exists(background_image):
                with open(background_image, "rb") as src, open(background_dest, "wb") as dst:
                    dst.write(src.read())
            else:
                # Voice metadata for the selected voice.
                # NOTE(review): this value is overwritten inside the loop below.
                voice_info = self.voice_manager.get_voice(voice_id)
                
                # Build a copy of the dialogue data enriched with voice
                # metadata, to be passed to the image-prompt generator
                data_with_voice_info = data.copy()
                dialogue_with_voice = []
                
                # Attach voice info to every dialogue turn
                for turn in data.get("dialogue", []):
                    turn_with_voice = turn.copy()
                    if "voice_id" in turn:
                        voice_info = self.voice_manager.get_voice(turn["voice_id"])
                        if voice_info:
                            turn_with_voice.update(voice_info)
                    elif "speaker" in turn:
                        # Use the default voice ID for this speaker role
                        default_voice_id = self.voice_ids.get(turn["speaker"])
                        if default_voice_id:
                            voice_info = self.voice_manager.get_voice(default_voice_id)
                            if voice_info:
                                turn_with_voice.update(voice_info)
                    dialogue_with_voice.append(turn_with_voice)
                
                # Swap in the enriched dialogue and serialize
                data_with_voice_info["dialogue"] = dialogue_with_voice
                dialogue_json = json.dumps(data_with_voice_info)
                success, result = self.generate_image_prompt(dialogue_json)
                if success:
                    # Render the background image from the prompt
                    image_success, image_result = self.generate_image_from_prompt(result, str(background_dest))
                    if not image_success:
                        print(f"图片生成失败: {image_result}")
                else:
                    print(f"图片提示词生成失败: {result}")
            
            # Synthesize the narration audio
            audio_path = folder_path / "unique.mp3"
            success = self.add_audio(voice_id, text, audio_path)
            
            return {
                "success": success,
                "folder_path": str(folder_path),
                "audio_file": str(audio_path) if success else None
            }
        
        # Two-speaker scene: requires exactly the left and right speakers
        elif speakers == {"left", "right"}:
            # Collect texts and optional custom voice IDs per side
            left_texts = []
            right_texts = []
            left_voice_id = None
            right_voice_id = None
            
            for turn in dialogue:
                if turn.get("speaker") == "left" and "text" in turn:
                    left_texts.append(turn["text"])
                    if not left_voice_id and "voice_id" in turn and turn["voice_id"]:
                        left_voice_id = turn["voice_id"]
                elif turn.get("speaker") == "right" and "text" in turn:
                    right_texts.append(turn["text"])
                    if not right_voice_id and "voice_id" in turn and turn["voice_id"]:
                        right_voice_id = turn["voice_id"]
            
            # Fall back to the default voices
            if not left_voice_id:
                left_voice_id = self.voice_ids.get("left", "pqHfZKP75CvOlQylNhV4")
            if not right_voice_id:
                right_voice_id = self.voice_ids.get("right", "Xb7hH8MSUJpSbSDYk0k2")
            
            # Voice metadata.
            # NOTE(review): these two values are never read below — the
            # enrichment loop fetches its own voice info per turn.
            left_voice_info = self.voice_manager.get_voice(left_voice_id)
            right_voice_info = self.voice_manager.get_voice(right_voice_id)
            
            # The full copy text (all turns)
            full_copywriting = "\n".join(turn["text"] for turn in dialogue if "text" in turn)
            
            # Create a UUID-named working folder
            folder_path = self.generate_uuid_folder()
            
            # Persist the copy text
            self.save_copywriting(folder_path, full_copywriting)
            
            # Generate one audio clip per turn and side
            left_audio_paths = []
            right_audio_paths = []
            
            # Left speaker's clips
            for i, text in enumerate(left_texts):
                if text.strip():  # skip empty texts
                    audio_path = folder_path / f"left_{i}.mp3"
                    self.add_audio(left_voice_id, text, audio_path)
                    left_audio_paths.append(str(audio_path))
            
            # Right speaker's clips
            for i, text in enumerate(right_texts):
                if text.strip():  # skip empty texts
                    audio_path = folder_path / f"right_{i}.mp3"
                    self.add_audio(right_voice_id, text, audio_path)
                    right_audio_paths.append(str(audio_path))
            
            # Merge the clips into natural conversation tracks
            left_combined_path = folder_path / "left.mp3"
            right_combined_path = folder_path / "right.mp3"
            
            try:
                # Timeline-based merge for a natural back-and-forth rhythm.
                # NOTE(review): here the helper receives two path lists and an
                # output prefix and returns two paths, while
                # process_conversation calls it with three file paths —
                # confirm the helper supports both call shapes.
                path1, path2 = merge_conversation_audio_timeline(
                    left_audio_paths, 
                    right_audio_paths, 
                    str(folder_path / "combined"),
                    pause_sec=0.6,
                    random_pause=True,
                    debug=True
                )
                
                # Move the merged files to their final names.
                # NOTE(review): this import is redundant — os is already
                # imported at module level.
                import os
                os.rename(path1, left_combined_path)
                os.rename(path2, right_combined_path)
                print(f"成功合并音频文件: {left_combined_path}, {right_combined_path}")
                
            except Exception as e:
                print(f"音频合并失败: {e}")
                # Fallback: synthesize each side's full text as a single clip
                left_text = "\n".join(left_texts)
                right_text = "\n".join(right_texts)
                
                # Left speaker's fallback track
                left_success = self.add_audio(left_voice_id, left_text, left_combined_path)
                
                # Right speaker's fallback track
                right_success = self.add_audio(right_voice_id, right_text, right_combined_path)
            
            # Copy the provided background image, or generate one
            background_dest = folder_path / "background.png"
            if background_image and os.path.exists(background_image):
                with open(background_image, "rb") as src, open(background_dest, "wb") as dst:
                    dst.write(src.read())
            else:
                # Build a copy of the dialogue data enriched with voice
                # metadata, to be passed to the image-prompt generator
                data_with_voice_info = data.copy()
                dialogue_with_voice = []
                
                # Attach voice info to every dialogue turn
                for turn in data.get("dialogue", []):
                    turn_with_voice = turn.copy()
                    if "voice_id" in turn:
                        voice_info = self.voice_manager.get_voice(turn["voice_id"])
                        if voice_info:
                            turn_with_voice.update(voice_info)
                    elif "speaker" in turn:
                        # Use the default voice ID for this speaker role
                        default_voice_id = self.voice_ids.get(turn["speaker"])
                        if default_voice_id:
                            voice_info = self.voice_manager.get_voice(default_voice_id)
                            if voice_info:
                                turn_with_voice.update(voice_info)
                    dialogue_with_voice.append(turn_with_voice)
                
                # Swap in the enriched dialogue and serialize
                data_with_voice_info["dialogue"] = dialogue_with_voice
                dialogue_json = json.dumps(data_with_voice_info)
                success, result = self.generate_image_prompt(dialogue_json)
                if success:
                    # Render the background image from the prompt
                    image_success, image_result = self.generate_image_from_prompt(result, str(background_dest))
                    if not image_success:
                        print(f"图片生成失败: {image_result}")
                else:
                    print(f"图片提示词生成失败: {result}")
            
            # NOTE(review): success is reported True even when the fallback
            # synthesis above failed — left_success/right_success are never
            # consulted here.
            return {
                "success": True,
                "folder_path": str(folder_path),
                "left_audio_file": str(left_combined_path),
                "right_audio_file": str(right_combined_path)
            }
        
        else:
            return {
                "success": False,
                "error": f"不支持的说话人组合: {speakers}"
            }
    
    def generate_lipsync_video_from_json(self, json_input: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run the complete lip-sync pipeline for a JSON request:
        generate the audio/image assets, upload them, and render the video.

        Args:
            json_input (dict): dialogue and background-image information.

        Returns:
            dict: result payload containing
                - success (bool): whether the pipeline completed
                - video_url (str): rendered video URL (on success)
                - folder_path (str): folder holding the generated assets
                - error (str): failure description (on failure)
        """
        try:
            # Stage 1: produce the audio tracks and the background image
            print("步骤1: 生成音频和图片文件...")
            assets = self.process_json_input(json_input)

            if not assets["success"]:
                return {
                    "success": False,
                    "error": f"音频和图片生成失败: {assets.get('error', '未知错误')}"
                }

            asset_folder = assets["folder_path"]
            print(f"✓ 音频和图片生成成功，文件保存在: {asset_folder}")

            # Stage 2: hand the asset folder to WaveSpeedAI for rendering
            print("步骤2: 上传文件并生成视频...")
            try:
                from .wavespeed_ai import WaveSpeedAI
                renderer = WaveSpeedAI()

                # Render the lip-sync video from the generated assets
                video_url = renderer.generate_lipsync_video_from_folder(
                    folder_path=asset_folder,
                    order="meanwhile",
                    resolution="480p"
                )

                if video_url:
                    print("✓ 视频生成成功!")
                    return {
                        "success": True,
                        "video_url": video_url,
                        "folder_path": asset_folder
                    }

                return {
                    "success": False,
                    "error": "视频生成失败",
                    "folder_path": asset_folder
                }

            except Exception as e:
                return {
                    "success": False,
                    "error": f"视频生成过程出错: {str(e)}",
                    "folder_path": asset_folder
                }

        except Exception as e:
            return {
                "success": False,
                "error": f"生成过程中出现错误: {str(e)}"
            }

if __name__ == "__main__":
    # Usage examples
    generator = LipSyncGenerator()
    
    # Single-speaker example
    # result = generator.process_single_speaker(
    #     copywriting="这里是需要转换为语音的文案内容",
    #     background_image_path="path/to/background.png"
    # )
    # print(result)
    
    # Two-speaker conversation example
    # result = generator.process_conversation(
    #     left_text="左侧说话人的内容",
    #     right_text="右侧说话人的内容",
    #     copywriting="完整文案内容",
    #     background_image_path="path/to/background.png"
    # )
    # print(result)
    
    # JSON input example - single speaker
    # json_input_single = {
    #     "background_image": "path/to/background.png",
    #     "dialogue": [
    #         {"speaker": "unique", "text": "这是单人说话的内容"}
    #     ]
    # }
    # result = generator.process_json_input(json_input_single)
    # print(result)
    
    # JSON input example - two-speaker conversation
    # json_input_conversation = {
    #     "background_image": "path/to/background.png",
    #     "dialogue": [
    #         {"speaker": "left", "text": "左侧说话人的第一句话"},
    #         {"speaker": "right", "text": "右侧说话人的回复"},
    #         {"speaker": "left", "text": "左侧说话人的第二句话"}
    #     ]
    # }
    # result = generator.process_json_input(json_input_conversation)
    # print(result)
    pass