# -*- coding: utf-8 -*-
import torch
import os
import sys
import multiprocessing
import traceback
from pathlib import Path

# Optional dependency check: torch_npu enables Ascend NPU support if installed.
try:
    import torch_npu
except (ImportError, ModuleNotFoundError):
    print("torch_npu not found, proceeding without NPU support.")

from diffusers import AnimateDiffPipeline, MotionAdapter, AutoencoderKL
from diffusers.schedulers import EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from TTS.api import TTS
import ffmpeg

# --- Configuration ---
# --- Model and output path configuration ---
# Use Path objects for path handling — more robust than raw strings.
root_dir = Path(__file__).parent.parent.parent # project root: stv/
output_dir = root_dir / "backend/video_generation/outputs"
os.makedirs(output_dir, exist_ok=True)

# --- Input text ---
prompt = "A white-robed swordsman stands on a cliff edge, clouds churning behind him, his longsword gleaming with a cold light, chinese ink painting style, expressive, white space, a masterpiece"
narration_text = "He had been waiting here for three days and three nights."

# --- Online model IDs ---
base_model_id = "runwayml/stable-diffusion-v1-5"
motion_module_id = "guoyww/animatediff-motion-adapter-v1-5-2"
vae_id = "stabilityai/sd-vae-ft-mse-original" 
# NOTE: the VAE is loaded as a single file; this ID points to the Hugging Face
# repository, and the concrete filename is specified at load time.

# --- Output file paths ---
narration_path = output_dir / "narration.wav"
temp_video_path = output_dir / "temp_video.mp4"
final_video_path = output_dir / "final_video.mp4"

# --- Isolated Process Functions ---

def get_device():
    """Pick the best available accelerator: NPU, then CUDA, then CPU.

    The torch_npu check is guarded on ``sys.modules`` so this never touches
    ``torch.npu`` unless the optional import at the top of the file succeeded.
    """
    npu_loaded = 'torch_npu' in sys.modules
    if npu_loaded and torch.npu.is_available():
        return "npu"
    if torch.cuda.is_available():
        return "cuda"
    return "cpu"

def generate_video_process(prompt_text, output_file):
    """Isolated text-to-video worker process (AnimateDiff).

    Runs in its own process so that all accelerator memory is reclaimed by
    the OS when the process exits.

    Args:
        prompt_text: Text prompt describing the scene to animate.
        output_file: Destination path for the silent .mp4 clip.

    Exits with a non-zero status code on failure so the parent process can
    detect it via ``Process.exitcode`` (previously errors were only printed
    and the process exited 0).
    """
    try:
        device = get_device()
        # fp16 halves memory on accelerators; CPU inference needs fp32.
        dtype = torch.float16 if device != "cpu" else torch.float32
        print(f"[AnimateDiff Process] Using device: {device}, dtype: {dtype}")

        # 1. Load the motion module from its online repo ID.
        adapter = MotionAdapter.from_pretrained(motion_module_id, torch_dtype=dtype)

        # 2. Load the base model and attach the motion adapter.
        #    The VAE is auto-loaded from the base model's config and swapped below.
        pipe = AnimateDiffPipeline.from_pretrained(
            base_model_id,
            motion_adapter=adapter,
            torch_dtype=dtype,
        )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)

        # 3. (Optional but recommended) swap in a higher-quality VAE.
        #    local_dir_use_symlinks is deprecated in huggingface_hub and dropped.
        print(f"[AnimateDiff Process] Downloading and swapping VAE from {vae_id}")
        vae_file_path = hf_hub_download(
            repo_id=vae_id,
            filename="vae-ft-mse-840000-ema-pruned.safetensors",
            local_dir=output_dir / "models_cache",  # dedicated cache directory
        )
        pipe.vae = AutoencoderKL.from_single_file(vae_file_path, torch_dtype=dtype)

        # Memory optimization: offload idle sub-models to CPU between steps.
        pipe.enable_model_cpu_offload()

        print("[AnimateDiff Process] Generating frames...")
        output = pipe(
            prompt=prompt_text,
            num_frames=16,
            guidance_scale=7.5,
            num_inference_steps=25,
        )
        frames = output.frames[0]

        export_to_video(frames, str(output_file), fps=8)
        print(f"✅ [AnimateDiff Process] Silent video saved to {output_file}")

    except Exception:
        print("❌ [AnimateDiff Process] Error:")
        traceback.print_exc()
        # Propagate failure to the parent via the process exit code.
        sys.exit(1)

def generate_audio_process(text, output_file):
    """Isolated text-to-speech worker process (Coqui TTS).

    Args:
        text: Narration text to synthesize.
        output_file: Destination path for the .wav file.

    Exits with a non-zero status code on failure so the parent process can
    detect it via ``Process.exitcode``.
    """
    try:
        device = get_device()
        print(f"[TTS Process] Using device: {device}")
        # NOTE(review): this is a zh-CN (Mandarin) voice model, but the
        # module-level narration_text is English — confirm the intended model.
        tts_model_id = "tts_models/zh-CN/baker/tacotron2-DDC-GST"
        tts = TTS(model_name=tts_model_id, progress_bar=True).to(device)
        tts.tts_to_file(text=text, file_path=str(output_file))
        print(f"✅ [TTS Process] Narration audio saved to {output_file}")
    except Exception:
        print("❌ [TTS Process] Error:")
        traceback.print_exc()
        # Propagate failure to the parent via the process exit code.
        sys.exit(1)

# --- Main Orchestrator ---

def run_orchestrator():
    """Run the pipeline via isolated worker processes, then mux the results.

    Each model runs in its own process so its accelerator memory is fully
    released when the process exits. The two workers run concurrently; the
    final assembly step combines their outputs with ffmpeg.
    """
    print("\n[Orchestrator] Spawning generation processes...")
    p_video = multiprocessing.Process(target=generate_video_process, args=(prompt, temp_video_path))
    p_audio = multiprocessing.Process(target=generate_audio_process, args=(narration_text, narration_path))

    p_video.start()
    p_audio.start()

    p_video.join()
    p_audio.join()

    # A missing output file means the corresponding worker failed.
    if not temp_video_path.exists() or not narration_path.exists():
        print("❌ Orchestrator: One of the generation steps failed. Aborting.")
        return

    print("\n[Orchestrator] Assembling final video...")
    try:
        input_video = ffmpeg.input(str(temp_video_path))
        input_audio = ffmpeg.input(str(narration_path))
        # concat with v=1, a=1 treats (video, audio) as one segment, muxing
        # the silent clip and the narration into a single output. The -y
        # overwrite flag is supplied by overwrite_output=True; the previous
        # redundant y='-y' kwarg has been removed.
        (
            ffmpeg
            .concat(input_video, input_audio, v=1, a=1)
            .output(str(final_video_path))
            .run(overwrite_output=True, quiet=True)
        )
        print(f"🎉 Success! Final video saved to {final_video_path}")
        # Clean up intermediates only after a successful mux.
        os.remove(temp_video_path)
        os.remove(narration_path)
    except ffmpeg.Error as e:
        # quiet=True captures stderr instead of streaming it; surface ffmpeg's
        # own diagnostics, which a generic str(e) would discard.
        detail = e.stderr.decode("utf-8", errors="replace") if e.stderr else str(e)
        print(f"❌ Orchestrator: Error in final video assembly: {detail}")
    except Exception as e:
        print(f"❌ Orchestrator: Error in final video assembly: {e}")

if __name__ == "__main__":
    # Force 'spawn' so each worker starts with a fresh interpreter and does
    # not inherit any accelerator state from the parent process.
    multiprocessing.set_start_method('spawn', force=True)
    run_orchestrator() 