import torch
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline
from diffusers import AutoencoderKL, UNet2DConditionModel
from diffusers.schedulers.scheduling_cosine_dpmsolver_multistep_reverse import CosineDPMSolverMultistepScheduler  # custom (locally defined) scheduler

# === Load the StableAudio model ===
# Raw string: the original non-raw literal relied on "\c", "\d", "\s", "\m" not
# being valid escapes — that emits SyntaxWarning on Python 3.12+ and will
# eventually become a SyntaxError.
model_path = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0"  # local checkpoint directory

# NOTE(review): stock Stable Audio Open 1.0 ships an AutoencoderOobleck VAE and
# a StableAudioDiTModel transformer, not AutoencoderKL/UNet2DConditionModel —
# confirm these subfolders actually contain KL-VAE/UNet weights, otherwise
# loading will fail or silently mismatch.
pipeline = StableAudioInversionPipeline.from_pretrained(
    model_path,
    vae=AutoencoderKL.from_pretrained(model_path, subfolder="vae"),
    unet=UNet2DConditionModel.from_pretrained(model_path, subfolder="unet"),
    scheduler=CosineDPMSolverMultistepScheduler.from_pretrained(model_path, subfolder="scheduler"),
    torch_dtype=torch.float16,
).to("cuda")

# === Prepare the audio ===
# weights_only=True: the file is a plain tensor, so restrict torch.load to
# tensor data instead of running the full (arbitrary-code) pickle machinery.
audio_tensor = torch.load(
    "A relaxing piano melody in a calm environment.wav.pt",
    weights_only=True,
).to("cuda")  # assumed shape: (1, channels, samples) — TODO confirm against the saving script

# === Inversion & reconstruction ===
result = pipeline.invert(audio_tensor, num_inversion_steps=50)

# Persist the reconstructed waveform for later comparison with the input.
reconstructed_audio = result["reconstructed_audio"]
torch.save(reconstructed_audio, "reconstructed.pt")
