import torch
from diffusers.pipelines.stable_audio.pipeline_stable_audio import StableAudioPipeline
import soundfile as sf
def reconstruct_audio(
    latents_path: str,
    prompt: str,
    negative_prompt: str = "",
    # Raw string: the original non-raw literal contained invalid escape
    # sequences (\c, \d, \s, \m) that warn on modern Python.
    model_path: str = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0",
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
    num_inference_steps: int = 200,
    guidance_scale: float = 7.0,
    output_audio_path: str = "reconstructed_audio.wav"
):
    """
    Reconstruct audio from saved latents.

    Args:
        latents_path: Path to a latent tensor saved with ``torch.save``.
        prompt: Original text prompt used for generation.
        negative_prompt: Negative prompt.
        model_path: Local path of the Stable Audio Open model directory.
        device: Compute device; the default is resolved once at import time.
        num_inference_steps: Number of denoising steps for reconstruction.
        guidance_scale: Classifier-free guidance scale.
        output_audio_path: Output WAV file path.
    """
    # 1. Load the forward generation pipeline (fp16 to halve memory use).
    forward_pipeline = StableAudioPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
    ).to(device)

    # 2. Load the latents. map_location makes loading robust when the
    # latents were saved on a different device (e.g. CUDA -> CPU run);
    # weights_only=True is safe because the payload is a plain tensor.
    inverted_latents = torch.load(
        latents_path, map_location=device, weights_only=True
    )

    # 3. Run the forward diffusion pass starting from the given latents.
    with torch.no_grad():
        recon_output = forward_pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            latents=inverted_latents,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            return_dict=True,
            num_waveforms_per_prompt=1
        )

    # 4. Save the reconstructed audio. The pipeline yields (channels, samples);
    # soundfile expects (samples, channels), hence the transpose.
    reconstructed_audio = recon_output.audios[0].T.float().cpu().numpy()
    sf.write(output_audio_path, reconstructed_audio, samplerate=forward_pipeline.vae.sampling_rate)
    print(f"重建音频已保存至: {output_audio_path}")
    
# Entry point: reconstruct audio from latents. Guarded so that importing
# this module does not kick off a 200-step diffusion run as a side effect.
if __name__ == "__main__":
    reconstruct_audio(
        latents_path="pr.pt",
        prompt="A relaxing piano melody in a calm environment_revert.wav",
        output_audio_path="reconstructed_Post-Rock.wav",
    )