import torch
import soundfile as sf
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline

# from diffusers.schedulers import EDMDPMSolverMultistepScheduler
from diffusers.schedulers import CosineDPMSolverMultistepScheduler
from diffusers.schedulers import CosineDPMSolverMultistepReverseScheduler

def invert_audio(
    audio_path: str,
    prompt: str,
    negative_prompt: str = "",
    # Raw string: the original non-raw literal contained invalid escapes
    # (\c, \s, \m) that raise SyntaxWarning on Python 3.12+. Value unchanged.
    model_path: str = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0",
    # NOTE: evaluated once at import time, not per call.
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
    num_inference_steps: int = 200,
    guidance_scale: float = 1.0,
    output_latents_path: str = "inverted_latents.pt",
    output_wav_path: str = "jazz_piano_latents_noise.wav",
) -> torch.Tensor:
    """
    Invert an audio clip back into diffusion latents and save them.

    Loads a StableAudio inversion pipeline, swaps in the reverse
    DPM-Solver scheduler, VAE-encodes the input audio, runs the
    inversion, saves the resulting latents, and also decodes them back
    to a WAV file as a sanity check.

    Args:
        audio_path: Path to the input audio file (.wav).
        prompt: The prompt originally used to generate the audio.
        negative_prompt: Negative prompt (defaults to empty).
        model_path: Local or HuggingFace model path.
        device: Compute device ("cuda" if available, else "cpu").
        num_inference_steps: Number of inversion steps.
        guidance_scale: Classifier-free guidance scale.
        output_latents_path: Where to save the inverted latents (.pt).
        output_wav_path: Where to save the decoded sanity-check audio.

    Returns:
        The inverted latent tensor produced by the pipeline.
    """
    # 1. Load the inversion pipeline (fp16 to halve memory) and replace
    #    its scheduler with the reverse DPM-Solver variant required for
    #    inversion.
    inverse_pipeline = StableAudioInversionPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
    ).to(device)
    print(f"反演管道信息: {inverse_pipeline.scheduler}")
    inverse_pipeline.scheduler = CosineDPMSolverMultistepReverseScheduler.from_config(
        inverse_pipeline.scheduler.config
    )
    print(f"修改后反演管道信息: {inverse_pipeline.scheduler}")

    # 2. Read and preprocess the audio.
    # NOTE(review): sf.read returns (samples, channels) for multichannel
    # but a 1-D array for mono; in the mono case this produces shape
    # (1, samples) with no channel axis — confirm the VAE accepts that.
    audio_data, sample_rate = sf.read(audio_path)
    audio_tensor = torch.tensor(audio_data.T, dtype=torch.float16).unsqueeze(0).to(device)

    # 3. Encode the waveform into VAE latents (stochastic sample from
    #    the posterior).
    with torch.no_grad():
        latents = inverse_pipeline.vae.encode(audio_tensor).latent_dist.sample()

    # 4. Run the inversion (latents -> noise trajectory endpoint).
    with torch.no_grad():
        inversion_output = inverse_pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            latents=latents,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            return_dict=True,
            output_type="latent",
        )

    # 5. Persist the inverted latents.
    # NOTE(review): the pipeline exposes latent output under `.audios`
    # when output_type="latent" — verify against the pipeline's API.
    inverted_latents = inversion_output.audios
    torch.save(inverted_latents, output_latents_path)
    print(f"反演潜变量已保存至: {output_latents_path}")

    # 6. Decode the latents back to a waveform for a listening check.
    with torch.no_grad():
        decoded_audio = inverse_pipeline.vae.decode(inverted_latents).sample

    # Move to CPU, upcast to float32, and transpose to (samples, channels)
    # as expected by soundfile.
    decoded_audio = decoded_audio.squeeze(0).cpu().float().numpy().T

    # 7. Write the decoded audio as WAV.
    sf.write(output_wav_path, decoded_audio, sample_rate)
    print(f"反演音频已保存至: {output_wav_path}")

    return inverted_latents
  
  
# Run the inversion only when executed as a script, not on import —
# the unguarded call would load the full pipeline as an import side effect.
if __name__ == "__main__":
    # Invert the reference clip to obtain (and persist) its latents.
    inverted_latents = invert_audio(
        audio_path="origin_audio.wav",
        prompt="A relaxing piano melody in a calm environment_revert.wav",
        # negative_prompt="Low quality.",
        output_latents_path="pr.pt",
    )