import scipy
import torch
import soundfile as sf
# from diffusers import StableAudioPipeline
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline



# Model source: pick one of the two below.
repo_id = "stabilityai/stable-audio-open-1.0"  # remote (Hugging Face Hub)
# NOTE: raw string required — the original non-raw "E:\code\..." relied on
# invalid escape sequences (\c, \d, \s, \m), which are deprecated and emit
# SyntaxWarning on modern Python; it only produced the right path by accident.
model_path = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0"  # local checkout
# Swap model_path for repo_id here to load from the Hub instead of disk.
pipe = StableAudioInversionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Define the prompts.
prompt = "The sound of a hammer hitting a wooden surface."
negative_prompt = "Low quality."

# Seed the generator for reproducible sampling.
generator = torch.Generator("cuda").manual_seed(0)

# Run the generation; .audios is a batch of waveforms (one per
# num_waveforms_per_prompt).
audio = pipe(
    prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=200,
    audio_end_in_s=10.0,
    num_waveforms_per_prompt=3,
    generator=generator,
).audios

# Take the first waveform; transpose to (samples, channels) and cast to
# float32 on CPU, the layout soundfile expects.
output = audio[0].T.float().cpu().numpy()
sf.write("hammer.wav", output, pipe.vae.sampling_rate)

# Audio preprocessing
def preprocess_audio(audio_path, target_sr=44100, device=None):
    """Load an audio file and return it as a (batch, channels, length) tensor.

    The audio is resampled to ``target_sr``, shaped as ``(1, 1, length)`` for
    mono or ``(1, channels, length)`` for multi-channel input, cast to
    float16, and moved to ``device`` (defaults to the module-level ``pipe``'s
    device, preserving the original behavior).

    Args:
        audio_path: Path to an audio file readable by soundfile (wav/flac/ogg).
        target_sr: Desired sample rate in Hz.
        device: Optional torch device; when None, falls back to ``pipe.device``.

    Returns:
        torch.Tensor of shape (1, channels, length), dtype float16.
    """
    # Local import: scipy is already a file-level dependency; the submodule
    # is only needed here.
    from math import gcd
    from scipy import signal

    # NOTE(fix): the original called librosa.load, but librosa is never
    # imported in this file (NameError at runtime). soundfile + scipy —
    # both already imported by this script — cover the same use case.
    audio, sr = sf.read(audio_path, dtype="float32", always_2d=False)

    if audio.ndim == 2:
        # soundfile yields (frames, channels); switch to (channels, frames)
        # so the time axis is last. (This also fixes the original stereo
        # branch, which produced (1, length, 2) instead of the documented
        # (1, 2, length).)
        audio = audio.T

    if sr != target_sr:
        # Polyphase resampling along the time axis.
        g = gcd(int(target_sr), int(sr))
        audio = signal.resample_poly(audio, target_sr // g, sr // g, axis=-1)

    if audio.ndim == 1:
        audio = audio[None, None, :]  # mono: (1, 1, length)
    else:
        audio = audio[None, :, :]     # multi-channel: (1, channels, length)

    if device is None:
        device = pipe.device  # module-level pipeline, as in the original

    return torch.tensor(audio, dtype=torch.float16).to(device)
  
# Encode preprocessed audio into the VAE latent space, yielding z0.
def encode_audio_to_latent(pipe, audio_tensor):
    """Encode an audio tensor to a latent sample z0 via the pipeline's VAE.

    Args:
        pipe: Pipeline exposing ``pipe.vae.encode``.
        audio_tensor: Audio tensor shaped as the VAE expects —
            presumably (batch, channels, length); verify against
            ``preprocess_audio``'s output.

    Returns:
        A sample z0 drawn from the VAE posterior distribution.
    """
    with torch.no_grad():
        enc = pipe.vae.encode(audio_tensor)
        # NOTE(fix): diffusers VAE ``encode`` returns an output object whose
        # ``.latent_dist`` attribute holds the posterior distribution — the
        # original called ``.sample()`` on the output object itself, which
        # would raise AttributeError. Fall back to ``enc`` unchanged for any
        # implementation that returns the distribution directly.
        latent_dist = getattr(enc, "latent_dist", enc)
        # Draw z0 (the latent-space sample) from the posterior.
        z0 = latent_dist.sample()
    return z0