import torch
import numpy as np
# from diffusers import StableAudioPipeline
# from your_custom_module import StableAudioInversionPipeline, CosineDPMSolverMultistepInverseScheduler  # 修改为你实际的 import 路径
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline
from diffusers.pipelines.stable_audio.pipeline_stable_audio import StableAudioPipeline
import soundfile as sf
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import librosa.display
from diffusers.utils.torch_utils import randn_tensor



device = "cuda" if torch.cuda.is_available() else "cpu"
# prompt = "A relaxing piano melody in a calm environment"
prompt = "a relaxing piano melody in a calm environment"
negative_prompt = "Low quality."

# Build a filesystem-safe file stem from the prompt: replace separator
# sequences that are awkward/illegal in filenames, then join with underscores.
formatted_prompt = (
    prompt.replace(": ", "-")   # "colon + space" -> hyphen
    .replace(" | ", "-")        # pipe separator -> hyphen
    .replace(" ", "_")          # spaces -> underscores
    .replace("/", "_")          # path separator -> underscore
)
audio_save_path = f"{formatted_prompt}.wav"
audio_revert_path = f"{formatted_prompt}_revert.wav"
# BUG FIX: this was previously identical to audio_revert_path, so the
# null-prompt reconstruction would silently overwrite the normal one.
audio_revert_path_null = f"{formatted_prompt}_revert_null.wav"
audio_revert_path_sf = f"{formatted_prompt}_revert_sf.wav"
print("处理后的路径：", formatted_prompt)

# Model configuration
repo_id = "stabilityai/stable-audio-open-1.0"  # remote hub id (unused below; local path preferred)
# BUG FIX: the original non-raw literal left "\c", "\d", "\s", "\m" as invalid
# escape sequences (SyntaxWarning on Python 3.12+, and fragile if a segment
# ever starts with a recognized escape such as \n or \t). Raw string keeps the
# exact same byte value while silencing the warning.
model_path = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0"  # local checkout

# === 1. Forward pass: generate audio (and optionally save the initial noise) ===
print("[Step 1] 正向生成音频并保存初始噪声...")

# fp16 pipeline moved onto the selected device.
forward_pipeline = StableAudioPipeline.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
).to(device)

# Reproducible sampling: seed an RNG bound to the device the pipelines run on.
# BUG FIX: torch.Generator("cuda") raises on CPU-only machines even though
# `device` above falls back to "cpu"; bind the generator to `device` instead.
generator = torch.Generator(device).manual_seed(0)

# Reference (disabled): manual-noise path mirroring the pipeline's
# prepare_latents — draw latents of shape (1, in_channels=64, sample_size=1024),
# scale by scheduler.init_noise_sigma, pass them as latents= below, and
# torch.save() them so the Step-3 comparison has a ground truth.
# shape = (1, 64, 1024)
# latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float16)
# latents = latents * forward_pipeline.scheduler.init_noise_sigma
# torch.save(latents, "original_noise.pt")

# Sample from the forward pipeline (custom latents path currently disabled).
with torch.no_grad():
    output = forward_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        # latents=latents,
        guidance_scale=1.0,  # no CFG, so prompt guidance cannot distort the inversion check
        num_inference_steps=200,
        return_dict=True,
        num_waveforms_per_prompt=1,
        generator=generator,
    )

# (channels, samples) -> (samples, channels) for soundfile. Compute the chain
# once and derive the numpy view from the tensor instead of repeating it.
generated_tensor = output.audios[0].T.float().cpu()
generated_audio = generated_tensor.numpy()
sf.write(audio_save_path, generated_audio, samplerate=forward_pipeline.vae.sampling_rate)  # save .wav
print(f"音频保存到 {audio_save_path}")

# === 2. Recover the noise with the inversion pipeline ===
print("[Step 2] 逆向推理，inversion 开始...")

# Load the inversion pipeline from the same local checkpoint, in fp16.
inverse_pipeline = StableAudioInversionPipeline.from_pretrained(
    model_path, torch_dtype=torch.float16
)
inverse_pipeline = inverse_pipeline.to(device)

# Reshape the saved waveform back into (batch, channels, samples) and match
# the pipeline's fp16 dtype / device before encoding.
print("音频张量信息：", generated_tensor.dtype, generated_tensor.shape)
generated_tensor = generated_tensor.T.unsqueeze(0).half().to(device)
print("修改后张量信息：", generated_tensor.dtype, generated_tensor.shape)

# Encode the waveform into a latent to feed the inversion call below.
# NOTE(review): latent_dist.sample() is stochastic — .mode() would make the
# round-trip deterministic; left as-is to preserve behavior.
with torch.no_grad():
    encoded = inverse_pipeline.vae.encode(generated_tensor)
    latents = encoded.latent_dist.sample()  # shape: [B, latent_dim, L]
print("修改后latent信息：", latents.dtype, latents.shape)

# Core inversion calls: run the inversion pipeline twice on the encoded latent —
# once as-is, once scaled by 1.15 (the "stable flow" variant).
with torch.no_grad():
    inversion_output = inverse_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        latents=latents,
        num_inference_steps=200,
        guidance_scale=1.0,
        return_dict=True,
        output_type="latent",
    )
    inversion_stableflow = inverse_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        latents=latents * 1.15,
        num_inference_steps=200,
        guidance_scale=1.0,
        return_dict=True,
        output_type="latent",
    )

# With output_type="latent", the .audios field actually carries latents,
# not decoded audio.
inverted_noise = inversion_output.audios
inverted_noise_sf = inversion_stableflow.audios

# === 3. Error analysis (currently disabled) ===
# The commented-out checks below compare the inverted noise against the
# original noise saved in Step 1; they only work when the manual-latents path
# above is enabled (so that "original_noise.pt" exists). Kept as reference.
print("[Step 3] 对比 inversion 得到的噪声与原始噪声")

# print(f"inverted_noise.shape: {inverted_noise.shape}")
# print(f"original_noise.shape: {generated_tensor.shape}")

# original_noise = torch.load("original_noise.pt").to(inverted_noise.device)

# mse = torch.mean((inverted_noise - original_noise) ** 2).item()
# cos_sim = torch.nn.functional.cosine_similarity(inverted_noise.flatten(), original_noise.flatten(), dim=0).item()

# print(f"[Evaluation Result]")
# print(f"MSE: {mse:.6e}")
# print(f"Cosine Similarity: {cos_sim:.6f}")

# === 4. Optional closed loop: feed the inverted noise back through the forward pipeline ===
print("[Step 4] 反推出来的噪声再送入正向 pipeline")


def _reconstruct_and_save(latents_in, save_path):
    """Run the forward pipeline from the given latents and write the audio to save_path.

    Shared by the two reconstruction runs below (plain inversion latents and
    the 1.15-scaled "stable flow" latents), which previously duplicated the
    same call verbatim. Returns the (samples, channels) float32 numpy audio.
    """
    with torch.no_grad():
        recon_output = forward_pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            latents=latents_in,
            guidance_scale=1.0,  # no CFG, to keep the inversion round-trip comparable
            num_inference_steps=200,
            return_dict=True,
            # audio_end_in_s=45.0,
            num_waveforms_per_prompt=1,
            generator=generator,
        )
    audio = recon_output.audios[0].T.float().cpu().numpy()
    sf.write(save_path, audio, samplerate=forward_pipeline.vae.sampling_rate)  # save .wav
    return audio


# Reconstruct from the plain inversion latents, then from the 1.15-scaled ones.
reconstructed_audio = _reconstruct_and_save(inverted_noise, audio_revert_path)
reconstructed_audio_sf = _reconstruct_and_save(inverted_noise_sf, audio_revert_path_sf)



