import torch
import numpy as np
# from diffusers import StableAudioPipeline
# from your_custom_module import StableAudioInversionPipeline, CosineDPMSolverMultistepInverseScheduler  # change to your actual import path
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline
# from diffusers.pipelines.stable_audio.pipeline_stable_audio_null_text_inversion import StableAudioInversionPipeline
from diffusers.pipelines.stable_audio.pipeline_stable_audio import StableAudioPipeline
import soundfile as sf
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import librosa.display

from diffusers.schedulers.scheduling_cosine_dpmsolver_multistep_reverse import CosineDPMSolverMultistepReverseScheduler


device = "cuda" if torch.cuda.is_available() else "cpu"

# Prompt used for generation, inversion, and reconstruction below.
# prompt = "A relaxing piano melody in a calm environment"
prompt = "Solo, Jazz, Piano, Relaxed, Gentle"
negative_prompt = "Low quality."

"""
Ambient Techno, meditation, Scandinavian Forest, 808 drum machine, 808 kick, claps, shaker, synthesizer, synth bass, Synth Drones, beautiful, peaceful, Ethereal, Natural, 122 BPM, Instrumental


Warm soft hug, comfort, low synths, twinkle, wind and leaves, ambient, peace, relaxed, water

Lofi hip hop beat, chillhop


Disco, Driving Drum Machine, Synthesizer, Bass, Piano, Guitars, Instrumental, Clubby, Euphoric, Chicago, New York, 115 BPM


Cyberpunk, Country Instrumental, Synthwave


Ambient house, 808 drum machine, 808 kick, claps, shaker, synthesizer, synth bass, modern, futuristic, Dancy, Euphoric, 125 BPM
"""

# Sanitize the prompt into a filesystem-safe file-name stem.
# Order matters: multi-character separators must be collapsed before single
# spaces are turned into underscores.
_FILENAME_REPLACEMENTS = (
    (": ", "-"),   # "colon + space" separator -> hyphen
    (" | ", "-"),  # pipe separator -> hyphen
    (" ", "_"),    # spaces -> underscores
    ("/", "_"),    # "/" is a path separator, illegal in a file name
)
formatted_prompt = prompt
for _old, _new in _FILENAME_REPLACEMENTS:
    formatted_prompt = formatted_prompt.replace(_old, _new)

audio_save_path = f"{formatted_prompt}.wav"
audio_revert_path = f"{formatted_prompt}_revert_x1.15.wav"
audio_revert_path_null = f"{formatted_prompt}_revert_不带prompt生成.wav"
# audio_revert_path = f"{prompt}_revert_错误的prompt生成.wav"
print("处理后的路径：",formatted_prompt)

# Model locations.
repo_id = "stabilityai/stable-audio-open-1.0"  # hosted repo id
# Raw string fix: the original "E:\code\..." literal relied on unrecognized
# escape sequences (\c, \d, \s, \m), which emit SyntaxWarning on Python 3.12+
# and would silently corrupt the path if a recognized escape (e.g. \n) ever
# appeared in it. The value itself is unchanged.
model_path = r"E:\code\deepLearning\stable-audio\model\stable-audio-open-1.0"  # local checkout

# === Step 1: forward pass — synthesize audio from the text prompt ===
print("[Step 1] 正向生成音频并保存初始噪声...")

forward_pipeline = StableAudioPipeline.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
).to(device)

# Seeded RNG so the sampled latents are reproducible across runs.
# NOTE(review): "cuda" is hard-coded here even though `device` may fall back
# to "cpu" — confirm this is intentional.
generator = torch.Generator("cuda").manual_seed(0)

# NOTE(review): a manually-prepared-latents path (randn_tensor scaled by the
# scheduler's init_noise_sigma, saved to original_noise.pt) existed here but is
# disabled; it is what the disabled Step-3 comparison would need.

# Sample conditioned on the prompt; the pipeline prepares its own latents.
with torch.no_grad():
    fwd_result = forward_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=7.0,
        num_inference_steps=200,
        return_dict=True,
        num_waveforms_per_prompt=1,
        generator=generator,
    )

# (samples, channels) float32 tensor on CPU, plus its numpy view for soundfile.
generated_tensor = fwd_result.audios[0].T.float().cpu()
generated_audio = generated_tensor.numpy()
sf.write(audio_save_path, generated_audio, samplerate=forward_pipeline.vae.sampling_rate) # write the .wav
print(f"音频保存到 {audio_save_path}")

# === 2. Recover the noise from the generated audio with the inversion pipeline ===
print("[Step 2] 逆向推理，inversion 开始...")

inverse_pipeline = StableAudioInversionPipeline.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
).to(device)
# inverse_pipeline.scheduler = CosineDPMSolverMultistepReverseScheduler.from_config(inverse_pipeline.scheduler.config)

# Reshape (samples, channels) -> (1, channels, samples) and match the fp16 pipeline.
print("音频张量信息：",generated_tensor.dtype, generated_tensor.shape)
generated_tensor = generated_tensor.T.unsqueeze(0).half().to(device)
print("修改后张量信息：",generated_tensor.dtype, generated_tensor.shape)

# Encode the waveform into VAE latent space; this becomes the `latents` input of
# the inversion pipeline. NOTE(review): the original author flagged this route
# as unverified ("feasibility questionable").
with torch.no_grad():
    latents = inverse_pipeline.vae.encode(generated_tensor).latent_dist.sample()  # shape: [B, latent_dim, L]
print("修改后latent信息：",latents.dtype, latents.shape)

# Core inversion call: run the diffusion process in reverse from the encoded latents.
with torch.no_grad():
    inversion_output = inverse_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        latents=latents, # audio=generated_audio,
        num_inference_steps=200,
        guidance_scale=1.0,
        return_dict=True,
        output_type="latent"
    )
    # NOTE(review): the author observed silent/empty audio when output_type was
    # "audio" and suspected a pipeline- or scheduler-level problem rather than
    # this call site — still unresolved.

inverted_noise = inversion_output.audios  # with output_type="latent" this field holds latents, not audio

# Decode the inverted latents back to a waveform as a sanity check.
with torch.no_grad():
    decoded_audio = inverse_pipeline.vae.decode(inverted_noise).sample
output_wav_path = "validator_noise.wav"
# To CPU float32, transposed to (samples, channels) as soundfile expects.
decoded_audio = decoded_audio.squeeze(0).cpu().float().numpy().T  # (samples, channels)
# Persist the decoded check signal as WAV.
sf.write(output_wav_path, decoded_audio, forward_pipeline.vae.sampling_rate)
print(f"反演音频已保存至: {output_wav_path}")
# torch.save(inverted_noise, "inverted_noise.pt")

# inverted_noise_tensor = inversion_output.audios[0].T.float().cpu()
# torch.save(inverted_noise_tensor, f"{audio_revert_path}.pt")

# === 3. Error analysis: compare inverted noise against the original noise ===
print("[Step 3] 对比 inversion 得到的噪声与原始噪声")

# NOTE(review): the comparison below is disabled — it depends on
# "original_noise.pt", which is only written by the (also disabled)
# manual-latents path in Step 1. Re-enable both together.
# print(f"inverted_noise.shape: {inverted_noise.shape}")
# print(f"original_noise.shape: {generated_tensor.shape}")

# original_noise = torch.load("original_noise.pt").to(inverted_noise.device)

# mse = torch.mean((inverted_noise - original_noise) ** 2).item()
# cos_sim = torch.nn.functional.cosine_similarity(inverted_noise.flatten(), original_noise.flatten(), dim=0).item()

# print(f"[Evaluation Result]")
# print(f"MSE: {mse:.6e}")
# print(f"Cosine Similarity: {cos_sim:.6f}")

# === 4. Closed loop (optional): feed the inverted noise back through the forward pipeline ===
print("[Step 4] 反推出来的噪声再送入正向 pipeline")

# Conditional regeneration with the same prompt and settings as Step 1,
# but starting from the inverted latents instead of fresh noise.
with torch.no_grad():
    recon_result = forward_pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        latents=inverted_noise,
        guidance_scale=7.0,
        num_inference_steps=200,
        return_dict=True,
        # audio_end_in_s=45.0,
        num_waveforms_per_prompt=1,
        # generator=generator,
    )

# (samples, channels) float32 numpy on CPU for soundfile.
reconstructed_audio = recon_result.audios[0].T.float().cpu().numpy()

sf.write(audio_revert_path, reconstructed_audio, samplerate=forward_pipeline.vae.sampling_rate) # write the .wav

# NOTE(review): an unconditional variant was tried and left disabled — empty
# prompt and negative_prompt, guidance_scale=1.0, 1000 steps, saved to
# `audio_revert_path_null`. Rationale (see the DDIM-inversion literature):
# inverted noise often reconstructs the source well without text guidance.


# === 5. Optional: visualize original vs. reconstructed waveforms ===
# NOTE(review): `generated_audio` and `reconstructed_audio` are already numpy
# arrays at this point, so the original `.cpu().numpy()` calls would crash if
# re-enabled; also use `vae.sampling_rate` as everywhere else. waveshow expects
# (n,) or (channels, n) — transpose for stereo; confirm against the data shape.
# print("绘制原始音频与重建音频波形对比...")
# plt.figure(figsize=(12, 4))
# plt.subplot(1, 2, 1)
# librosa.display.waveshow(generated_audio.T, sr=forward_pipeline.vae.sampling_rate)
# plt.title("Original Audio")

# plt.subplot(1, 2, 2)
# librosa.display.waveshow(reconstructed_audio.T, sr=forward_pipeline.vae.sampling_rate)
# plt.title("Reconstructed Audio from Inverted Noise")

# plt.tight_layout()
# plt.show()
