"""stable audio inversion 的二次封装
封装了音频的生成，反演，在生成，关注结果层面

相当于之前的sa_inversion_validator和inversion的结合
"""

# 基本导入
import re
import torch
import soundfile as sf
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
import gc

# 自定义导入
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline
from diffusers.pipelines.stable_audio.pipeline_stable_audio_null_text_inversion import StableAudioNullTextInversionPipeline # null-text-inversion
from diffusers.pipelines.stable_audio.pipeline_stable_audio_null_text import StableAudioNullTextPipeline # # null-text
from diffusers.pipelines.stable_audio.pipeline_stable_audio_renoise_inversion import StableAudioRenoiseInversionPipeline # renoise
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inv_inversion import StableAudioInvInversionPipeline # inv-inversion
from diffusers.pipelines.stable_audio.pipeline_stable_audio import StableAudioPipeline


# scheduler
from diffusers.schedulers.scheduling_cosine_dpmsolver_multistep_reverse import CosineDPMSolverMultistepReverseScheduler

class StableAudioInversionProcessor:
    """High-level wrapper around Stable Audio generation, inversion and reconstruction.

    Combines the roles of the former ``sa_inversion_validator`` and ``inversion``
    helpers: generate audio from a prompt, invert audio back into diffusion
    latents, and reconstruct audio from those latents, focusing on the results.
    """

    def __init__(
        self,
        model_path,
        method="ddim",  # supported: "ddim", "null-text", "renoise", "inv-dpv"
        output_dir="output",
        seed=0
    ):
        """
        Args:
            model_path: local path or hub id of the Stable Audio model.
            method: inversion method, one of "ddim", "null-text", "renoise",
                "inv-dpv".
            output_dir: directory for results; created if it does not exist.
            seed: seed for the torch generator shared by the pipelines.
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model_path = model_path
        self.method = method
        self.output_dir = output_dir
        self.seed = seed
        self.generator = torch.Generator(self.device).manual_seed(seed)

        os.makedirs(self.output_dir, exist_ok=True)
        self.forward_pipeline, self.inverse_pipeline = self._load_pipelines()

    @staticmethod
    def sanitize_filename(text):
        """Turn arbitrary text into a safe, short file-name stem.

        Characters outside [word / whitespace / hyphen] become underscores,
        runs of whitespace (plus colons, pipes, slashes) collapse into a single
        underscore, and the result is truncated to 20 characters — well under
        the 255-character Windows path limit, and short enough to stay readable.
        """
        sanitized = re.sub(r'[^\w\s-]', '_', text)        # keep letters, digits, _, whitespace, -
        sanitized = re.sub(r'[\s:|\/]+', '_', sanitized)  # collapse separators into one _
        max_length = 20
        return sanitized[:max_length]

    def _load_pipelines(self):
        """Load the forward (generation) and inverse pipelines for ``self.method``.

        Returns:
            (forward, inverse): both pipelines in fp16, moved to ``self.device``.

        Raises:
            ValueError: if ``self.method`` is not a recognized method name.
        """
        pipeline_classes = {
            "ddim": (StableAudioPipeline, StableAudioInversionPipeline),
            "null-text": (StableAudioNullTextPipeline, StableAudioNullTextInversionPipeline),
            "renoise": (StableAudioPipeline, StableAudioRenoiseInversionPipeline),
            "inv-dpv": (StableAudioPipeline, StableAudioInvInversionPipeline),
        }
        if self.method not in pipeline_classes:
            raise ValueError(f"未知的 inversion 方法: {self.method}")

        forward_cls, inverse_cls = pipeline_classes[self.method]
        forward = forward_cls.from_pretrained(self.model_path, torch_dtype=torch.float16).to(self.device)
        inverse = inverse_cls.from_pretrained(self.model_path, torch_dtype=torch.float16).to(self.device)

        # The inverse pipeline runs the sampler in the reverse direction, so
        # rebuild its scheduler as the reverse variant from the same config.
        inverse.scheduler = CosineDPMSolverMultistepReverseScheduler.from_config(inverse.scheduler.config)
        return forward, inverse

    def generate_audio(self, prompt, negative_prompt="Low quality", steps=200, guidance_scale=7.0, uncond_embeddings=None):
        """Generate one waveform for ``prompt``.

        Args:
            prompt: text prompt for the forward pipeline.
            negative_prompt: negative prompt for classifier-free guidance.
            steps: number of inference steps.
            guidance_scale: classifier-free guidance scale.
            uncond_embeddings: accepted for interface symmetry with
                ``reconstruct_audio``; not used by the forward generation call.

        Returns:
            np.ndarray of shape (samples, channels), float32 on CPU
            (``audios[0]`` is transposed before returning).
        """
        with torch.no_grad():
            output = self.forward_pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                guidance_scale=guidance_scale,
                num_inference_steps=steps,
                num_waveforms_per_prompt=1,
                generator=self.generator,
                return_dict=True,
            )
        return output.audios[0].T.float().cpu().numpy()

    def invert_audio(self, audio, prompt, negative_prompt="Low quality", steps=200, guidance_scale=1.0):
        """Invert audio into the diffusion latent space.

        Args:
            audio: a wav file path, a numpy array, or a torch tensor.
                Paths and 2-D arrays are interpreted as (samples, channels)
                — the layout ``generate_audio`` returns — and converted to a
                (1, channels, samples) half tensor on ``self.device``.
            prompt: text prompt used to condition the inversion.
            negative_prompt: negative prompt for guidance.
            steps: number of inversion steps.
            guidance_scale: guidance scale (1.0 = unconditional-free DDIM-style inversion).

        Returns:
            The inverse pipeline output (``output_type="latent"``).
        """
        if isinstance(audio, str):
            audio_np, _ = sf.read(audio)
            if audio_np.ndim == 1:
                # Mono: add the channel axis first so the layout is (1, samples),
                # matching the (channels, samples) layout of the stereo branch.
                audio_np = audio_np[None, :]
            else:
                audio_np = audio_np.T  # (samples, channels) -> (channels, samples)
            audio = torch.tensor(audio_np).unsqueeze(0).half().to(self.device)

        elif isinstance(audio, np.ndarray):
            if audio.ndim == 1:
                audio = audio[None, :]  # mono -> (1, samples)
            elif audio.ndim == 2:
                audio = audio.T  # (samples, channels) -> (channels, samples)
            audio = torch.tensor(audio).unsqueeze(0).half().to(self.device)

        elif isinstance(audio, torch.Tensor):
            if audio.ndim == 2:
                audio = audio.unsqueeze(0)
            elif audio.ndim == 3 and audio.shape[-1] < audio.shape[-2]:
                # Heuristic: more "channels" than "samples" means the last two
                # axes are swapped — bring it to (batch, channels, samples).
                audio = audio.permute(0, 2, 1)
            audio = audio.half().to(self.device)

        # no_grad for consistency with generate/reconstruct and to avoid
        # building an autograd graph through the VAE and the sampler.
        with torch.no_grad():
            latents = self.inverse_pipeline.vae.encode(audio).latent_dist.mean
            result = self.inverse_pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                latents=latents,
                num_inference_steps=steps,
                guidance_scale=guidance_scale,
                return_dict=True,
                output_type="latent"
            )
        return result

    def reconstruct_audio(self, inverted_latents, prompt=None, negative_prompt=None, uncond_embeddings=None, steps=200, guidance_scale=7.0):
        """Reconstruct a waveform from inverted latents.

        Args:
            inverted_latents: latents produced by ``invert_audio``.
            prompt: text prompt for the forward pass.
            negative_prompt: negative prompt for guidance.
            uncond_embeddings: only forwarded for the "null-text" method.
            steps: number of inference steps.
            guidance_scale: classifier-free guidance scale.

        Returns:
            np.ndarray of shape (samples, channels), float32 on CPU.
        """
        extra_kwargs = {}
        if self.method == "null-text":
            extra_kwargs["uncond_embeddings"] = uncond_embeddings  # null-text only
        with torch.no_grad():
            result = self.forward_pipeline(
                prompt=prompt,
                negative_prompt=negative_prompt,
                latents=inverted_latents,
                guidance_scale=guidance_scale,
                num_inference_steps=steps,
                return_dict=True,
                num_waveforms_per_prompt=1,
                generator=self.generator,
                **extra_kwargs,
            )
        return result.audios[0].T.float().cpu().numpy()

    def run_batch_experiments(
        self,
        prompts,
        invert_scales=None,
        reconstruct_scales=None,
        num_steps_list=None,
        invert_type="ddim",
        use_existing_audio=False
    ):
        """Run generate -> invert -> reconstruct over a grid of settings.

        Args:
            prompts: list of prompt strings.
            invert_scales: inversion guidance scales (default [1.0]).
            reconstruct_scales: reconstruction guidance scales (default [7.0]).
            num_steps_list: inference step counts (default [50]).
            invert_type: inversion-type label used in output file names.
            use_existing_audio: reuse ``<prompt>.wav`` from ``output_dir`` if present.
        """
        # None-defaults instead of mutable default arguments.
        invert_scales = [1.0] if invert_scales is None else invert_scales
        reconstruct_scales = [7.0] if reconstruct_scales is None else reconstruct_scales
        num_steps_list = [50] if num_steps_list is None else num_steps_list

        for prompt in prompts:
            self.prompt = prompt
            self.formatted_prompt = self.sanitize_filename(prompt)

            for inv_scale in invert_scales:
                for rec_scale in reconstruct_scales:
                    for steps in num_steps_list:
                        self.num_inference_steps = steps
                        self.guidance_scale = rec_scale

                        print(f"\n=== Experiment: prompt='{prompt[:30]}...', inv_scale={inv_scale}, rec_scale={rec_scale}, steps={steps}, invert_type={invert_type} ===")

                        # Generate audio, or load an existing file from disk.
                        if use_existing_audio:
                            audio_path = os.path.join(self.output_dir, f"{self.formatted_prompt}.wav")
                            if not os.path.exists(audio_path):
                                print(f"警告: {audio_path} 不存在，将重新生成")
                                audio = self.generate_audio(prompt)
                            else:
                                audio = audio_path
                        else:
                            audio = self.generate_audio(prompt)

                        file_suffix = f"{self.formatted_prompt}_inv{invert_type}_invScale{inv_scale}_recScale{rec_scale}_steps{steps}"

                        # Inversion (signature: audio, prompt, steps, guidance_scale).
                        inverted_noise = self.invert_audio(
                            audio,
                            prompt=prompt,
                            steps=steps,
                            guidance_scale=inv_scale,
                        )

                        # Reconstruction from the inverted latents.
                        reconstructed_audio = self.reconstruct_audio(
                            inverted_noise,
                            prompt=prompt,
                            steps=steps,
                            guidance_scale=rec_scale,
                        )

                        # Persist the result — file_suffix encodes the settings.
                        # 44.1 kHz matches the __main__ usage — TODO confirm
                        # against the model's configured sampling rate.
                        rec_path = os.path.join(self.output_dir, f"{file_suffix}_rec.wav")
                        sf.write(rec_path, reconstructed_audio, 44100)

                        


if __name__ == "__main__":
    import os
    import soundfile as sf

    # ===== 配置参数 =====
    model_path = "E:\\code\\deepLearning\\stable-audio\\model\\stable-audio-open-1.0"  # 本地模型
    output_dir = "audio_output_0919"
    prompt = "A relaxing piano melody in a calm environment"

    # ===== 创建处理器 =====
    processor = StableAudioInversionProcessor(
        model_path=model_path,
        method="ddim",          # 这里固定方法，可换 "null-text" / "renoise" / "inv-dpv"
        output_dir=output_dir,
        seed=0,
    )

    # ===== 1. 生成音频 =====
    print(">>> 正在生成音频...")
    original_audio = processor.generate_audio(
        prompt=prompt,
        steps=50,
        guidance_scale=7.0
    )

    # 保存原始生成结果
    os.makedirs(output_dir, exist_ok=True)
    gen_path = os.path.join(output_dir, f"{processor.sanitize_filename(prompt)}_gen.wav")
    sf.write(gen_path, original_audio.T, 44100)
    print(f"生成音频已保存: {gen_path}")

    # ===== 2. 反演 =====
    print(">>> 正在反演...")
    inverted_latents = processor.invert_audio(
        audio=original_audio,
        prompt=prompt,
        steps=50,
        guidance_scale=1.0
    )

    # 也可以保存 latents
    inv_path = os.path.join(output_dir, f"{processor.sanitize_filename(prompt)}_inv.pt")
    torch.save(inverted_latents, inv_path)
    print(f"反演结果已保存: {inv_path}")

    # ===== 3. 重建 =====
    print(">>> 正在重建...")
    reconstructed_audio = processor.reconstruct_audio(
        inverted_latents=inverted_latents,
        prompt=prompt,
        steps=50,
        guidance_scale=7.0
    )
    # null-text: reconstructed_audio, uncon_embeddings = ...

    rec_path = os.path.join(output_dir, f"{processor.sanitize_filename(prompt)}_rec.wav")
    sf.write(rec_path, reconstructed_audio.T, 44100)
    print(f"重建音频已保存: {rec_path}")

    print("流程完成: 生成 → 反演 → 重建")
