"""stable audio inversion 的二次封装
封装了音频的生成，反演，在生成，关注结果层面

相当于之前的sa_inversion_validator和inversion的结合
"""

# 基本导入
import re
import torch
import soundfile as sf
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
import gc

# 自定义导入
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inversion import StableAudioInversionPipeline
from diffusers.pipelines.stable_audio.pipeline_stable_audio_null_text_inversion import StableAudioNullTextInversionPipeline # null-text-inversion
from diffusers.pipelines.stable_audio.pipeline_stable_audio_null_text import StableAudioNullTextPipeline # # null-text
from diffusers.pipelines.stable_audio.pipeline_stable_audio_renoise_inversion import StableAudioRenoiseInversionPipeline # renoise
from diffusers.pipelines.stable_audio.pipeline_stable_audio_inv_inversion import StableAudioInvInversionPipeline # inv-inversion
from diffusers.pipelines.stable_audio.pipeline_stable_audio import StableAudioPipeline


# scheduler
from diffusers.schedulers.scheduling_cosine_dpmsolver_multistep_reverse import CosineDPMSolverMultistepReverseScheduler

class StableAudioInversionProcessor:
    """High-level wrapper around Stable Audio generation, inversion and reconstruction.

    Bundles three stages and their file I/O:
      1. text-to-audio generation (forward pipeline),
      2. latent inversion — recovering the noise latents that reproduce a
         given audio clip (inverse pipeline, renoise variant by default),
      3. reconstruction of audio from those inverted noise latents.

    All artifacts (wav files, latent ``.pt`` tensors) are written under
    ``output_dir`` using the sanitized prompt as the base file name.
    """

    # Maximum number of samples the model accepts per clip; longer inputs are cropped.
    MAX_INPUT_SAMPLES = 2097152

    def __init__(
        self,
        model_path,
        prompt,
        negative_prompt="Low quality.",
        output_dir="output",
        num_inference_steps=200,
        guidance_scale=7.0,
        seed=0,
    ):
        """Initialize the audio inversion processor and load the pipelines.

        Parameters:
            model_path: model path (local directory or Hugging Face repo id).
            prompt: text description of the audio to generate/invert.
            negative_prompt: negative prompt (optional).
            output_dir: directory for all generated files (created if missing).
            num_inference_steps: default number of diffusion steps.
            guidance_scale: default classifier-free guidance scale.
            seed: random seed for the torch generator.
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model_path = model_path
        self.prompt = prompt
        self.negative_prompt = negative_prompt
        self.output_dir = output_dir
        self.num_inference_steps = num_inference_steps
        self.guidance_scale = guidance_scale
        self.seed = seed
        self.generator = torch.Generator(self.device).manual_seed(seed)

        # Initialize pipeline slots explicitly so the unload_*/load_* helpers
        # can safely test for None even before load_models() has run.
        self.forward_pipeline = None
        self.inverse_pipeline = None

        os.makedirs(self.output_dir, exist_ok=True)

        # The sanitized prompt doubles as the base file name for all outputs.
        self.formatted_prompt = self.sanitize_filename(prompt)

        self.audio_save_path = os.path.join(self.output_dir, f"{self.formatted_prompt}.wav")
        self.audio_revert_path = os.path.join(self.output_dir, f"{self.formatted_prompt}_revert.wav")

        print(f"处理后的文件名: {self.formatted_prompt}")
        print(f"输出目录: {self.output_dir}")

        self.load_models()

    @staticmethod
    def sanitize_filename(text):
        """Turn free text into a safe, short file name.

        Replaces characters outside letters/digits/underscore/space/hyphen,
        then collapses whitespace, colons, pipes and slashes into underscores,
        and truncates to 20 characters (well under the 255-char path limit,
        kept short for readability).
        """
        sanitized = re.sub(r'[^\w\s-]', '_', text)   # keep letters, digits, _, spaces, hyphens
        sanitized = re.sub(r'[\s:|\/]+', '_', sanitized)  # collapse spaces/colons/pipes/slashes

        max_length = 20
        if len(sanitized) > max_length:
            sanitized = sanitized[:max_length]

        return sanitized

    def load_models(self):
        """Load the forward (generation) and inverse (inversion) pipelines.

        The inverse pipeline is the renoise variant; its scheduler is swapped
        for the cosine DPM-Solver++ *reverse* scheduler required by inversion.
        """
        print("加载正向模型...")
        self.forward_pipeline = StableAudioPipeline.from_pretrained(
            self.model_path,
            torch_dtype=torch.float16,
        ).to(self.device)

        # Alternative inversion variants exist in this package
        # (StableAudioInversionPipeline, StableAudioInvInversionPipeline,
        # StableAudioNullTextInversionPipeline); the renoise variant is the
        # one currently in use.
        print("加载renoise逆向模型...")
        self.inverse_pipeline = StableAudioRenoiseInversionPipeline.from_pretrained(
            self.model_path,
            torch_dtype=torch.float16,
        ).to(self.device)
        self.inverse_pipeline.scheduler = CosineDPMSolverMultistepReverseScheduler.from_config(
            self.inverse_pipeline.scheduler.config
        )

    def load_forward_model(self):
        """Replace the forward pipeline with the null-text reconstruction variant.

        NOTE(review): despite the name, this unconditionally loads the
        null-text pipeline (needed when reconstructing with optimized
        unconditional embeddings), overwriting any previously loaded
        forward pipeline.
        """
        print("加载null-text重建模型")
        self.forward_pipeline = StableAudioNullTextPipeline.from_pretrained(
            self.model_path,
            torch_dtype=torch.float16,
        ).to(self.device)

    def load_inverse_model(self):
        """Lazily (re)load the plain DDIM-style inversion pipeline if unloaded."""
        if self.inverse_pipeline is None:
            print("加载逆向模型...")
            self.inverse_pipeline = StableAudioInversionPipeline.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
            ).to(self.device)
            self.inverse_pipeline.scheduler = CosineDPMSolverMultistepReverseScheduler.from_config(
                self.inverse_pipeline.scheduler.config
            )

    def unload_forward_model(self):
        """Unload the forward pipeline and release GPU memory."""
        if self.forward_pipeline is not None:
            print("卸载正向模型...")
            del self.forward_pipeline
            self.forward_pipeline = None
            torch.cuda.empty_cache()
            gc.collect()

    def unload_inverse_model(self):
        """Unload the inverse pipeline and release GPU memory."""
        if self.inverse_pipeline is not None:
            print("卸载逆向模型...")
            del self.inverse_pipeline
            self.inverse_pipeline = None
            torch.cuda.empty_cache()
            gc.collect()

    def _generate_and_save(self, guidance_scale, **extra_pipeline_kwargs):
        """Shared forward-generation path: run the pipeline, save and return the wav.

        Returns the generated audio as a (samples, channels) float numpy array.
        """
        print("[Step 1] 正向生成音频...")

        with torch.no_grad():
            output = self.forward_pipeline(
                prompt=self.prompt,
                negative_prompt=self.negative_prompt,
                guidance_scale=guidance_scale,
                num_inference_steps=self.num_inference_steps,
                return_dict=True,
                num_waveforms_per_prompt=1,
                generator=self.generator,
                **extra_pipeline_kwargs,
            )

        # Pipeline returns (channels, samples); soundfile wants (samples, channels).
        generated_audio = output.audios[0].T.float().cpu().numpy()
        sf.write(self.audio_save_path, generated_audio, samplerate=self.forward_pipeline.vae.sampling_rate)
        print(f"音频保存到 {self.audio_save_path}")

        return generated_audio

    def generate_audio(self, guidance_scale=7.0):
        """Generate audio from the current prompt and save it to disk.

        Parameters:
            guidance_scale: classifier-free guidance scale (default 7.0,
                matching the previous hard-coded value).

        Returns:
            numpy.ndarray: generated audio, shape (samples, channels).
        """
        return self._generate_and_save(guidance_scale)

    def generate_audio_null_text(self, uncond_embeddings, guidance_scale=7.0):
        """Generate audio using externally optimized null-text embeddings.

        Parameters:
            uncond_embeddings: per-step unconditional embeddings produced by
                null-text inversion, forwarded to the null-text pipeline.
            guidance_scale: classifier-free guidance scale (default 7.0).

        Returns:
            numpy.ndarray: generated audio, shape (samples, channels).
        """
        return self._generate_and_save(guidance_scale, uncond_embeddings=uncond_embeddings)

    def _prepare_audio_tensor(self, audio):
        """Normalize audio input into a (1, channels, samples) half tensor on device.

        Accepts a file path, a numpy array in (samples, channels) layout
        (or 1-D mono), or a torch tensor in (channels, samples) /
        (batch, samples, channels) / (batch, channels, samples) layout.
        """
        if isinstance(audio, str):
            if not os.path.exists(audio):
                raise FileNotFoundError(f"音频文件不存在: {audio}")
            audio_np, sr = sf.read(audio)
            print(f"读取音频 {audio}, 采样率: {sr}")
            audio = audio_np

        if isinstance(audio, np.ndarray):
            if audio.ndim == 1:
                # Mono: add a channel axis so the (samples, channels) layout
                # below holds. (Previously mono was expanded on axis 0 and the
                # later transpose produced a (1, samples, 1) tensor — wrong.)
                audio = np.expand_dims(audio, axis=1)
            if audio.shape[0] > self.MAX_INPUT_SAMPLES:
                audio = audio[:self.MAX_INPUT_SAMPLES, :]  # crop to the model input range
            print("audio shape:", audio.shape)  # e.g. (2097152, 2)
            audio = audio.T  # (samples, channels) -> (channels, samples)
            return torch.tensor(audio).unsqueeze(0).half().to(self.device)  # (1, C, S)

        if isinstance(audio, torch.Tensor):
            if audio.ndim == 2:  # (channels, samples)
                audio = audio.unsqueeze(0)  # (1, channels, samples)
            elif audio.ndim == 3 and audio.shape[-1] < audio.shape[-2]:
                # Heuristic: fewer channels than samples means the layout is
                # (batch, samples, channels) and needs transposing.
                audio = audio.permute(0, 2, 1)
            return audio.half().to(self.device)

        raise TypeError(f"Unsupported audio input type: {type(audio)!r}")

    def invert_audio(self, audio, sf_flag=1.0, guidance_scale=1.0, num_steps=None, file_suffix=None):
        """Invert audio back to its noise latents via the inverse pipeline.

        Parameters:
            audio: numpy.ndarray | torch.Tensor | str
                Raw audio data or a path to an audio file (arrays in
                (samples, channels) layout, or 1-D mono).
            sf_flag: float
                Stable Flow-style scaling of the x0 latent before inversion
                (see https://arxiv.org/pdf/2411.14430, p.5 right; the paper
                uses 1.15). Default 1.0 disables the scaling.
            guidance_scale: guidance scale used during inversion.
            num_steps: optional override for the number of inversion steps;
                defaults to ``self.num_inference_steps``.
            file_suffix: base name for the saved latent/preview files;
                defaults to the sanitized prompt (previously a missing suffix
                produced files literally named "None_*").

        Returns:
            torch.Tensor: inverted noise latents (float16) as returned by the
            pipeline; also saved as ``<suffix>_noise_latents.pt`` plus a
            decoded preview ``<suffix>_noise.wav``.
        """
        print("[Step 2] 逆向推理，inversion 开始...")

        if file_suffix is None:
            file_suffix = self.formatted_prompt

        audio_tensor = self._prepare_audio_tensor(audio)

        # Encode to the latent space.
        with torch.no_grad():
            latents = self.inverse_pipeline.vae.encode(audio_tensor).latent_dist.mean

        print("[demo]Latents shape after encode:", latents.shape)
        # Decode once to sanity-check the value range of the audio being inverted.
        with torch.no_grad():
            test_audio = self.inverse_pipeline.vae.decode(latents).sample
        print("[demo]audio min/max:", test_audio.min().item(), test_audio.max().item())

        # Stable Flow x0 scaling (no-op at the default sf_flag=1.0).
        latents = latents * sf_flag

        # Core inversion call. NOTE: gradients must stay enabled for the
        # null-text inversion variant; plain DDIM inversion could run under
        # torch.no_grad().
        inversion_output = self.inverse_pipeline(
            prompt=self.prompt,
            negative_prompt=self.negative_prompt,
            latents=latents,
            num_inference_steps=self.num_inference_steps if num_steps is None else num_steps,
            guidance_scale=guidance_scale,
            return_dict=True,
            output_type="latent"
        )

        print("output type:", inversion_output)
        # The renoise inversion pipeline returns the latents in .audios
        # (the null-text variant returns a tuple and would need unpacking).
        inverted_noise = inversion_output.audios

        # Persist the latent tensor for later reconstruction runs.
        tensor_save_path = os.path.join(self.output_dir, f"{file_suffix}_noise_latents.pt")
        torch.save(inverted_noise, tensor_save_path)
        print(f"反演隐变量已保存至: {tensor_save_path}")

        # Also save a decoded preview of the inverted noise.
        with torch.no_grad():
            decoded_audio = self.inverse_pipeline.vae.decode(inverted_noise).sample
        decoded_audio = decoded_audio.squeeze(0).cpu().float().numpy().T
        output_path = os.path.join(self.output_dir, f"{file_suffix}_noise.wav")
        sf.write(output_path, decoded_audio, self.inverse_pipeline.vae.sampling_rate)
        print(f"反演音频已保存至: {output_path}")

        return inverted_noise

    def reconstruct_audio(self, inverted_noise, with_prompt=True, guidance_scale=7.0, file_suffix=None):
        """Reconstruct audio from inverted noise latents.

        Parameters:
            inverted_noise: latent tensor, or a path to a saved ``.pt`` file.
            with_prompt: if True, condition the reconstruction on the prompt
                and negative prompt; otherwise run unconditionally.
            guidance_scale: guidance scale for the forward pass.
            file_suffix: base name for the output wav; defaults to
                "<prompt>_revert" (matching ``self.audio_revert_path``;
                previously a missing suffix produced a file named "None.wav").

        Returns:
            numpy.ndarray: reconstructed audio, shape (samples, channels).
        """
        # Accept a saved latent file as input.
        if isinstance(inverted_noise, str) and inverted_noise.endswith('.pt'):
            print(f"从文件加载隐变量: {inverted_noise}")
            inverted_noise = torch.load(inverted_noise).to(self.device)

        if with_prompt:
            print("[Step 4] 使用带提示词的反转噪声重建音频...")

        if file_suffix is None:
            file_suffix = f"{self.formatted_prompt}_revert"

        with torch.no_grad():
            recon_output = self.forward_pipeline(
                prompt=self.prompt if with_prompt else None,
                negative_prompt=self.negative_prompt if with_prompt else None,
                latents=inverted_noise,
                guidance_scale=guidance_scale,
                num_inference_steps=self.num_inference_steps,
                return_dict=True,
                num_waveforms_per_prompt=1,
                generator=self.generator,
            )

        reconstructed_audio = recon_output.audios[0].T.float().cpu().numpy()
        output_path = os.path.join(self.output_dir, f"{file_suffix}.wav")
        sf.write(output_path, reconstructed_audio, samplerate=self.forward_pipeline.vae.sampling_rate)
        print(f"重建音频保存到 {output_path}")

        return reconstructed_audio

    def run_full_pipeline(self):
        """Run the full generate → invert → reconstruct flow.

        Pair with the load_*/unload_* helpers to manage GPU memory, e.g.::

            self.load_forward_model()
            ...
            self.unload_inverse_model()
        """
        # 1. Generate the original audio.
        original_audio = self.generate_audio()

        # 2. Invert it to recover the noise latents.
        inverted_noise = self.invert_audio(original_audio)

        # 3. Reconstruct audio from the inverted noise (prompt-conditioned).
        self.reconstruct_audio(inverted_noise, with_prompt=True)

        print("处理流程完成！")

    def run_batch_experiments(
        self,
        prompts,
        invert_scales=(1.0,),
        reconstruct_scales=(7.0,),
        num_steps_list=(50,),
        invert_type="ddim",
        use_existing_audio=False
    ):
        """Run a grid of inversion/reconstruction experiments.

        Parameters:
            prompts: list[str] - prompts to process.
            invert_scales: iterable[float] - guidance scales for inversion.
            reconstruct_scales: iterable[float] - guidance scales for reconstruction.
            num_steps_list: iterable[int] - inference step counts.
            invert_type: str - inversion type label, used only in file names.
            use_existing_audio: bool - reuse an existing wav for the prompt
                when present instead of regenerating it.
        """
        for prompt in prompts:
            self.prompt = prompt
            self.formatted_prompt = self.sanitize_filename(prompt)

            for inv_scale in invert_scales:
                for rec_scale in reconstruct_scales:
                    for steps in num_steps_list:
                        self.num_inference_steps = steps
                        self.guidance_scale = rec_scale

                        print(f"\n=== Experiment: prompt='{prompt[:30]}...', inv_scale={inv_scale}, rec_scale={rec_scale}, steps={steps}, invert_type={invert_type} ===")

                        # Generate the source audio, or reuse an existing file.
                        if use_existing_audio:
                            audio_path = os.path.join(self.output_dir, f"{self.formatted_prompt}.wav")
                            if not os.path.exists(audio_path):
                                print(f"警告: {audio_path} 不存在，将重新生成")
                                audio = self.generate_audio()
                            else:
                                audio = audio_path
                        else:
                            audio = self.generate_audio()

                        file_suffix = f"{self.formatted_prompt}_inv{invert_type}_invScale{inv_scale}_recScale{rec_scale}_steps{steps}"

                        # Inversion.
                        inverted_noise = self.invert_audio(
                            audio,
                            guidance_scale=inv_scale,
                            num_steps=steps,
                            file_suffix=file_suffix
                        )

                        # Reconstruction.
                        self.reconstruct_audio(
                            inverted_noise,
                            guidance_scale=rec_scale,
                            with_prompt=True,
                            file_suffix=file_suffix
                        )


if __name__ == "__main__":
    # Configuration
    model_path = "E:\\code\\deepLearning\\stable-audio\\model\\stable-audio-open-1.0"  # local model path
    # model_path = "stabilityai/stable-audio-open-1.0"  # or use the hosted model
    # prompt = "A relaxing piano melody in a calm environment"
    # prompt = "Uplifting acoustic loop. 120 BPM."
    # prompt = "Trance, Ibiza, Beach, Sun, 4 AM, Progressive, Synthesizer, 909, Dramatic chords, Choir, Euphoric, Nostalgic, Dynamic, Flowing"
    prompt = "A relaxing piano melody in a calm environment"  # NOTE: unused below; the processor is built from prompts[6]
    output_dir = "audio_output_1011"
    
    # Create the processor and run the full pipeline (disabled variant)
    # processor = StableAudioInversionProcessor(
    #     model_path=model_path,
    #     prompt=prompt,
    #     negative_prompt="Low quality",
    #     output_dir=output_dir,
    #     num_inference_steps=50,  # fewer steps to speed up testing
    #     seed=0
    # )
    
    prompts = [
        "Sports car passing by. High-quality, stereo.",
        "Fireworks. High-quality, stereo.",
        "Trance, Ibiza, Beach, Sun, 4 AM, Progressive, Synthesizer, 909, Dramatic chords, Choir, Euphoric, Nostalgic, Dynamic, Flowing",
        "Post Rock, echoing electric guitars with chorus, well recorded drum-kit, Electric Bass, occasional soaring harmonies, Moving, Epic, Climactic, 125 BPM",
        "A relaxing piano melody in a calm environment",
        "Rainy piano vibes. Well mixed and spatially paned vocals.",
        "A meditative track with some reggae influences, composed by a very repetitive drumsguitar pattern and some dreamy notes made by asian instruments" # index 6
    ]
    
    processor = StableAudioInversionProcessor(
        model_path=model_path,
        prompt=prompts[6],  # NOTE(review): original comment said "first prompt" but this selects index 6; run_batch_experiments would overwrite self.prompt per iteration anyway
        negative_prompt="Low quality",
        output_dir=output_dir,
        num_inference_steps=200,
        guidance_scale=7,
        seed=0
    )
    
    # processor.run_batch_experiments(
    #     prompts=prompts,
    #     invert_scales=[1.0, 7.0],
    #     reconstruct_scales=[1.0, 7.0],
    #     num_steps_list=[50, 200],
    #     invert_type="ddim",
    #     use_existing_audio=False
    # )
    
    # processor.run_full_pipeline()
    
    # original_audio = processor.generate_audio()
    # original_audio = "E:\\code\\deepLearning\\diffusers\\diffusersRepo\\_d\\audio_output_0926\\Rainy piano vibes. Well mixed and spatially paned vocals.mp3" # use a previously saved audio as the source for inversion
    original_audio = "E:\\code\\deepLearning\\diffusers\\diffusersRepo\\_d\\audio_output_0926\\A meditative track with some reggae influences, composed by a very repetitive drumsguitar pattern and some dreamy notes made by asian instruments.mp3" # use a previously saved audio as the source for inversion
    
    
    # 2. Invert the audio to recover its noise latents
    inverted_noise = processor.invert_audio(original_audio, guidance_scale=1, file_suffix="A meditative track_renoise_11")
    # inverted_noise = "E:\\code\\deepLearning\\diffusers\\diffusersRepo\\_d\\audio_output\\post_rock.pt"
    
    # 3. Reconstruct audio from the inverted noise (prompt-conditioned)
    reconstructed_with_prompt = processor.reconstruct_audio(inverted_noise, with_prompt=True, guidance_scale=1, file_suffix="A meditative track_renoise_11")
    
    # Usage:
    # processor.run_full_pipeline()
    # or run the individual steps separately:
    # original = processor.generate_audio()
    # inverted_noise = processor.invert_audio(original)
    # reconstructed = processor.reconstruct_audio(inverted_noise, with_prompt=False)