import torch
from diffusers import StableAudioPipeline

def randn_tensor(shape, generator=None, device=None, dtype=None):
    """Sample a standard-normal tensor of the given shape.

    Args:
        shape: size of the tensor to draw.
        generator: optional ``torch.Generator`` for reproducible sampling;
            when omitted, torch's default RNG state is used.
        device: optional target device for the new tensor.
        dtype: optional dtype for the new tensor.

    Returns:
        A freshly drawn ``torch.randn`` tensor.
    """
    # Only forward `generator` when the caller supplied one, mirroring the
    # two call forms with a single randn invocation.
    randn_kwargs = {"device": device, "dtype": dtype}
    if generator is not None:
        randn_kwargs["generator"] = generator
    return torch.randn(shape, **randn_kwargs)

class StableAudioInversionPipeline(StableAudioPipeline):
    """StableAudioPipeline extended with an ``invert`` reconstruction pass.

    ``invert`` encodes an audio clip into the VAE latent space, perturbs the
    latent with noise at the first inversion timestep, runs the scheduler's
    reverse process over it, and decodes the result back to audio.
    """

    def invert(
        self,
        audio: torch.Tensor,
        noise: torch.Tensor = None,
        num_inversion_steps: int = 50,
        generator: torch.Generator = None,
        return_dict: bool = True,
        **kwargs,
    ):
        """Noise an audio clip's latent and reconstruct it via reverse diffusion.

        Args:
            audio: input waveform tensor; its device drives all computation.
            noise: optional pre-drawn noise matching the latent shape; drawn
                from ``generator`` when omitted.
            num_inversion_steps: how many scheduler timesteps to traverse.
            generator: optional RNG for reproducible noise sampling.
            return_dict: when True, return a dict of named outputs; otherwise
                the tuple ``(reconstructed_audio, latents, noise, timesteps)``.
            **kwargs: accepted for call-site compatibility; currently unused.
        """
        target_device = audio.device

        # Encode the waveform into the latent space (posterior mean, no sampling).
        with torch.no_grad():
            clean_latent = self.vae.encode(audio).latent_dist.mean

        # Draw noise lazily so callers may supply their own for reproducibility.
        if noise is None:
            noise = randn_tensor(
                clean_latent.shape,
                generator=generator,
                device=target_device,
                dtype=clean_latent.dtype,
            )

        # NOTE(review): `get_inversion_timesteps` is assumed to exist on the
        # configured scheduler — confirm against the scheduler class in use.
        timesteps = self.scheduler.get_inversion_timesteps(num_inversion_steps)
        timesteps = timesteps.to(target_device)

        # Jump straight to the noisiest point on the inversion trajectory.
        latents = self.scheduler.add_noise(clean_latent, noise, timesteps[0])

        # Walk the trajectory back down, one scheduler step per timestep.
        for timestep in timesteps:
            model_input = self.scheduler.scale_model_input(latents, timestep)

            # Unconditional UNet forward pass (no text/prompt conditioning).
            with torch.no_grad():
                predicted_noise = self.unet(
                    model_input, timestep, encoder_hidden_states=None
                ).sample

            latents = self.scheduler.step(predicted_noise, timestep, latents).prev_sample

        # Decode the final latent back into a waveform.
        with torch.no_grad():
            reconstructed_audio = self.vae.decode(latents).sample

        if not return_dict:
            return reconstructed_audio, latents, noise, timesteps
        return {
            "reconstructed_audio": reconstructed_audio,
            "latents": latents,
            "noise": noise,
            "timesteps": timesteps,
        }
