import os
import imageio
import numpy as np
from typing import Union

import torch
import torchvision

from tqdm import tqdm
from einops import rearrange

def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=4, fps=8):
    # videos: (batch, channels, frames, height, width) -> iterate over frames
    videos = rearrange(videos, "b c t h w -> t b c h w")
    outputs = []
    for x in videos:
        x = torchvision.utils.make_grid(x, nrow=n_rows)
        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)  # c h w -> h w c
        if rescale:
            x = (x + 1.0) / 2.0  # [-1, 1] -> [0, 1]
        x = (x * 255).cpu().numpy().astype(np.uint8)
        outputs.append(x)

    os.makedirs(os.path.dirname(path), exist_ok=True)
    imageio.mimsave(path, outputs, fps=fps)
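
# Example usage of save_videos_grid (a minimal sketch; the tensor values and
# output path here are illustrative assumptions):
#   videos = torch.rand(2, 3, 16, 64, 64)  # (batch, channels, frames, h, w) in [0, 1]
#   save_videos_grid(videos, "samples/grid.gif", rescale=False, n_rows=2, fps=8)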


# DDIM Inversion
@torch.no_grad()
def init_prompt(prompt, pipeline):
    uncond_input = pipeline.tokenizer(
        [""], padding="max_length", max_length=pipeline.tokenizer.model_max_length,
        return_tensors="pt"
    )
    uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
    text_input = pipeline.tokenizer(
        [prompt],
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
    context = torch.cat([uncond_embeddings, text_embeddings])  # stack [uncond, cond] along the batch dim

    return context


def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
              sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
    # One reversed DDIM step for an epsilon-prediction model: map x_t to x_{t+1}.
    timestep, next_timestep = min(
        timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    beta_prod_t = 1 - alpha_prod_t
    next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5  # predicted x_0
    next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output  # direction toward x_{t+1}
    next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
    return next_sample
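
# The update above is the deterministic DDIM equation run in reverse:
#   x_{t+1} = sqrt(alpha_{t+1}) * x0_hat + sqrt(1 - alpha_{t+1}) * eps_hat,
# with x0_hat = (x_t - sqrt(1 - alpha_t) * eps_hat) / sqrt(alpha_t), where
# eps_hat is the UNet's noise prediction at timestep t.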


def get_noise_pred_single(latents, t, context, unet, image_embeds=None):
    # image_embeds, when given, are routed through class_labels (unCLIP-style conditioning).
    noise_pred = unet(latents, t, encoder_hidden_states=context, class_labels=image_embeds)["sample"]
    return noise_pred


@torch.no_grad()
def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
    context = init_prompt(prompt, pipeline)
    uncond_embeddings, cond_embeddings = context.chunk(2)  # inversion uses only the conditional branch
    all_latent = [latent]
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]  # walk the schedule back to front
        noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
        latent = next_step(noise_pred, t, latent, ddim_scheduler)
        all_latent.append(latent)
    return all_latent


@torch.no_grad()
def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
    ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
    return ddim_latents
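
# Example usage (a minimal sketch; the model id, step count, and prompt are
# illustrative assumptions -- any diffusers pipeline exposing .tokenizer,
# .text_encoder, and .unet fits this interface):
#   from diffusers import DDIMScheduler
#   scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   scheduler.set_timesteps(50)
#   all_latents = ddim_inversion(pipe, scheduler, video_latent, num_inv_steps=50, prompt="a cat")
#   inverted = all_latents[-1]  # fully inverted noise latent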


# DDIM Inversion (unCLIP image-conditioned variant)
@torch.no_grad()
def init_image_embed(image_embeds, pipeline, noise_level, generator):
    dtype = next(pipeline.image_encoder.parameters()).dtype
    device = pipeline.image_encoder.device

    noise_level = torch.tensor([noise_level], device=device)

    # Apply the pipeline's noise augmentation to the CLIP image embedding.
    image_embeds = pipeline.noise_image_embeddings(
        image_embeds=image_embeds,
        noise_level=noise_level,
        generator=generator,
    )  # shape: (1, 1024)

    return image_embeds

def next_step_velocity(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
                       sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
    # One reversed DDIM step for a v-prediction model.
    timestep, next_timestep = min(
        timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    beta_prod_t = 1 - alpha_prod_t

    # Convert the velocity prediction into x_0 and epsilon estimates:
    #   x0_hat  = sqrt(alpha_t) * x_t - sqrt(1 - alpha_t) * v
    #   eps_hat = sqrt(alpha_t) * v   + sqrt(1 - alpha_t) * x_t
    next_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
    next_pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
    next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * next_pred_epsilon
    next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction

    return next_sample

@torch.no_grad()
def ddim_loop_unclip(pipeline, ddim_scheduler, latent, num_inv_steps, prompt, image_embed, noise_level, generator):
    context = init_prompt(prompt, pipeline)
    uncond_embeddings, cond_embeddings = context.chunk(2)
    assert image_embed is not None

    if image_embed.dim() != 0:
        image_embeddings = init_image_embed(image_embed, pipeline, noise_level, generator)
    else:
        # 0-dim placeholder: pass through unchanged
        image_embeddings = image_embed

    all_latent = [latent]
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
        noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet, image_embeddings)
        latent = next_step_velocity(noise_pred, t, latent, ddim_scheduler)
        all_latent.append(latent)
    return all_latent


@torch.no_grad()
def ddim_inversion_unclip(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt="", image_embed=None, noise_level=0, seed=0):
    """
    The generator is seeded here so that repeated inversions produce a
    consistent latent estimate.
    """
    generator = torch.Generator(device=video_latent.device)
    generator.manual_seed(seed)

    ddim_latents = ddim_loop_unclip(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt, image_embed, noise_level, generator)
    return ddim_latents
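
# Example usage (a minimal sketch; the pipeline and inputs are illustrative
# assumptions -- this expects a Stable unCLIP-style pipeline that exposes
# .image_encoder, .noise_image_embeddings, and a v-prediction .unet):
#   scheduler.set_timesteps(50)
#   all_latents = ddim_inversion_unclip(pipe, scheduler, video_latent, 50,
#                                       prompt="", image_embed=clip_image_embed,
#                                       noise_level=0, seed=0)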

def next_step_sample(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
                     num_inference_steps: int, ddim_scheduler):
    # One reversed DDIM step for a sample-prediction model (e.g. the unCLIP prior),
    # where the model output is itself the predicted x_0.
    timestep, next_timestep = min(
        timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    next_original_sample = model_output  # "sample" prediction: output is x_0
    next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
    return next_sample
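
# Note: for sample prediction the exact DDIM epsilon is
#   eps_hat = (x_t - sqrt(alpha_t) * x0_hat) / sqrt(1 - alpha_t);
# the direction term above approximates it by reusing the model output directly.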

def get_noise_pred_single_prior(latents, t, prior, prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask):
    # The prior network predicts the image embedding directly.
    noise_pred = prior(latents, timestep=t, proj_embedding=prior_prompt_embeds,
                       encoder_hidden_states=prior_text_encoder_hidden_states,
                       attention_mask=prior_text_mask).predicted_image_embedding
    return noise_pred

@torch.no_grad()
def ddim_loop_prior(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
    prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = pipeline._encode_prior_prompt(
        prompt=prompt, device=pipeline.prior.device, num_images_per_prompt=1, do_classifier_free_guidance=False)
    all_latent = [latent]
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
        noise_pred = get_noise_pred_single_prior(latent, t, pipeline.prior, prior_prompt_embeds,
                                                 prior_text_encoder_hidden_states, prior_text_mask)
        latent = next_step_sample(noise_pred, t, num_inv_steps, ddim_scheduler)
        all_latent.append(latent)
    return all_latent

@torch.no_grad()
def ddim_inversion_prior(pipeline, ddim_scheduler, latent, num_inv_steps, prompt=""):
    ddim_latents = ddim_loop_prior(pipeline, ddim_scheduler, latent, num_inv_steps, prompt)
    return ddim_latents
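
# Example usage (a minimal sketch; the pipeline and step count are illustrative
# assumptions -- this expects an unCLIP-style pipeline exposing .prior and
# ._encode_prior_prompt, with the prior scheduler set to sample prediction):
#   prior_scheduler.set_timesteps(25)
#   all_latents = ddim_inversion_prior(pipe, prior_scheduler, image_embed_latent, 25, prompt="a cat")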
