# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer

import ttnn


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete
class TtLMSDiscreteSchedulerOutput:
    """Output container returned by ``TtLMSDiscreteScheduler.step``."""

    # Sample for the previous timestep; feeds the next denoising iteration.
    prev_sample: ttnn.Tensor
    # Predicted fully-denoised sample (x_0); optional, populated by `step`.
    pred_original_sample: Optional[ttnn.Tensor] = None


class TtLMSDiscreteScheduler:
    """Linear multistep (LMS) discrete scheduler operating on ttnn tensors.

    Port of diffusers' LMSDiscreteScheduler: the noise schedule is
    precomputed on host with torch/numpy, while the per-step arithmetic in
    ``scale_model_input`` and ``step`` runs on device through ttnn ops.
    """

    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        device=None,
    ):
        # NOTE(review): `beta_schedule`, `trained_betas`, `prediction_type`
        # and `device` are accepted for diffusers interface parity but are
        # not used below — the betas are always built from the squared
        # linspace ("scaled_linear"-style) schedule. Confirm this is intended.
        self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        self.num_train_timesteps = num_train_timesteps
        self.alphas = 1.0 - self.betas
        # Alphas are derived from the torch betas first; only then are the
        # betas converted to a ttnn tensor.
        self.betas = ttnn.from_torch(self.betas)
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # sigma_t = sqrt((1 - alpha_bar_t) / alpha_bar_t), reversed to
        # descending order, with a trailing 0.0 for the final step.
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
        self.sigmas = torch.from_numpy(sigmas)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = self.sigmas.max()

        # setable values
        self.num_inference_steps = None
        timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps)
        # Rolling window of ODE derivatives used by the multistep update.
        self.derivatives = []
        self.is_scale_input_called = False

    def scale_model_input(self, sample, sigma, device) -> ttnn.Tensor:
        """Scale `sample` by 1 / sqrt(sigma**2 + 1) to match the LMS algorithm.

        Args:
            sample: ttnn tensor to scale (the current noisy latents).
            sigma: scalar sigma for the current timestep.
            device: ttnn device used to materialize the divisor tensor.

        Returns:
            The scaled ttnn tensor.
        """
        value = (sigma**2 + 1) ** 0.5
        # Division is expressed as multiplication by an element-wise
        # reciprocal of a constant-filled tensor.
        denominator = ttnn.full(
            sample.shape, fill_value=value, device=device, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT
        )
        denominator = ttnn.reciprocal(denominator)
        sample = ttnn.mul(sample, denominator)
        self.is_scale_input_called = True
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Precompute per-step sigmas and timesteps for an inference run.

        Interpolates the training-time sigma schedule down to
        `num_inference_steps` points, appends a final 0.0 sigma, moves both
        schedules to the device as bfloat16 ttnn tensors, and clears the
        derivative history.
        """
        self.num_inference_steps = num_inference_steps

        timesteps = np.linspace(0, self.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)

        self.sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32)
        self.sigmas = ttnn.from_torch(self.sigmas, layout=ttnn.TILE_LAYOUT, device=device, dtype=ttnn.bfloat16)

        self.timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32).unsqueeze(0)
        self.timesteps = ttnn.from_torch(self.timesteps, layout=ttnn.TILE_LAYOUT, device=device, dtype=ttnn.bfloat16)

        self.derivatives = []

    def step(
        self,
        model_output,
        sample,
        sigma,
        lms_coeffs,
        device,
        order: int = 4,
        return_dict: bool = True,
    ) -> Union[TtLMSDiscreteSchedulerOutput, Tuple]:
        """Advance the diffusion sample one step with the LMS multistep rule.

        Args:
            model_output: UNet noise prediction (ttnn tensor).
            sample: current noisy latents (ttnn tensor).
            sigma: scalar sigma for the current timestep.
            lms_coeffs: precomputed LMS coefficients for the current history
                window, broadcast-multiplied with the stacked derivatives.
            device: ttnn device used to materialize the divisor tensor.
            order: maximum number of past derivatives retained.
            return_dict: when False, return a plain 1-tuple instead of
                ``TtLMSDiscreteSchedulerOutput``.

        Returns:
            ``TtLMSDiscreteSchedulerOutput`` (or ``(prev_sample,)``).
        """
        if not self.is_scale_input_called:
            warnings.warn(
                "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
                "See `StableDiffusionPipeline` for a usage example."
            )

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        pred_original_sample = sample - sigma * model_output
        # 2. Convert to an ODE derivative
        numerator = sample - pred_original_sample
        # Division by sigma expressed as multiplication by a reciprocal tensor.
        denominator = ttnn.full(
            numerator.shape, fill_value=sigma, device=device, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT
        )
        denominator = ttnn.reciprocal(denominator)
        derivative = numerator * denominator
        # Keep at most `order` derivatives (newest last), dropping the oldest.
        self.derivatives.append(derivative)
        if len(self.derivatives) > order:
            self.derivatives.pop(0)

        # Stack history newest-first along the batch dim so it aligns with
        # the ordering of `lms_coeffs`.
        if len(self.derivatives) > 1:
            derivative_tensor = ttnn.concat(self.derivatives[::-1], dim=0)
        else:
            derivative_tensor = self.derivatives[0]
        derivative_tensor = derivative_tensor * lms_coeffs
        # Sum the weighted history over dim 0. The reduction is done on the
        # last dim between two permutes — presumably because reducing dim 0
        # directly is not supported/efficient here; confirm against ttnn docs.
        if derivative_tensor.shape[0] > 1:
            derivative_tensor = ttnn.permute(derivative_tensor, (3, 1, 2, 0))
            derivative_tensor = ttnn.sum(derivative_tensor, dim=-1, keepdim=True)
            derivative_tensor = ttnn.permute(derivative_tensor, (3, 1, 2, 0))
        # 3. x_{t-1} = x_t + sum(coeff_i * derivative_i)
        prev_sample = sample + derivative_tensor

        if not return_dict:
            return (prev_sample,)

        return TtLMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def tt_guide(noise_pred, guidance_scale):  # will return latents
    """Apply classifier-free guidance to a batched noise prediction.

    Batch index 0 holds the unconditional prediction; the remaining batch
    entries hold the text-conditioned prediction.
    """
    shape = noise_pred.shape
    uncond = noise_pred[:1, :, :, :]
    text = ttnn.slice(
        noise_pred,
        [1, 0, 0, 0],
        [shape[0], shape[1], shape[2], shape[3]],
    )
    guided_delta = guidance_scale * (text - uncond)
    return uncond + guided_delta


def run(
    model,
    config,
    tt_vae,
    input_latents,
    input_encoder_hidden_states,
    _tlist,
    time_step,
    guidance_scale,
    ttnn_scheduler,
):
    """Execute the Stable Diffusion denoising loop and VAE decode on device.

    Args:
        model: ttnn UNet callable taking latents, a timestep tensor, and
            encoder hidden states.
        config: UNet configuration object forwarded to the model call.
        tt_vae: ttnn VAE exposing ``decode`` on NHWC latents.
        input_latents: initial latent ttnn tensor (single-prompt batch).
        input_encoder_hidden_states: CLIP embeddings for the
            [unconditional, text] pair (batch of 2, concatenated latents).
        _tlist: per-step timestep tensors fed to the UNet.
        time_step: per-step timestep values fed to the scheduler; its
            length sets the number of denoising iterations.
        guidance_scale: classifier-free guidance weight.
        ttnn_scheduler: scheduler object exposing
            ``step(noise_pred, t, latents).prev_sample``.
            NOTE(review): this signature does not match
            ``TtLMSDiscreteScheduler.step`` defined above — presumably a
            different scheduler implementation is passed here; confirm.

    Returns:
        Decoded image ttnn tensor in NCHW layout (1 x C x 512 x 512).
    """
    ttnn_latents = input_latents
    # Denoising loop
    for index in tqdm(range(len(time_step))):
        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
        ttnn_latent_model_input = ttnn.concat([ttnn_latents, ttnn_latents], dim=0)
        _t = _tlist[index]
        t = time_step[index]
        # predict the noise residual
        ttnn_output = model(
            ttnn_latent_model_input,  # input
            timestep=_t,
            encoder_hidden_states=input_encoder_hidden_states,
            class_labels=None,
            attention_mask=None,
            cross_attention_kwargs=None,
            return_dict=True,
            config=config,
        )
        # perform guidance
        noise_pred = tt_guide(ttnn_output, guidance_scale)

        ttnn_latents = ttnn_scheduler.step(noise_pred, t, ttnn_latents).prev_sample

    # scale and decode the image latents with vae
    # 0.18215 is the SD v1.x VAE scaling factor; undo it before decoding.
    latents = 1 / 0.18215 * ttnn_latents

    # VAE decode expects NHWC; convert back to NCHW for the caller.
    ttnn_output = ttnn.permute(latents, [0, 2, 3, 1])
    ttnn_output = tt_vae.decode(ttnn_output)
    ttnn_output = ttnn.reshape(ttnn_output, [1, 512, 512, ttnn_output.shape[3]])
    return ttnn.permute(ttnn_output, [0, 3, 1, 2])


def compile_trace_sd(
    device, model, config, tt_vae, input_latents, _tlist, time_step, guidance_scale, ttnn_scheduler, num_inference_steps
):
    """Compile the SD pipeline, then capture it as a replayable device trace.

    Runs the full pipeline once with random text embeddings to trigger kernel
    compilation, then runs it a second time inside a trace-capture region so
    subsequent inferences can replay the trace with new embeddings copied
    into ``ttnn_text_embeddings_device``.

    Returns:
        Tuple of (device-resident text-embedding tensor to fill per prompt,
        the traced output tensor, the trace id for ``ttnn.execute_trace``).
    """
    # Preallocated device tensor the host copies fresh embeddings into on
    # every inference; shape matches the padded embeddings below.
    ttnn_text_embeddings_device = ttnn.allocate_tensor_on_device(
        ttnn.Shape([2, 96, 768]), ttnn.bfloat16, ttnn.TILE_LAYOUT, device, ttnn.DRAM_MEMORY_CONFIG
    )
    # CLIP emits 77 tokens; pad the sequence dim with 19 zero rows to reach
    # 96 so it matches the [2, 96, 768] device tensor (tile-friendly size).
    encoder_hidden_states_rand = torch.randn([2, 77, 768])
    encoder_hidden_states_rand = torch.nn.functional.pad(encoder_hidden_states_rand, (0, 0, 0, 19))
    encoder_hidden_states_rand = ttnn.from_torch(
        encoder_hidden_states_rand, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT
    )

    # COMPILE
    ttnn_scheduler.set_timesteps(num_inference_steps)
    ttnn.copy_host_to_device_tensor(encoder_hidden_states_rand, ttnn_text_embeddings_device, cq_id=0)
    output = ttnn.from_device(
        run(
            model,
            config,
            tt_vae,
            input_latents,
            ttnn_text_embeddings_device,
            _tlist,
            time_step,
            guidance_scale,
            ttnn_scheduler,
        )
    )

    # CAPTURE
    # Reset scheduler state and free the compile-pass output so the capture
    # run allocates the same buffers the trace will replay against.
    ttnn_scheduler.set_timesteps(num_inference_steps)
    ttnn.copy_host_to_device_tensor(encoder_hidden_states_rand, ttnn_text_embeddings_device, cq_id=0)
    output.deallocate(True)
    tid = ttnn.begin_trace_capture(device, cq_id=0)
    output = run(
        model,
        config,
        tt_vae,
        input_latents,
        ttnn_text_embeddings_device,
        _tlist,
        time_step,
        guidance_scale,
        ttnn_scheduler,
    )
    ttnn.end_trace_capture(device, tid, cq_id=0)
    ttnn.synchronize_device(device)

    return ttnn_text_embeddings_device, output, tid


def reshard_for_output_channels_divisibility(hidden_states, out_channels):
    """
    Reshard tensor to ensure output channels/32 are divisible by x dimension of shard grid.

    Args:
        hidden_states: Input tensor with block sharded memory layout
        out_channels: Number of output channels for the convolution

    Returns:
        Resharded tensor if needed, otherwise original tensor
    """
    memory_config = hidden_states.memory_config()
    if memory_config.memory_layout != ttnn.TensorMemoryLayout.BLOCK_SHARDED:
        return hidden_states

    channel_tiles = out_channels // ttnn.TILE_SIZE
    grid_size = memory_config.shard_spec.grid.bounding_box().grid_size()
    grid_x = grid_size.x
    grid_y = grid_size.y

    if channel_tiles % grid_x == 0:
        # Already divisible — nothing to do.
        return hidden_states

    # Largest divisor of channel_tiles that still fits within the current
    # grid x extent; search downward from the smaller of the two.
    new_x = next(
        (candidate for candidate in range(min(channel_tiles, grid_x), 0, -1) if channel_tiles % candidate == 0),
        None,
    )

    if new_x is not None and new_x != grid_x:
        # Keep the original y extent, shrink only the x extent.
        resharded_config = ttnn.create_sharded_memory_config(
            hidden_states.shape, ttnn.CoreGrid(x=new_x, y=grid_y), ttnn.ShardStrategy.BLOCK
        )
        hidden_states = ttnn.reshard(hidden_states, resharded_config)

    return hidden_states


# Hugging Face Hub repo IDs, used when weights are fetched from the hub.
STABLE_DIFFUSION_V1_4_MODEL_LOCATION = "CompVis/stable-diffusion-v1-4"
CLIP_VIT_LARGE_PATCH14_MODEL_LOCATION = "openai/clip-vit-large-patch14"
# Relative names resolved by `model_location_generator` in CI-v2 environments.
STABLE_DIFFUSION_CIV2_MODEL_LOCATION = "stable-diffusion-v1-4"
CLIP_VIT_LARGE_PATCH14_CIV2_MODEL_LOCATION = "clip-vit-large-patch14"


def get_reference_vae(is_ci_env, is_ci_v2_env, model_location_generator):
    """Load the reference SD v1.4 VAE on CPU.

    In CI-v2 environments the weights come from the pre-staged model
    location; otherwise they are loaded from the Hugging Face hub (offline
    when running in CI).
    """
    staged_location = model_location_generator(
        f"{STABLE_DIFFUSION_CIV2_MODEL_LOCATION}/vae", download_if_ci_v2=True, ci_v2_timeout_in_s=1800
    )
    if is_ci_v2_env:
        source, subfolder = staged_location, None
    else:
        source, subfolder = STABLE_DIFFUSION_V1_4_MODEL_LOCATION, "vae"
    vae = AutoencoderKL.from_pretrained(
        source,
        subfolder=subfolder,
        local_files_only=is_ci_env or is_ci_v2_env,
        use_safetensors=True,
    )
    return vae.to("cpu")


def get_reference_unet(is_ci_env, is_ci_v2_env, model_location_generator):
    """Load the reference SD v1.4 UNet on CPU.

    In CI-v2 environments the weights come from the pre-staged model
    location; otherwise they are loaded from the Hugging Face hub (offline
    when running in CI).
    """
    staged_location = model_location_generator(
        f"{STABLE_DIFFUSION_CIV2_MODEL_LOCATION}/unet", download_if_ci_v2=True, ci_v2_timeout_in_s=1800
    )
    if is_ci_v2_env:
        source, subfolder = staged_location, None
    else:
        source, subfolder = STABLE_DIFFUSION_V1_4_MODEL_LOCATION, "unet"
    unet = UNet2DConditionModel.from_pretrained(
        source,
        subfolder=subfolder,
        local_files_only=is_ci_env or is_ci_v2_env,
        use_safetensors=True,
    )
    return unet.to("cpu")


def get_reference_clip_tokenizer(is_ci_env, is_ci_v2_env, model_location_generator):
    """Load the reference CLIP ViT-L/14 tokenizer.

    In CI-v2 environments the files come from the pre-staged model location;
    otherwise they are loaded from the Hugging Face hub (offline when
    running in CI).
    """
    staged_location = model_location_generator(
        CLIP_VIT_LARGE_PATCH14_CIV2_MODEL_LOCATION, download_if_ci_v2=True, ci_v2_timeout_in_s=1800
    )
    source = staged_location if is_ci_v2_env else CLIP_VIT_LARGE_PATCH14_MODEL_LOCATION
    return CLIPTokenizer.from_pretrained(
        source,
        local_files_only=is_ci_env or is_ci_v2_env,
        use_safetensors=True,
    )


def get_reference_clip_text_encoder(is_ci_env, is_ci_v2_env, model_location_generator):
    """Load the reference CLIP ViT-L/14 text encoder.

    In CI-v2 environments the weights come from the pre-staged model
    location; otherwise they are loaded from the Hugging Face hub (offline
    when running in CI).
    """
    staged_location = model_location_generator(
        CLIP_VIT_LARGE_PATCH14_CIV2_MODEL_LOCATION, download_if_ci_v2=True, ci_v2_timeout_in_s=1800
    )
    source = staged_location if is_ci_v2_env else CLIP_VIT_LARGE_PATCH14_MODEL_LOCATION
    return CLIPTextModel.from_pretrained(
        source,
        local_files_only=is_ci_env or is_ci_v2_env,
        use_safetensors=True,
    )


def get_reference_stable_diffusion_pipeline(is_ci_env, is_ci_v2_env, model_location_generator):
    """Load the full reference SD v1.4 pipeline on CPU.

    In CI-v2 environments the weights come from the pre-staged model
    location; otherwise they are loaded from the Hugging Face hub (offline
    when running in CI).
    """
    staged_location = model_location_generator(
        STABLE_DIFFUSION_CIV2_MODEL_LOCATION, download_if_ci_v2=True, ci_v2_timeout_in_s=1800
    )
    source = staged_location if is_ci_v2_env else STABLE_DIFFUSION_V1_4_MODEL_LOCATION
    pipeline = StableDiffusionPipeline.from_pretrained(
        source,
        local_files_only=is_ci_env or is_ci_v2_env,
        use_safetensors=True,
    )
    return pipeline.to("cpu")
