"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci

"""

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid

from utils.util import (
    exists,
    default,
    count_params,
    instantiate_from_config,
    make_beta_schedule,
    extract_into_tensor,
    noise_like,
)
from modules.ema import LitEma
from abc import abstractmethod


class DDPM(pl.LightningModule):
    """
    Denoising diffusion probabilistic model (DDPM) with fixed variance
    schedules, for arbitrary tensor-shaped data modalities (not only images).

    This base class owns the noise schedule, the training losses and the
    reverse-diffusion sampling loop; the modality-specific pieces
    (``q_sample``, ``p_sample``, ``get_input``, ``noiser``, ``log_samples``)
    are abstract hooks that subclasses implement.

    Processing Data Modal:
      - type: Tensor
    """

    def __init__(
        self,
        denoiser_config,
        timesteps=1000,
        beta_schedule="linear",
        loss_type="l2",
        ckpt_path=None,
        ignore_keys=(),
        load_only_unet=False,
        monitor="val/loss",
        use_ema=True,
        first_stage_key="image",
        log_every_t=100,
        clip_denoised=True,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
        given_betas=None,
        original_elbo_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        conditioning_key=None,
        parameterization="eps",  # all assuming fixed variance schedules
        scheduler_config=None,
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0.0,
    ):
        """
        Build the denoiser, optional EMA shadow, noise schedule and loss
        configuration, then (last of all) restore from ``ckpt_path`` if given.

        Key arguments:
            denoiser_config: config dict passed to ``instantiate_from_config``
                to build the noise-prediction network.
            timesteps: number of diffusion steps T.
            beta_schedule: schedule name for ``make_beta_schedule`` (unless
                ``given_betas`` overrides it).
            loss_type: "l1" or "l2".
            ckpt_path / ignore_keys / load_only_unet: optional checkpoint
                restore; key prefixes in ``ignore_keys`` are dropped.
            parameterization: "eps" (predict noise) or "x0" (predict clean
                data).
            learn_logvar / logvar_init: optional learned per-timestep
                log-variance vector.
        """
        super().__init__()
        assert parameterization in [
            "eps",
            "x0",
        ], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(
            f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
        )
        # training_step drives the optimizer by hand, so Lightning must not
        # run its own backward/step on top of ours.
        self.automatic_optimization = False
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.use_positional_encodings = use_positional_encodings
        self.denoiser = DenoiserWrapper(denoiser_config, conditioning_key)
        count_params(self.denoiser, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.denoiser_ema = LitEma(self.denoiser)
            print(f"Keeping EMAs of {len(list(self.denoiser_ema.buffers()))}.")

        self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor

        self.register_schedule(
            given_betas=given_betas,
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )

        self.loss_type = loss_type
        self.learn_logvar = learn_logvar
        # Per-timestep log-variance; kept a plain tensor unless it is learned.
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

        # Restore the checkpoint LAST so every buffer/parameter it may contain
        # (schedule buffers, logvar, EMA weights) already exists; loading
        # earlier would report them as unexpected keys and then overwrite
        # them with freshly computed values.
        if ckpt_path is not None:
            self.init_from_ckpt(
                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet
            )

    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """
        Precompute and register (as non-trainable buffers) every quantity the
        forward process q(x_t | x_0), the posterior q(x_{t-1} | x_t, x_0) and
        the variational-bound weights need, from the beta schedule.
        """
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(
                beta_schedule,
                timesteps,
                linear_start=linear_start,
                linear_end=linear_end,
                cosine_s=cosine_s,
            )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # abar_{t-1}, with abar_{-1} := 1.
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), "alphas have to be defined for each timestep"

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer("betas", to_torch(betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
        )

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer("posterior_variance", to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            "posterior_log_variance_clipped",
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            "posterior_mean_coef1",
            to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            "posterior_mean_coef2",
            to_torch(
                (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
            ),
        )

        if self.parameterization == "eps":
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == "x0":
            # 0.5 * sqrt(abar_t) / (2 - abar_t), computed purely in torch
            # (the previous version applied the NumPy ufunc np.sqrt to a
            # torch.Tensor; 2.0 * 1 - a is just 2.0 - a).
            acp = to_torch(alphas_cumprod)
            lvlb_weights = 0.5 * torch.sqrt(acp) / (2.0 - acp)
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
        # any(): a single NaN weight already corrupts the VLB loss.
        assert not torch.isnan(self.lvlb_weights).any()

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap the denoiser weights for their EMA shadow."""
        if self.use_ema:
            self.denoiser_ema.store(self.denoiser.parameters())
            self.denoiser_ema.copy_to(self.denoiser)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.denoiser_ema.restore(self.denoiser.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=(), only_model=False):
        """
        Restore weights from a checkpoint file (non-strict), dropping every
        state-dict key that starts with one of ``ignore_keys``.
        If ``only_model`` is set, only the denoiser sub-module is loaded.
        """
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = (
            self.load_state_dict(sd, strict=False)
            if not only_model
            else self.denoiser.load_state_dict(sd, strict=False)
        )
        print(
            f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
        )
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        """Recover x_0 from x_t and a predicted noise: x_0 = (x_t - sqrt(1-abar_t)*eps) / sqrt(abar_t)."""
        return (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
            * noise
        )

    def q_posterior(self, x_start, x_t, t):
        """Mean/variance/log-variance of the posterior q(x_{t-1} | x_t, x_0)."""
        posterior_mean = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x_noisy, pred_noise, t, clip_denoised: bool):
        """
        Turn the network output into the parameters of p(x_{t-1} | x_t):
        reconstruct x_0 (from noise for "eps", directly for "x0"), optionally
        clamp it to [-1, 1], and feed it through the closed-form posterior.
        """
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x_noisy, t=t, noise=pred_noise)
        elif self.parameterization == "x0":
            x_recon = pred_noise
        else:
            # guard: previously an unknown parameterization left x_recon
            # undefined and raised a confusing NameError below
            raise NotImplementedError(
                f"Paramterization {self.parameterization} not yet supported"
            )
        if clip_denoised:
            x_recon.clamp_(-1.0, 1.0)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
            x_start=x_recon, x_t=x_noisy, t=t
        )
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def sample(self, x_noisy, return_intermediates=False):
        """
        Full reverse-diffusion loop: starting from ``x_noisy`` at t = T-1,
        apply ``p_sample`` down to t = 0. Intermediate visualizations (via
        ``log_samples``) are collected every ``log_every_t`` steps and
        returned when ``return_intermediates`` is set.
        """
        device = self.betas.device
        b = x_noisy.shape[0]
        intermediates = [self.log_samples(x_noisy)]
        for i in tqdm(
            reversed(range(0, self.num_timesteps)),
            desc="Sampling t",
            total=self.num_timesteps,
        ):
            t = torch.full((b,), i, device=device, dtype=torch.long)
            x_noisy = self.p_sample(x_noisy, t, self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(self.log_samples(x_noisy))
        if return_intermediates:
            return x_noisy, intermediates
        return x_noisy

    @torch.no_grad()
    @abstractmethod
    def p_sample(self, x, t, clip_denoised=True) -> torch.Tensor:
        """
        Perform one reverse (denoising) step, i.e. the generative direction.
        Arguments:
          x (torch.Tensor) : noisy input; should be the complete input data
              structure for the modality
          t (torch.Tensor) : per-batch-element timesteps matching x, usually
              drawn at random
        Returns:
          x_noisy (torch.Tensor) : the denoised result

        Examples:
        >>> b, *_ = x.shape
        >>> pred_noise = self.denoiser(x, t)
        >>> x_noisy = x[:, -1:]
        >>> model_mean, _, model_log_variance = self.p_mean_variance(x_noisy, pred_noise, t=t, clip_denoised=clip_denoised)
        >>> noise = torch.randn_like(x)
        >>> # no noise when t == 0
        >>> nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x_noisy.shape) - 1)))
        >>> output = x.detach().clone()
        >>> output[:, -1:] = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
        >>> return output
        """
        raise NotImplementedError

    @abstractmethod
    def q_sample(self, x_start, t) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Main forward-diffusion hook: produce the fully noised version of the
        input via the reparametrization trick, sampling from q(x_t | x_0):
        x_t = sqrt(alpha_t) * x_0 + sqrt(1 - alpha_t) * noise
        Arguments:
          x_start (torch.Tensor) : input data structure; may be a single image
              (plain DDPM) or something richer such as detection boxes or
              segmentation masks
          t (int) : per-batch-element timesteps matching x_start, usually
              drawn at random
        Returns:
          x_noisy (torch.Tensor) : the noised input
          noise (torch.Tensor) : the noise that was applied, serving as GT
        """
        raise NotImplementedError

    def get_loss(self, pred, target, mean=True):
        """
        Compute the loss between the denoiser output and a target; neither has
        to be in image format. When ``mean`` is False the return value must be
        a vector with one entry per batch element, e.g.:
        >>> def get_loss(self, pred, target, mean=True):
        >>>   if self.loss_type == 'l1':
        >>>     loss = (target - pred).abs().mean(dim=[1, 2, 3])
        >>>     if mean:
        >>>       loss = loss.mean()
        >>>   return loss
        """
        if self.loss_type == "l1":
            loss = (target - pred).abs().mean(dim=[1, 2, 3])
            if mean:
                loss = loss.mean()
        elif self.loss_type == "l2":
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(
                    target, pred, reduction="none"
                ).mean(dim=[1, 2, 3])
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

    def p_losses(self, x_start, t):
        """
        Core training computation: noise the input, run the denoiser, and
        combine the simple loss with the variational-bound term.
        Arguments:
            x_start (torch.Tensor) : input data structure; may be a single
                image (plain DDPM) or something richer such as detection boxes
                or segmentation masks
            t (int) : per-batch-element timesteps matching x_start, usually
                drawn at random
        Returns:
            (loss, loss_dict) where loss = l_simple_weight * L_simple
            + original_elbo_weight * L_vlb.
        """
        x_noisy, noise = self.q_sample(x_start=x_start, t=t)
        pred_noise = self.denoiser(x_noisy, t)

        loss_dict = {}
        # the regression target depends on what the network is trained to predict
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(
                f"Paramterization {self.parameterization} not yet supported"
            )

        loss = self.get_loss(pred_noise, target, mean=False)

        log_prefix = "train" if self.training else "val"

        loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f"{log_prefix}/loss": loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        """Draw a random timestep per batch element and compute the training loss."""
        t = torch.randint(
            0, self.num_timesteps, (x.shape[0],), device=self.device
        ).long()
        return self.p_losses(x, t, *args, **kwargs)

    def generate(self, x_noisy):
        # NOTE(review): this applies a single p_sample step at a *random*
        # timestep rather than running the full reverse loop (see `sample`)
        # — confirm this is the intended behavior for subclasses.
        t = torch.randint(
            0, self.num_timesteps, (x_noisy.shape[0],), device=self.device
        ).long()
        samples = self.p_sample(x_noisy, t)
        return samples

    @abstractmethod
    def get_input(self, batch: dict, k: list):
        """
        Fetch the input data from a dataloader batch. Note that the loaded
        tensors carry the channel dimension last, while torch's convolution
        modules expect channels in the second dimension, hence the rearrange.
        Arguments:
            batch (dict) : one batch from the dataloader
            k (list) : key into the batch used to fetch the input data
        """
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, "b h w c -> b c h w")
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        """Common train/val path: extract the input and compute the loss."""
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        """One manual-optimization step: forward, log, backward, update."""
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(
            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
        )

        self.log(
            "global_step",
            self.global_step,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )
        opt = self.optimizers()
        opt.zero_grad()
        # manual_backward (instead of a raw loss.backward()) lets Lightning's
        # precision/accelerator plugins hook into the backward pass.
        self.manual_backward(loss)
        opt.step()

    def on_train_batch_end(self, *args, **kwargs):
        # update the EMA shadow after every optimizer step
        if self.use_ema:
            self.denoiser_ema(self.denoiser)

    def on_train_epoch_end(self, *args, **kwargs):
        # with manual optimization the LR scheduler must be stepped by hand
        if self.scheduler_config is not None:
            scheduler = self.lr_schedulers()
            scheduler.step()

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation losses with both the raw and the EMA weights."""
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(
            loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )
        self.log_dict(
            loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )

    @abstractmethod
    def noiser(self, x_start):
        """
        Noise generator: produce noise matching a given input data format; the
        noise and the data must share the same shape.
        Arguments:
            x_start (torch.Tensor) : input data structure; may be a single
                image (plain DDPM) or a more complex structure.
        Examples:
            e.g. for image segmentation the input is a 4-channel tensor made
            of the image concatenated with a single-channel segmentation mask,
            and noise is only added to the mask, so this becomes:
            >>> def noiser(self, x_start):
            >>>   noise = x_start.clone()
            >>>   noise[:, -1] = torch.randn_like(x_start[:, -1])
            >>>   return noise
        """
        return torch.randn_like(x_start)

    def _get_rows_from_list(self, samples):
        """Stack a list of image batches into one grid with one batch per row."""
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, "n b c h w -> b n c h w")
        denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w")
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @abstractmethod
    def log_samples(self, samples):
        """
        Visualization hook: extract specific channels/positions from the model
        output, or overlay extra elements (e.g. detection boxes), to obtain a
        plain image for logging.
        Arguments:
            samples (torch.Tensor) : model output, not necessarily image-shaped
        Examples:
          e.g. for image segmentation the argument is a 4-channel tensor whose
          last channel is the segmentation mask and first three are the image;
          one option for drawing the mask onto the image is to keep the r/g
          channels and replace b with the mask:
          >>> def log_samples(self, samples):
          >>>   x = samples[:, :2]
          >>>   seg = samples[:, -1:]
          >>>   return torch.cat([x, seg], dim=1)
        """
        raise NotImplementedError

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        """
        Build a dict of visualizations: the inputs, a forward-diffusion row,
        and (optionally, under EMA weights) samples plus a denoising row.
        ``return_keys`` may restrict which entries are returned.
        """
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = self.log_samples(x)

        # get diffusion row
        x_start = x[:n_row]
        diffusion_row = []

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), "1 -> b", b=n_row).to(self.device).long()
                x_noisy, _ = self.q_sample(x_start=x_start, t=t)
                diffusion_row.append(self.log_samples(x_noisy))

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row, starting from the last (most noised) x_noisy
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(
                    x_noisy=x_noisy, return_intermediates=True
                )

            log["samples"] = self.log_samples(samples)
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        """AdamW over the denoiser parameters (plus logvar when learned)."""
        lr = self.learning_rate
        params = list(self.denoiser.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        if self.scheduler_config is not None:
            scheduler = torch.optim.lr_scheduler.ExponentialLR(
                opt, **self.scheduler_config
            )
            print(f"Setting up {scheduler.__class__} scheduler...")
            return [opt], [scheduler]
        return opt


class DenoiserWrapper(pl.LightningModule):
    """
    Thin wrapper that routes conditioning inputs to the denoising model.

    Supported modes: None (unconditional), "concat" (channel-wise
    concatenation), "crossattn" (context for cross-attention), "hybrid"
    (both concat and cross-attention), and "adm" (label-style conditioning).
    """

    def __init__(self, denoiser_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(denoiser_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, "concat", "crossattn", "hybrid", "adm"]

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        mode = self.conditioning_key
        if mode is None:
            return self.diffusion_model(x, t)
        if mode == "concat":
            stacked = torch.cat([x] + c_concat, dim=1)
            return self.diffusion_model(stacked, t)
        if mode == "crossattn":
            context = torch.cat(c_crossattn, 1)
            return self.diffusion_model(x, t, context=context)
        if mode == "hybrid":
            stacked = torch.cat([x] + c_concat, dim=1)
            context = torch.cat(c_crossattn, 1)
            return self.diffusion_model(stacked, t, context=context)
        if mode == "adm":
            # first conditioning entry is used as a class-label style input
            return self.diffusion_model(x, t, y=c_crossattn[0])
        raise NotImplementedError()