"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci

for Diffusion-based Detection Networks (RPN based)
"""

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from ..misc.utils import (
    extract_into_tensor,
    make_beta_schedule,
    default,
    instantiate_from_config,
)
import pytorch_lightning as pl
from contextlib import contextmanager
from ..misc.ema import LitEma


class GenerizedDDPM(pl.LightningModule):
    """
    Denoising diffusion probabilistic model (DDPM) for arbitrary tensor
    data, not only images: implements the forward noising process
    q(x_t | x_0), the learned reverse process p(x_{t-1} | x_t) and the
    simple / variational-lower-bound training losses.

    Processing Data Modal:
      - type: Tensor

    NOTE(review): ``p_losses`` calls ``self.denoise``, which is NOT
    defined on this base class -- subclasses are expected to provide it
    (see ``DDPM.denoise``); confirm every concrete subclass does.
    """

    def __init__(
        self,
        *,
        denoiser_config,
        timesteps=1000,
        beta_schedule="linear",
        loss_type="l2",
        ckpt_path=None,
        ignore_keys=[],
        load_only_unet=False,
        first_stage_key="image",
        monitor="val/loss",
        image_size: tuple = (256, 256),
        channels=3,
        log_every_t=100,
        clip_denoised=True,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
        given_betas=None,
        original_elbo_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        parameterization="eps",  # all assuming fixed variance schedules
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0.0,
        use_ema=True,
        automatic_optimization=True,
        **kwargs,
    ):
        """
        :param denoiser_config: config dict handed to ``instantiate_from_config``
            to build the noise-prediction network.
        :param timesteps: number of diffusion steps T.
        :param beta_schedule: name understood by ``make_beta_schedule``
            ("linear", "cosine", ...), ignored when ``given_betas`` is set.
        :param parameterization: "eps" (predict noise) or "x0" (predict clean data).
        :param v_posterior: interpolation weight for the posterior variance,
            sigma^2 = (1 - v) * beta_tilde + v * beta.
        :param use_ema: keep an exponential moving average of the denoiser weights.
        """
        super().__init__()
        assert parameterization in [
            "eps",
            "x0",
        ], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        self.automatic_optimization = automatic_optimization
        print(
            f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
        )
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.denoiser = instantiate_from_config(denoiser_config)
        self.use_ema = use_ema

        if monitor is not None:
            self.monitor = monitor

        if self.use_ema:
            self.denoiser_ema = LitEma(self.denoiser)
            print(f"Keeping EMAs of {len(list(self.denoiser_ema.buffers()))}.")

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        self.register_schedule(
            given_betas=given_betas,
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        # NOTE(review): when learn_logvar is False this stays a plain tensor
        # (not a buffer/parameter), so it will not follow the module across
        # devices or into checkpoints -- confirm that is intended.
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)
        if ckpt_path is not None:
            self.init_from_ckpt(
                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet
            )

    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Precompute and register every per-timestep diffusion constant as a buffer."""
        # `if given_betas:` would raise ValueError for a numpy array
        # (ambiguous truth value) and silently discard an all-zero schedule;
        # an identity test against None is the correct check.
        if given_betas is not None:
            betas = given_betas
        else:
            betas = make_beta_schedule(
                beta_schedule,
                timesteps,
                linear_start=linear_start,
                linear_end=linear_end,
                cosine_s=cosine_s,
            )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # alpha_bar_{t-1}, with alpha_bar_{-1} := 1
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), "alphas have to be defined for each timestep"

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer("betas", to_torch(betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
        )

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer("posterior_variance", to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            "posterior_log_variance_clipped",
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            "posterior_mean_coef1",
            to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            "posterior_mean_coef2",
            to_torch(
                (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
            ),
        )

        # Per-timestep weights for the variational lower bound term.
        if self.parameterization == "eps":
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == "x0":
            # Stay in torch throughout: applying np.sqrt to a torch tensor
            # (as the original did) can coerce the result to a numpy array,
            # which register_buffer rejects. Arithmetic is unchanged.
            lvlb_weights = (
                0.5
                * torch.sqrt(to_torch(alphas_cumprod))
                / (2.0 * 1 - to_torch(alphas_cumprod))
            )
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        # posterior_variance[0] ~ 0 makes the eps-weight at t=0 blow up;
        # copy the t=1 weight instead.
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
        # The invariant is "no NaN anywhere", so .any() (the original .all()
        # only failed when *every* entry was NaN).
        assert not torch.isnan(self.lvlb_weights).any()

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap the denoiser weights for their EMA copies."""
        if self.use_ema:
            self.denoiser_ema.store(self.denoiser.parameters())
            self.denoiser_ema.copy_to(self.denoiser)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            # Always restore the live training weights, even on exceptions.
            if self.use_ema:
                self.denoiser_ema.restore(self.denoiser.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        """
        Load weights from a checkpoint file.

        :param path: checkpoint path.
        :param ignore_keys: state-dict key prefixes to drop before loading.
        :param only_model: load into the denoiser only instead of the whole module.
        """
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = (
            self.load_state_dict(sd, strict=False)
            if not only_model
            else self.denoiser.load_state_dict(sd, strict=False)
        )
        print(
            f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
        )
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        """Recover x_0 from x_t and predicted noise: x_0 = x_t / sqrt(a_bar) - noise * sqrt(1/a_bar - 1)."""
        return (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
            * noise
        )

    def q_posterior(self, x_start, x_t, t):
        """Mean, variance and clipped log-variance of q(x_{t-1} | x_t, x_0)."""
        posterior_mean = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool, **kwargs):
        """
        Model distribution p(x_{t-1} | x_t): predict x_0 (directly or via
        predicted noise), optionally clamp it to [-1, 1], and plug it into
        the true posterior.
        """
        pred_noise = self.denoiser(x, t, **kwargs)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=pred_noise)
        elif self.parameterization == "x0":
            x_recon = pred_noise
        if clip_denoised:
            x_recon.clamp_(-1.0, 1.0)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
            x_start=x_recon, x_t=x, t=t
        )
        return model_mean, posterior_variance, posterior_log_variance

    # @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, **kwargs):
        """Draw x_{t-1} ~ p(x_{t-1} | x_t) for a batch at (per-sample) timestep t."""
        b = x.shape[0]
        model_mean, _, model_log_variance = self.p_mean_variance(
            x=x, t=t, clip_denoised=clip_denoised, **kwargs
        )
        noise = torch.randn_like(x, device=x.device)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, x_noisy: torch.Tensor, return_intermediates=False):
        """
        Run the full reverse chain t = T-1 ... 0 starting from x_noisy.

        :param x_noisy: starting point (typically pure Gaussian noise).
        :param return_intermediates: also return snapshots every log_every_t steps.
        """
        device = self.betas.device
        b = x_noisy.shape[0]
        img = x_noisy.detach().clone()
        intermediates = [img]
        for i in tqdm(
            reversed(range(0, self.num_timesteps)),
            desc="Sampling t",
            total=self.num_timesteps,
        ):
            img = self.p_sample(
                img,
                torch.full((b,), i, device=device, dtype=torch.long),
                clip_denoised=self.clip_denoised,
            )
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    def q_sample(self, x_start, t, noise=None):
        """
        use reparametrization trick to sample from q(x_t | x_0)
        x_t = sqrt(alpha_t) * x_0 + sqrt(1 - alpha_t) * noise
        """
        noise = default(noise, lambda: torch.randn_like(x_start, device=x_start.device))
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
            * noise
        )

    def get_loss(self, pred, target, mean=True):
        """Elementwise (or mean-reduced) l1/l2 loss between prediction and target."""
        if self.loss_type == "l1":
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == "l2":
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction="none")
        else:
            # was a plain string with a literal '{loss_type}' placeholder;
            # make it an actual f-string
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")

        return loss

    def p_losses(self, x_start, t, noise=None, **kwargs):
        """
        Training loss at timesteps t: simple loss + weighted VLB term.

        :return: (scalar loss, dict of logged loss components).
        """
        # `forward` calls p_losses without a noise argument; without this
        # default, `target = noise` below would be None in eps mode.
        noise = default(noise, lambda: torch.randn_like(x_start))
        # NOTE(review): self.denoise must be supplied by the subclass and is
        # expected to return the noise prediction only (DDPM.denoise returns
        # a (x_noisy, pred) tuple and overrides p_losses accordingly).
        pred_noise = self.denoise(x_start, t, noise)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(
                f"Paramterization {self.parameterization} not yet supported"
            )

        # mean over all non-batch dims; assumes 4-D (B, C, H, W) input
        loss = self.get_loss(pred_noise, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = "train" if self.training else "val"

        loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f"{log_prefix}/loss": loss})

        return loss, loss_dict

    def loss_rec(self, x_start, pred_noise, t, noise=None):
        """
        Reconstruction loss for an externally computed prediction.

        NOTE(review): in eps mode the caller MUST pass the noise that was
        used to produce pred_noise; the None default would crash get_loss.
        """
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(
                f"Paramterization {self.parameterization} not yet supported"
            )

        loss = self.get_loss(pred_noise, target, mean=False).mean(dim=[1, 2, 3])

        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        return loss

    def forward(self, batch, *args, **kwargs):
        """Sample a random timestep per batch element and return p_losses on it."""
        x = self.get_input(batch, self.first_stage_key)
        t = torch.randint(
            0, self.num_timesteps, (x.shape[0],), device=self.device
        ).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, keys):
        """
        Pull the tensors named by `keys` out of `batch`, concatenate them and
        convert from channels-last to channels-first.
        """
        # A bare string key (e.g. the default first_stage_key="image") would
        # otherwise be iterated character by character and match nothing.
        if isinstance(keys, str):
            keys = [keys]
        output = []
        for k in keys:
            if k in batch:
                x = batch[k]
                # promote (B, H, W) to (B, H, W, 1); 4-D passes through
                # (the original's len==4 and else branches were identical)
                if len(x.shape) == 3:
                    x = x[..., None]
                x = x.to(memory_format=torch.contiguous_format).float()
                output.append(x)
        # NOTE(review): concatenating along dim=1 (height, under the NHWC
        # layout implied by the permute below) looks suspicious -- confirm
        # dim=-1 (channels) was not intended.
        return torch.cat(output, dim=1).to(self.device).permute(0, 3, 1, 2)

    def _get_rows_from_list(self, samples):
        """Arrange a list of (B, C, H, W) snapshots into one image grid."""
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, "n b c h w -> b n c h w")
        denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w")
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        """
        Build a dict of visualizations: inputs, a forward-diffusion row and
        (optionally) samples with their denoising trajectory.
        """
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = torch.tensor([t] * n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start, device=x_start.device)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # Start the reverse chain from fresh Gaussian noise shaped like
            # the logged inputs. (The original reused the `noise` variable
            # leaked from the diffusion-row loop above, which has n_row rows
            # instead of N.)
            samples, denoise_row = self.p_sample_loop(
                torch.randn_like(x), return_intermediates=True
            )

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log


class DDPM(GenerizedDDPM):
    """
    Concrete unconditional DDPM: adds the training/validation/test loops,
    EMA updates, optimizer and (optional) LR-scheduler setup on top of
    GenerizedDDPM.
    """

    def __init__(
        self,
        *,
        denoiser_config,
        scheduler_config,
        timesteps=1000,
        beta_schedule="linear",
        monitor="val/loss",
        loss_type="l2",
        ckpt_path=None,
        ignore_keys=[],
        load_only_unet=False,
        use_ema=True,
        first_stage_key="image",
        image_size: tuple = (256, 256),
        channels=3,
        log_every_t=100,
        clip_denoised=True,
        linear_start=0.0001,
        linear_end=0.02,
        cosine_s=0.008,
        given_betas=None,
        original_elbo_weight=0,
        v_posterior=0,
        l_simple_weight=1,
        parameterization="eps",
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0,
        learning_rate=5e-5,
        automatic_optimization=True,
        **kwargs,
    ):
        """
        :param scheduler_config: kwargs for torch ExponentialLR, or None/falsy
            to train with a constant learning rate.
        :param learning_rate: AdamW learning rate.
        Remaining parameters are forwarded unchanged to GenerizedDDPM.
        """
        super().__init__(
            denoiser_config=denoiser_config,
            timesteps=timesteps,
            beta_schedule=beta_schedule,
            loss_type=loss_type,
            ckpt_path=ckpt_path,
            ignore_keys=ignore_keys,
            load_only_unet=load_only_unet,
            use_ema=use_ema,
            monitor=monitor,
            first_stage_key=first_stage_key,
            image_size=image_size,
            channels=channels,
            log_every_t=log_every_t,
            clip_denoised=clip_denoised,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
            given_betas=given_betas,
            original_elbo_weight=original_elbo_weight,
            v_posterior=v_posterior,
            l_simple_weight=l_simple_weight,
            parameterization=parameterization,
            use_positional_encodings=use_positional_encodings,
            learn_logvar=learn_logvar,
            logvar_init=logvar_init,
            automatic_optimization=automatic_optimization,
            **kwargs,
        )
        self.scheduler_config = scheduler_config
        self.learning_rate = learning_rate

    def p_losses(self, x_start, t, noise=None):
        """
        Training loss at timesteps t: diffuse x_start, predict the noise,
        and combine the simple loss with the weighted VLB term.

        :return: (scalar loss, dict of logged loss components).
        """
        noise = default(noise, lambda: torch.randn_like(x_start, device=x_start.device))
        x_noisy, pred_noise = self.denoise(x_start, t, noise)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(
                f"Paramterization {self.parameterization} not yet supported"
            )

        # mean over all non-batch dims; assumes 4-D (B, C, H, W) input
        loss = self.get_loss(pred_noise, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = "train" if self.training else "val"

        loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f"{log_prefix}/loss": loss})

        return loss, loss_dict

    def denoise(self, x_start, t, noise=None):
        """Diffuse x_start to timestep t and run the denoiser; returns (x_noisy, prediction)."""
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        pred_noise = self.denoiser(x_noisy, t)
        return x_noisy, pred_noise

    @torch.no_grad()
    def test_step(self, batch, batch_idx):
        """Sample from pure Gaussian noise and return samples next to the inputs."""
        log = {}
        x_start = self.get_input(batch, self.first_stage_key)
        noise = torch.randn_like(x_start, device=x_start.device)
        # (removed dead code: a random t and q_sample(x_noisy) were computed
        # here but never used -- sampling starts from pure noise)
        samples = self.p_sample_loop(noise)
        log["samples"] = samples
        log["inputs"] = x_start
        return log

    def training_step(self, batch, batch_idx):
        """One training step; steps the optimizer manually only in manual-optimization mode."""
        loss, loss_dict = self(batch)

        # Manually zeroing/stepping the optimizer is only legal when
        # Lightning's automatic optimization is disabled. The original
        # always did it, which double-drives the optimizer (or errors)
        # under automatic_optimization=True, the default. In automatic mode
        # we simply return the loss and let Lightning handle backward/step.
        if not self.automatic_optimization:
            opt = self.optimizers()
            opt.zero_grad()
            self.manual_backward(loss)
            opt.step()

        self.log_dict(
            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
        )

        self.log(
            "global_step",
            self.global_step,
            prog_bar=False,
            logger=True,
            on_step=True,
            on_epoch=True,
        )
        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Log validation losses with both the live and the EMA weights."""
        _, loss_dict_no_ema = self(batch)
        with self.ema_scope():
            _, loss_dict_ema = self(batch)
            loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(
            loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )
        self.log_dict(
            loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )

    def on_train_batch_end(self, *args, **kwargs):
        """Update the EMA shadow weights after every optimizer step."""
        if self.use_ema:
            self.denoiser_ema(self.denoiser)

    def on_train_epoch_end(self, *args, **kwargs):
        """Advance the LR scheduler once per epoch, if one is configured."""
        if self.scheduler_config:
            # was misleadingly named `lr`: lr_schedulers() returns the scheduler
            sch = self.lr_schedulers()
            sch.step()

    def configure_optimizers(self):
        """AdamW over the denoiser (plus logvar if learned), with optional ExponentialLR."""
        lr = self.learning_rate
        params = list(self.denoiser.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        if self.scheduler_config is not None:
            scheduler = torch.optim.lr_scheduler.ExponentialLR(
                opt, **self.scheduler_config
            )
            print(f"Setting up {scheduler.__class__} scheduler...")
            return [opt], [scheduler]
        return opt


class ConditionDDPM(GenerizedDDPM):
    """
    Conditional DDPM variant: the reverse-diffusion step adds an extra
    ``cond * noise`` term on top of the usual posterior sample, so a
    conditioning tensor steers every denoising step.
    """

    def __init__(
        self,
        *,
        denoiser_config,
        scheduler_config,
        timesteps=1000,
        beta_schedule="linear",
        monitor="val/loss",
        loss_type="l2",
        ckpt_path=None,
        ignore_keys=[],
        load_only_unet=False,
        use_ema=True,
        first_stage_key="image",
        image_size: tuple = (256, 256),
        channels=3,
        log_every_t=100,
        clip_denoised=True,
        linear_start=0.0001,
        linear_end=0.02,
        cosine_s=0.008,
        given_betas=None,
        original_elbo_weight=0,
        v_posterior=0,
        l_simple_weight=1,
        parameterization="eps",
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0,
        learning_rate=5e-5,
        automatic_optimization=True,
        **kwargs,
    ):
        """Forward everything except scheduler_config/learning_rate to the base class."""
        base_kwargs = dict(
            denoiser_config=denoiser_config,
            timesteps=timesteps,
            beta_schedule=beta_schedule,
            loss_type=loss_type,
            ckpt_path=ckpt_path,
            ignore_keys=ignore_keys,
            load_only_unet=load_only_unet,
            use_ema=use_ema,
            monitor=monitor,
            first_stage_key=first_stage_key,
            image_size=image_size,
            channels=channels,
            log_every_t=log_every_t,
            clip_denoised=clip_denoised,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
            given_betas=given_betas,
            original_elbo_weight=original_elbo_weight,
            v_posterior=v_posterior,
            l_simple_weight=l_simple_weight,
            parameterization=parameterization,
            use_positional_encodings=use_positional_encodings,
            learn_logvar=learn_logvar,
            logvar_init=logvar_init,
            automatic_optimization=automatic_optimization,
        )
        base_kwargs.update(kwargs)
        super().__init__(**base_kwargs)
        self.scheduler_config = scheduler_config
        self.learning_rate = learning_rate

    # @torch.no_grad()
    def p_sample(self, x, t, cond, clip_denoised=True, **kwargs):
        """
        One conditional reverse step: sample from the model posterior and
        inject `cond`-scaled noise on top of the stochastic term.
        """
        mean, _, log_variance = self.p_mean_variance(
            x=x, t=t, clip_denoised=clip_denoised, **kwargs
        )
        eps = torch.randn_like(x, device=x.device)
        batch = x.shape[0]
        # mask out the stochastic posterior term where t == 0
        mask_shape = (batch,) + (1,) * (x.dim() - 1)
        keep = (1 - (t == 0).float()).reshape(mask_shape)
        return mean + cond * eps + keep * (0.5 * log_variance).exp() * eps

    @torch.no_grad()
    def p_sample_loop(self, x_noisy: torch.Tensor, cond, return_intermediates=False):
        """
        Run the conditional reverse chain t = T-1 ... 0 starting from x_noisy,
        optionally collecting snapshots every log_every_t steps.
        """
        device = self.betas.device
        batch = x_noisy.shape[0]
        current = x_noisy.detach().clone()
        trajectory = [current]
        steps = reversed(range(self.num_timesteps))
        for step in tqdm(steps, desc="Sampling t", total=self.num_timesteps):
            t_batch = torch.full((batch,), step, device=device, dtype=torch.long)
            current = self.p_sample(
                current, t_batch, cond, clip_denoised=self.clip_denoised
            )
            if step % self.log_every_t == 0 or step == self.num_timesteps - 1:
                trajectory.append(current)
        return (current, trajectory) if return_intermediates else current

    @torch.no_grad()
    def test_step(self, batch, batch_idx, s=0.04):
        """Sample from pure noise, conditioned on a scaled copy (s * x) of the inputs."""
        x_start = self.get_input(batch, self.first_stage_key)
        noise = torch.randn_like(x_start, device=x_start.device)
        samples = self.p_sample_loop(noise, cond=s * x_start)
        return {"samples": samples, "inputs": x_start}