from models.generic_ddpm import GenerizedDDPM
import torch
from torch.nn import functional as F
from einops import repeat
import numpy as np
from modules.blocks.openaimodels import UNet
from utils.util import default, extract_into_tensor


class WollebUNet(UNet):
    """UNet wrapper that returns the backbone output unchanged.

    A channel-wise softmax was considered as a post-processing step (see
    the disabled line below) but the model currently emits raw logits.
    """

    def forward(self, x, *args, **kwargs):
        # Delegate entirely to the parent UNet; no post-processing.
        out = super().forward(x, *args, **kwargs)
        # out = F.softmax(out, dim=1)  # intentionally disabled
        return out


class WollebDDPM(GenerizedDDPM):
    """DDPM that diffuses only the last (segmentation) channel of its input.

    The conditioning image channels (everything except channel ``-1``) are
    kept noise-free: ``noiser`` zeroes their noise, and ``q_sample`` /
    ``predict_start_from_noise`` / ``q_posterior`` rewrite only the last
    channel, copying the rest through unchanged.
    """

    def __init__(
        self,
        *,
        denoiser_config,
        ckpt_path=None,
        ignore_keys=None,
        first_stage_key,
        monitor="val/loss",
        use_ema=True,
        log_every_t=100,
        clip_denoised=True,
        scheduler_config=None,
        logvar_init=0,
        automatic_optimization=True,
        **kwargs,
    ):
        """Forward all DDPM configuration to the base class.

        BUG FIX: ``ignore_keys`` previously defaulted to a mutable list
        literal (shared across calls); ``None`` is used as the sentinel and
        normalized to ``[]`` before being passed on — backward-compatible.
        """
        super().__init__(
            denoiser_config=denoiser_config,
            ckpt_path=ckpt_path,
            ignore_keys=[] if ignore_keys is None else ignore_keys,
            monitor=monitor,
            first_stage_key=first_stage_key,
            use_ema=use_ema,
            log_every_t=log_every_t,
            clip_denoised=clip_denoised,
            scheduler_config=scheduler_config,
            logvar_init=logvar_init,
            **kwargs,
        )
        # Lightning switch: when False, training_step performs manual
        # backward/step itself (see training_step).
        self.automatic_optimization = automatic_optimization

    def get_input(self, batch, keys):
        """Fetch the tensors named in ``keys`` and concatenate them on dim 1.

        Rank-3 tensors (presumably (B, H, W) — TODO confirm) gain a channel
        axis; rank-4 tensors are assumed channel-last (B, H, W, C) and are
        permuted to channel-first.  All tensors are made contiguous and cast
        to float.  Keys missing from ``batch`` are silently skipped; if no
        key matches, ``torch.cat`` on an empty list will raise.
        """
        output = []
        for k in keys:
            if k in batch:
                x = batch[k]
                if len(x.shape) == 3:
                    # (B, H, W) -> (B, 1, H, W)
                    x = (
                        x[..., None]
                        .permute(0, 3, 1, 2)
                        .to(memory_format=torch.contiguous_format)
                        .float()
                    )
                elif len(x.shape) == 4:
                    # channel-last -> channel-first
                    x = (
                        x.permute(0, 3, 1, 2)
                        .to(memory_format=torch.contiguous_format)
                        .float()
                    )
                else:
                    x = x.to(memory_format=torch.contiguous_format).float()
                output.append(x)
        return torch.cat(output, dim=1)

    def get_loss(self, pred: torch.Tensor, target: torch.Tensor, mean=True):
        """Compute the training loss on the segmentation channel only.

        Arguments:
            pred: network output, last channel is the segmentation logits.
            target: ground truth, last channel in [-1, 1].
            mean: if True return a scalar mean, else the per-element loss.
        Raises:
            NotImplementedError: for an unknown ``self.loss_type``.
        """
        pred = pred[:, -1:]
        target = target[:, -1:]
        if self.loss_type == "bce":
            # Map target from [-1, 1] to [0, 1] for BCE.
            target = (target + 1) / 2
            # NOTE(review): this is not the standard soft-Dice loss — the
            # intersection term is element-wise (not summed over H, W) and
            # the usual factor of 2 is missing; ``pred`` here is also raw
            # logits, not probabilities.  Kept byte-identical to preserve
            # training behavior — confirm intent before "fixing".
            dice_loss = 1 - (pred * target) / (
                pred.sum(dim=[1, 2, 3], keepdim=True)
                + target.sum(dim=[1, 2, 3], keepdim=True)
            )
            bce_loss = F.binary_cross_entropy_with_logits(
                pred, target, reduction="none"
            )
            loss = dice_loss + bce_loss
        elif self.loss_type == "l2":
            loss = F.mse_loss(target, pred, reduction="none")
        elif self.loss_type == "l1":
            loss = F.smooth_l1_loss(target, pred, reduction="none")
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
        if mean:
            return loss.mean()
        return loss

    def predict_start_from_noise(self, x_t, t, noise):
        """Recover x_0 from x_t and the predicted noise, last channel only.

        All other channels are copied through unchanged.  (The original file
        defined this method twice with identical bodies; the shadowed
        duplicate has been removed.)
        """
        output = x_t.clone()
        output[:, -1:] = (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t[:, -1:].shape)
            * x_t[:, -1:]
            - extract_into_tensor(
                self.sqrt_recipm1_alphas_cumprod, t, x_t[:, -1:].shape
            )
            * noise[:, -1:]
        )
        return output

    def q_sample(self, x_start, t, noise=None):
        """Sample from q(x_t | x_0) via the reparametrization trick.

        x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
        applied to the last channel only; conditioning channels pass through.
        """
        noise = default(noise, lambda: self.noiser(x_start))
        output = x_start.clone()
        output[:, -1:] = (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, noise[:, -1:].shape)
            * x_start[:, -1:]
            + extract_into_tensor(
                self.sqrt_one_minus_alphas_cumprod, t, noise[:, -1:].shape
            )
            * noise[:, -1:]
        )
        return output

    def q_posterior(self, x_start, x_t, t):
        """Return mean/variance of q(x_{t-1} | x_t, x_0).

        Only the last channel of the mean is recomputed; the variance tensors
        are broadcast over the full shape as in the base implementation.
        """
        posterior_mean = x_t.clone()
        posterior_mean[:, -1:] = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t[:, -1:].shape)
            * x_start[:, -1:]
            + extract_into_tensor(self.posterior_mean_coef2, t, x_t[:, -1:].shape)
            * x_t[:, -1:]
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def training_step(self, batch, batch_idx):
        """Run one training step, logging losses.

        BUG FIX: the original unconditionally called ``loss.backward()`` and
        stepped the optimizer even when ``automatic_optimization`` is True,
        in which case Lightning also performs backward/step — double-applying
        gradients (or raising).  Manual stepping is now gated on the flag and
        uses ``self.manual_backward`` so precision/strategy plugins are
        respected.
        """
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(
            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
        )
        if not self.automatic_optimization:
            opt = self.optimizers()
            opt.zero_grad()
            self.manual_backward(loss)
            opt.step()
        return loss

    def test_step(self, batch, batch_idx):
        """Sample a segmentation for the batch and log the result as images.

        The segmentation channel is replaced with pure Gaussian noise before
        sampling; the conditioning channels are kept from the batch.
        (An unused random ``t`` tensor from the original was removed.)
        """
        x = self.get_input(batch, self.first_stage_key)
        x_start = x.clone()
        x_start[:, -1] = torch.randn_like(x_start[:, -1])
        samples = self.sample(img=x_start, return_intermediates=False)
        samples = self.log_samples(samples)
        self.logger.experiment.add_image("sample", samples, dataformats="NCHW")

    def on_train_epoch_end(self, *args, **kwargs):
        """Step the LR scheduler once per epoch when one is configured.

        NOTE(review): with automatic optimization, Lightning may also step
        schedulers returned from ``configure_optimizers`` — verify this does
        not double-step.
        """
        if self.scheduler_config is not None:
            scheduler = self.lr_schedulers()
            scheduler.step()

    def log_samples(self, samples):
        """Convert a multi-channel sample into a 3-channel display image.

        Arguments:
            samples (torch.Tensor): concatenation of the input image channels
                and the segmentation map along dim 1.

        Takes the first two channels plus the last (segmentation) channel.
        NOTE(review): ``noiser`` treats the first three channels as
        conditioning, while this takes only two — confirm channel layout.
        """
        x = samples[:, :2]
        seg = samples[:, -1:]
        return torch.cat([x, seg], dim=1)

    # @rank_zero_only
    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        """Build a dict of visualization tensors for the logger.

        Produces the inputs, a forward-diffusion row, and (optionally) samples
        with their denoising row under the EMA weights.  ``return_keys``
        filters the result when it intersects the produced keys.
        """
        log = dict()
        x_seg = self.get_input(batch, self.first_stage_key)
        N = min(x_seg.shape[0], N)
        n_row = min(x_seg.shape[0], n_row)
        x_seg = x_seg.to(self.device)[:N]
        log["inputs"] = self.log_samples(x_seg)

        # Forward-diffusion row: progressively noisier versions of x_start.
        x_start = x_seg[:n_row]
        diffusion_row = []

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), "1 -> b", b=n_row).to(self.device).long()
                x_noisy = self.q_sample(x_start=x_start, t=t)
                x_noisy = self.log_samples(x_noisy)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # Denoising row: sample under EMA weights from a noised seg channel.
            with self.ema_scope("Plotting"):
                x = x_start.clone()
                x[:, -1] = torch.randn_like(x[:, -1])
                samples, denoise_row = self.sample(img=x, return_intermediates=True)

            log["samples"] = self.log_samples(samples)
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def noiser(self, x_start):
        """Gaussian noise with the conditioning channels zeroed.

        Channels ``[:3]`` get zero noise so diffusion only perturbs the
        remaining (segmentation) channel(s).
        """
        noise = torch.randn_like(x_start)
        noise[:, :3, ...] = 0
        return noise