# The actual diffusion model for ppgddpm.
# This class implements the whole pipeline: forward diffusion, reverse diffusion, and loss computation.

import os

from utils.ddpm_utils.gaussian_diffusion import *
from utils.ddpm_utils.gaussian_diffusion import _extract_into_tensor, _WrappedModel
import torch
from utils import logger
from PIL import Image
import numpy as np
import torch.fft as fft
import matplotlib.pyplot as plt
from astropy.convolution import convolve_fft

class GaussianDiffusion(GaussianDiffusion):
    """
    Physics-guided DDPM: forward diffusion, reverse diffusion (DDPM/DDIM
    predictors) and the training loss, which combines the standard
    epsilon-prediction MSE with a physical consistency term (the predicted
    clean image convolved with the PSF should reproduce the dirty image).

    :param beta_scale: float, to scale the variance.
    :param alphas: a 1-D numpy array of alphas for each diffusion timestep, starting at 1 and going to T.
    :param betas: a 1-D numpy array of betas for each diffusion timestep, starting at 1 and going to T.
    :param diffusion_type: a DiffusionType determining which diffusion model is used.
    :param model_mean_type: a ModelMeanType determining what the model outputs.
    :param model_var_type: a ModelVarType determining how variance is output.
    :param predictor_type: a PredictorType determining which predictor is used.
    :param corrector_type: a CorrectorType determining which corrector is used.
    :param loss_type: a LossType determining the loss function to use.
    :param rescale_timesteps: if True, pass floating point timesteps into the model so that they are always scaled like
        in the original paper (0 to 1000).
    :param sampling_kwargs: hyper-parameters used for predictor or corrector.
    """

    def __init__(self, beta_scale, *args, **kwargs):
        self.beta_scale = beta_scale
        super().__init__(*args, **kwargs)

        # Global training-step counter; schedules the periodic image dumps and
        # TensorBoard logging performed in training_losses().
        self.step = 0

    def q_sample(self, x_start, t):
        """
        Diffuse the data for a given number of diffusion steps. In other words, sample from q(x_t | x_0).

        :param x_start: the initial data batch.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: (x_t, noise) — a noisy version of x_start and the noise that produced it.
        """
        x_t, noise = super().q_sample(x_start, t)
        return x_t, noise

    def q_posterior_mean_variance(self, x_start, x_t, t):
        """
        Compute the mean and variance of the diffusion posterior: q(x_{t-1} | x_t, x_0).

        :return: (posterior_mean, posterior_variance, posterior_log_variance_clipped).
        """
        posterior_mean, posterior_variance, posterior_log_variance_clipped = (
            super().q_posterior_mean_variance(x_start, x_t, t)
        )
        return (
            posterior_mean,
            posterior_variance,
            posterior_log_variance_clipped
        )

    def p_eps_std(self, model, x_t, t, model_kwargs):
        """
        Apply the model to compute the "epsilon" item and the std parameter used in predictor or corrector.

        :param model: the model, which takes a signal and a batch of timesteps as input.
        :param x_t: the [N x C x ...] tensor at time t.
        :param t: a 1-D Tensor of timesteps.
        :param model_kwargs: conditioning information; must not be None.
        :return: (eps, std).
        """
        assert model_kwargs is not None, "model_kwargs contains the conditions"
        eps, std = super().p_eps_std(model, x_t, t, model_kwargs=model_kwargs)
        return eps, std

    def ddim_predictor(self, model, x_t, t, model_kwargs, clip=False):
        """
        DDIM-Predictor: sample x_{t-1} from x_t.

        :param model: the model to sample from.
        :param x_t: the current tensor at time t.
        :param t: the value of t, starting at T for the first diffusion step.
        :param model_kwargs: conditioning information; must not be None.
        :param clip: if True, clip the x_start prediction to [-1, 1].
        :return: a random sample from the model.
        """
        if self.sampling_kwargs is None:
            eta = 0.0
        else:
            assert "eta" in self.sampling_kwargs.keys(), "in ddim-predictor, eta is a hyper-parameter"
            eta = self.sampling_kwargs["eta"]
        assert model_kwargs is not None, "model_kwargs contains the conditions"
        eps, std = self.p_eps_std(model, x_t, t, model_kwargs=model_kwargs)

        # Predict x_0 from (x_t, eps), optionally clipped to [-1, 1].
        pred_xstart = _extract_into_tensor(self.recip_bar_alphas, t, x_t.shape) * \
                      (x_t - _extract_into_tensor(self.bar_betas, t, x_t.shape) * eps)
        pred_xstart = self._clip(pred_xstart, clip=clip)

        # Re-derive eps from the (possibly clipped) x_0 prediction.
        eps = (x_t - _extract_into_tensor(self.bar_alphas, t, x_t.shape) * pred_xstart) / \
              _extract_into_tensor(self.bar_betas, t, x_t.shape)

        # DDIM update, following the guided-diffusion implementation.
        bar_alpha = _extract_into_tensor(self.bar_alphas_square, t, x_t.shape)
        bar_alpha_prev = _extract_into_tensor(self.bar_alphas_square_prev, t, x_t.shape)
        sigma = eta * th.sqrt((1 - bar_alpha_prev) / (1 - bar_alpha)) \
                * th.sqrt(1 - bar_alpha / bar_alpha_prev)

        mean_pred = pred_xstart * th.sqrt(bar_alpha_prev) + th.sqrt(1 - bar_alpha_prev - sigma ** 2) * eps

        noise = th.randn_like(x_t)
        nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x_t.shape) - 1)))  # no noise when t == 0
        sample = mean_pred + nonzero_mask * sigma * noise

        return sample

    def sample_loop(self, model, shape, img, model_kwargs, clip=False, noise=None, num_steps=500):
        """
        Generate samples from the model, starting the reverse process from `img`.

        :param model: the model module.
        :param shape: the shape of the samples, (N, C, H, W).
        :param img: the starting (dirty) image; normalized to [-1, 1] before sampling
            and mapped back to its original brightness range afterwards.
        :param clip: if True, clip x_start predictions to [-1, 1].
        :param noise: if specified, the noise from the encoder to sample. Should be of the same
            shape as `shape`. (Currently unused; kept for interface compatibility.)
        :param num_steps: number of reverse steps to run (default 500, matching the
            previously hard-coded value). NOTE(review): this should presumably equal
            the diffusion's total number of timesteps — confirm against the config.
        :return: a non-differentiable batch of samples.
        """
        device = dev()
        assert isinstance(shape, (tuple, list))

        # model_kwargs carries the conditioning information (e.g. the PSF).
        assert model_kwargs is not None, "model_kwargs contains the conditions"

        img = img.to(device)

        # Record the original peak brightness, then normalize to [-1, 1].
        # NOTE(review): assumes img.max() > 0 — confirm for all-zero inputs.
        img_max = img.max()
        img_input = 2 * (img / img_max) - 1

        if self.predictor_type == PredictorType.DDPM:
            predictor = self.ddpm_predictor
        elif self.predictor_type == PredictorType.DDIM:
            predictor = self.ddim_predictor
        else:
            raise NotImplementedError(self.predictor_type)

        for i in range(num_steps - 1, -1, -1):
            t = th.tensor([i] * shape[0], device=device)
            with th.no_grad():
                img_input = predictor(model, img_input, t, model_kwargs=model_kwargs, clip=True)

            # Abort early if the chain diverges.
            if torch.isnan(img_input).any():
                print(f"NaN detected at t={i}")
                break

        # Restore the original brightness range.
        img_output = (img_input + 1) / 2 * img_max

        return img_output

    def ddpm_predictor(self, model, x_t, t, model_kwargs=None, clip=False):
        """
        DDPM (ancestral) predictor: sample x_{t-1} from x_t.

        :param model: the model to sample from.
        :param x_t: the current tensor at time t.
        :param t: a 1-D tensor of timesteps.
        :param model_kwargs: conditioning information passed to the model.
        :param clip: if True, clip the x_start prediction to [-1, 1].
        :return: a random sample from the model posterior.
        """
        assert self.sampling_kwargs is None, "in ddpm-predictor, no hyper-parameter"

        # eps: predicted noise; std: posterior standard deviation.
        eps, std = self.p_eps_std(model, x_t, t, model_kwargs=model_kwargs)

        # Posterior mean computed from the predicted x_0.
        pred_xstart = _extract_into_tensor(self.recip_bar_alphas, t, x_t.shape) * \
                      (x_t - _extract_into_tensor(self.bar_betas, t, x_t.shape) * eps)
        pred_xstart = self._clip(pred_xstart, clip=clip)
        model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x_t, t=t)

        # x_{t-1} = mean + std * z, with no noise at t == 0.
        noise = th.randn_like(x_t)
        nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x_t.shape) - 1)))  # no noise when t == 0
        sample = model_mean + nonzero_mask * std * noise

        return sample

    def save_img(self, img, step):
        """
        Save a [1, 1, H, W] tensor as a PNG heatmap under output/.

        :param img: image tensor; values are mapped from [-1, 1] to [0, 255] and clipped.
            NOTE(review): callers pass x_0_theta already scaled by dirty_max, so values
            may exceed [-1, 1] and saturate — confirm this is intended.
        :param step: training step, used in the output file name.
        """
        # Move to CPU and convert to a NumPy array.
        img_np = img.detach().cpu().numpy()

        # Scale [-1, 1] to [0, 255] and clip extremes.
        img_arr = ((img_np + 1) / 2) * 255
        img_arr = np.clip(img_arr, 0, 255).astype(np.uint8)

        # Drop the batch/channel dimensions, leaving [H, W].
        img_arr = img_arr.squeeze()

        # Percentile-based display range to suppress extreme values.
        vmin = np.nanpercentile(img_arr, 1)
        vmax = np.nanpercentile(img_arr, 99)

        # Make sure the target directory exists before saving.
        os.makedirs('output', exist_ok=True)
        output_png = f'output/x_0_theta_{step}.png'

        plt.figure(figsize=(10, 8))
        plt.imshow(img_arr, cmap='viridis', origin='lower', vmin=vmin, vmax=vmax)
        plt.title(f'Predict Image {step}')
        plt.xlabel('X Pixel')
        plt.ylabel('Y Pixel')
        plt.colorbar(label='Intensity')
        plt.tight_layout()
        plt.savefig(output_png, dpi=150)
        plt.close()

    def conv_fft(self, x, psf):
        """
        Differentiable circular convolution of `x` with `psf` via the FFT.

        :param x: [B, C, H, W] image tensor.
        :param psf: [B, C, h, w] point-spread function, assumed centered in its array.
        :return: [B, C, H, W] tensor, x convolved with the re-normalized psf.
        """
        B, C, H, W = x.shape
        _, _, h, w = psf.shape

        # 1. Zero-pad the centered PSF to the image size, keeping it centered,
        #    then move its center pixel to (0, 0) as the FFT convolution
        #    theorem requires. (The previous code fftshift-ed the small kernel
        #    *before* padding, which left the kernel center at an arbitrary
        #    offset and produced a spatially shifted result.)
        psf_pad = torch.zeros(B, C, H, W, device=x.device, dtype=x.dtype)
        pad_h = (H - h) // 2
        pad_w = (W - w) // 2
        psf_pad[:, :, pad_h:pad_h + h, pad_w:pad_w + w] = psf
        psf_pad = torch.fft.ifftshift(psf_pad, dim=(-2, -1))

        # 2. Re-normalize the padded PSF so it sums to 1 (flux conservation).
        psf_pad = psf_pad / (psf_pad.sum(dim=(-1, -2), keepdim=True) + 1e-8)

        # 3. Pointwise product in the Fourier domain. The default ("backward")
        #    normalization is the one for which ifft2(fft2(x) * fft2(k))
        #    equals the circular convolution; the previous norm='ortho' on all
        #    three transforms scaled the output by 1/sqrt(H*W).
        x_fft = torch.fft.fft2(x)
        psf_fft = torch.fft.fft2(psf_pad)
        y = torch.fft.ifft2(x_fft * psf_fft).real

        return y

    def training_losses(self, model, x_dirty, t, model_kwargs=None):
        """
        Compute the physics-guided training loss for a batch.

        :param model: the epsilon-prediction model.
        :param x_dirty: [N, C, H, W] dirty images (PSF-convolved observations).
        :param t: a 1-D tensor of timesteps.
        :param model_kwargs: must contain "psf"; asserted to match x_dirty's shape.
        :return: a dict with keys "mse" and "loss".
        """
        assert model_kwargs is not None, "model_kwargs contains the psf"
        psf = model_kwargs["psf"]

        assert not torch.isnan(x_dirty).any()
        assert not torch.isinf(x_dirty).any()

        dirty_max = x_dirty.max()  # original peak brightness of the dirty image
        # 1. Normalize the dirty image to [-1, 1].
        # NOTE(review): assumes x_dirty.max() > 0 — confirm upstream.
        x_dirty_input = 2 * (x_dirty / dirty_max) - 1

        x_t, noise = self.q_sample(x_dirty_input, t)

        terms = {}

        if self.loss_type == LossType.MSE:
            eps_theta = model(x_t, self._scale_timesteps(t), **model_kwargs)

            sqrt_alpha_bar_t = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape)
            sqrt_one_minus_alpha_bar_t = _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape)

            # Closed-form x_0 estimate from (x_t, eps_theta), mapped back to
            # the original brightness range.
            x_0_theta = (x_t - sqrt_one_minus_alpha_bar_t * eps_theta) / sqrt_alpha_bar_t
            x_0_theta = x_0_theta * dirty_max

            if self.step % 100 == 0:
                self.save_img(x_0_theta, self.step)

            if self.model_mean_type == ModelMeanType.EPSILON:
                if self.model_var_type == ModelVarType.LEARNED:
                    assert False, "code of learned variance has not been completed"
                else:
                    model_output = eps_theta
            elif self.model_mean_type == ModelMeanType.SCORE:
                assert False, "code of score estimation has not been completed"
            else:
                raise NotImplementedError(self.model_mean_type)

            assert x_0_theta.shape == noise.shape == x_dirty.shape == psf.shape

            # ---- Physical Loss ---- #
            # Re-simulate the dirty image by convolving the predicted clean
            # image with the PSF. This MUST stay inside the autograd graph:
            # the previous implementation round-tripped through numpy for
            # astropy's convolve_fft, which detached the tensor and silenced
            # every gradient coming from the physical loss. conv_fft is a
            # differentiable torch FFT convolution with a normalized kernel
            # (circular boundary instead of astropy's zero-fill — NOTE(review):
            # confirm edge effects are negligible for these images).
            x_recon_psf = self.conv_fft(x_0_theta, psf)

            # Periodic debug statistics (kept off the per-step hot path).
            if self.step % 100 == 0:
                print("[debug]")
                print(f"x_dirty mean: {x_dirty.mean():.4f}, std: {x_dirty.std():.4f}")
                print(f"psf mean: {psf.mean():.4f}, std: {psf.std():.4f}, sum: {psf.sum():.4f}")
                print(f"x_0_theta mean: {x_0_theta.mean():.4f}, std: {x_0_theta.std():.4f}")
                print(f"x_recon_psf mean: {x_recon_psf.mean():.4f}, std: {x_recon_psf.std():.4f}")

            physical_loss = torch.nn.functional.l1_loss(x_recon_psf, x_dirty)

            diffusion_loss = mean_flat((eps_theta - noise) ** 2).mean()

            # ---- total loss ---- #
            lambda_1 = 0.1  # tunable weight of the physical loss
            lambda_2 = 0.9  # tunable weight of the diffusion loss
            loss = lambda_1 * physical_loss + lambda_2 * diffusion_loss

            # Log the loss curves to TensorBoard when it is available.
            try:
                from torch.utils.tensorboard import SummaryWriter
                # Lazily create the writer on first use.
                if not hasattr(self, 'writer'):
                    self.writer = SummaryWriter(log_dir='runs/ppgddpm_experiment')

                self.writer.add_scalar('Loss/physical_loss', physical_loss.item(), self.step)
                self.writer.add_scalar('Loss/diffusion_loss', diffusion_loss.item(), self.step)
                self.writer.add_scalar('Loss/total_loss', loss.item(), self.step)

                self.writer.add_scalar('Weights/lambda_1', lambda_1, self.step)
                self.writer.add_scalar('Weights/lambda_2', lambda_2, self.step)

                # Flush every 100 steps so data is written out in time.
                if self.step % 100 == 0:
                    self.writer.flush()
            except ImportError:
                # TensorBoard is optional; skip logging if it is not installed.
                print("tensorboard not installed")
            logger.log(f"physical_loss: {physical_loss}, diffusion_loss: {diffusion_loss}, loss: {loss}")

            terms["mse"] = loss
            terms["loss"] = loss

        else:
            raise NotImplementedError(self.loss_type)

        self.step += 1

        return terms


class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process that retains only a subset of the timesteps of an
    underlying diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.

    Note: this mirrors the standard SpacedDiffusion implementation.
    """

    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])

        base_diffusion = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        # Re-derive per-step alphas/betas for the retained timesteps so that
        # the spaced chain reproduces the base chain's marginals there.
        prev_bar_alpha = 1.0
        prev_bar_beta = 0.0
        kept_alphas = []
        kept_betas = []
        for step, (bar_alpha, bar_beta) in enumerate(
                zip(base_diffusion.bar_alphas, base_diffusion.bar_betas)):
            if step not in self.use_timesteps:
                continue
            step_alpha = bar_alpha / prev_bar_alpha
            kept_alphas.append(step_alpha)
            prev_bar_alpha = bar_alpha
            # Uses the bar_beta of the *previous* retained step, hence the
            # update order: compute first, then advance prev_bar_beta.
            kept_betas.append(np.sqrt(bar_beta ** 2 - step_alpha ** 2 * prev_bar_beta ** 2))
            prev_bar_beta = bar_beta
            self.timestep_map.append(step)
        kwargs["alphas"] = np.array(kept_alphas)
        kwargs["betas"] = np.array(kept_betas)
        super().__init__(**kwargs)

    def p_eps_std(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        """Delegate to the base class with the model wrapped for timestep remapping."""
        return super().p_eps_std(self._wrap_model(model), *args, **kwargs)

    def training_losses(self, model, *args, **kwargs):  # pylint: disable=signature-differs
        """Delegate to the base class with the model wrapped for timestep remapping."""
        return super().training_losses(self._wrap_model(model), *args, **kwargs)

    def _wrap_model(self, model):
        """Wrap `model` in a _WrappedModel; idempotent if already wrapped."""
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
        )

    def _scale_timesteps(self, t):
        """Return `t` unchanged; scaling is done by the wrapped model."""
        return t