# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging, randn_tensor
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


# Module-level logger, following the diffusers convention of one logger per module.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class ReflowEulerSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function.

    Args:
        prev_sample (`torch.FloatTensor`):
            The sample produced by one Euler step; fed back to the model as the
            input of the next denoising iteration.
    """

    prev_sample: torch.FloatTensor


class ReflowEulerScheduler(SchedulerMixin, ConfigMixin):
    """
    Naive Euler sampling scheduler for a reflow (rectified-flow) model.

    During training, the time variable ``t`` is sampled continuously in
    ``[t_min, t_max]``; for consistency with other diffusers schedulers the
    timesteps exposed through `self.timesteps` are scaled by
    ``num_train_timesteps`` (i.e. into the ``(0, 1000)`` range by default).

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            Scale factor applied to the continuous timesteps exposed through
            `self.timesteps`, matching the training-time convention.
        t_min (`float`, defaults to 0.0):
            Lower bound of the continuous time interval.
        t_max (`float`, defaults to 1.0):
            Upper bound of the continuous time interval.
        t_eps (`float`, defaults to 1e-3):
            Standard deviation of the Gaussian perturbation added to each
            timestep in `set_timesteps` (intended to avoid a degenerate time
            embedding at exactly t=0).
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        t_min: float = 0.,
        t_max: float = 1.,
        t_eps: float = 1e-3,
        **kwargs,
    ):
        # Settable values; `set_timesteps` must be called before sampling.
        self.num_train_timesteps = num_train_timesteps
        self.num_inference_steps = None
        self.is_scale_input_called = False

        self.t_min = t_min
        self.t_max = t_max
        self.t_eps = t_eps

    @property
    def init_noise_sigma(self):
        """
        Standard deviation of the initial noise distribution.

        The reflow model starts from unit Gaussian noise, so this is a constant
        placeholder kept for pipeline compatibility.
        """
        return 1.

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        """
        No-op placeholder kept for pipeline compatibility; the reflow model does
        not require sigma-based input scaling.

        Args:
            sample (`torch.FloatTensor`): input sample, returned unchanged.
            timestep (`float` or `torch.FloatTensor`): unused.

        Returns:
            `torch.FloatTensor`: the input `sample`, unmodified.
        """
        self.is_scale_input_called = True
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, generator: Optional[torch.Generator] = None,):
        """
        Compute the continuous ``t`` values fed to the model and the per-step
        Euler step sizes, and store them as `self.timesteps` / `self.step_sizes`.

        Args:
            num_inference_steps (`int`):
                Number of Euler steps used to integrate from `t_min` to `t_max`.
            device (`str` or `torch.device`, *optional*):
                Device to move the timesteps and step sizes to.
            generator (`torch.Generator`, *optional*):
                Random number generator for the timestep perturbation.
        """
        self.num_inference_steps = num_inference_steps
        timesteps = torch.arange(self.t_min, self.t_max, (self.t_max - self.t_min) / num_inference_steps)
        # TODO: add an on/off switch for this perturbation and compare. A small
        # jitter at t=0 may be needed to keep the time embedding from degenerating.
        timesteps = timesteps + self.t_eps * randn_tensor(timesteps.shape, generator=generator, device=timesteps.device, dtype=timesteps.dtype)
        # Keep every perturbed timestep inside the configured interval.
        # (fix: previously clamped to the literal [0, 1] regardless of t_min/t_max)
        timesteps = timesteps.clamp(self.t_min, self.t_max)
        # Step size for timestep i is the distance to timestep i+1; the final
        # step integrates up to t_max.
        shifted_timesteps = torch.cat([timesteps[1:], torch.tensor([self.t_max])], dim=0).to(timesteps)
        step_sizes = shifted_timesteps - timesteps

        timesteps = timesteps.to(device, dtype=torch.float32)
        step_sizes = step_sizes.to(device, dtype=torch.float32)

        # Scale to (0, num_train_timesteps) to match the training convention.
        self.timesteps = timesteps * self.num_train_timesteps
        self.step_sizes = step_sizes

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        s_churn: float = 0.0,
        s_tmin: float = 0.0,
        s_tmax: float = float("inf"),
        s_noise: float = 1.0,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[ReflowEulerSchedulerOutput, Tuple]:
        """
        Propagate the sample by one explicit Euler step of the reflow ODE:
        ``x_{next} = x + v(x, t) * dt``, where the model directly predicts the
        derivative ``v``.

        Args:
            model_output (`torch.FloatTensor`):
                Direct output of the model, interpreted as the ODE derivative.
            timestep (`float` or `torch.FloatTensor`):
                Current (scaled) timestep; must be one of `self.timesteps`.
            sample (`torch.FloatTensor`):
                Current instance of the sample being created.
            s_churn (`float`), s_tmin (`float`), s_tmax (`float`), s_noise (`float`):
                Unused; kept for signature compatibility with Euler schedulers.
            generator (`torch.Generator`, *optional*): unused.
            return_dict (`bool`):
                Whether to return a `ReflowEulerSchedulerOutput` instead of a tuple.

        Returns:
            [`ReflowEulerSchedulerOutput`] or `tuple`:
            [`ReflowEulerSchedulerOutput`] if `return_dict` is True, otherwise a
            `tuple` whose first element is the sample tensor.
        """

        if (
            isinstance(timestep, int)
            or isinstance(timestep, torch.IntTensor)
            or isinstance(timestep, torch.LongTensor)
        ):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `ReflowEulerScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if not self.is_scale_input_called:
            logger.warning(
                "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
                "See `StableDiffusionPipeline` for a usage example."
            )

        if isinstance(timestep, torch.Tensor):
            timestep = timestep.to(self.timesteps.device)

        # Clamping in `set_timesteps` can make adjacent timesteps coincide;
        # take the first matching index instead of requiring a unique match
        # (fix: `.nonzero().item()` raised a RuntimeError on duplicates).
        step_index = (self.timesteps == timestep).nonzero()[0].item()

        # NOTE: the model directly outputs the ODE derivative for reflow.
        derivative = model_output
        dt = self.step_sizes[step_index]

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return ReflowEulerSchedulerOutput(prev_sample=prev_sample)

    def __len__(self):
        return self.config.num_train_timesteps
