# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete
class EulerDiscreteSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of the previous timestep. `prev_sample` should be used as the next model
            input in the denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    # Sample for the previous timestep; feed back into the model on the next step.
    prev_sample: torch.FloatTensor
    # Optional preview of the fully denoised sample predicted from the current step.
    pred_original_sample: Optional[torch.FloatTensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.


    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`

    Returns:
        betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
def rescale_zero_terminal_snr(betas):
    """
    Rescale a beta schedule so that the terminal SNR is exactly zero.

    Implements Algorithm 1 of https://arxiv.org/pdf/2305.08891.pdf

    Args:
        betas (`torch.FloatTensor`):
            the betas that the scheduler is being initialized with.

    Returns:
        `torch.FloatTensor`: rescaled betas with zero terminal SNR
    """
    # Work in sqrt(alpha_bar) space, where the rescaling is a simple affine map.
    sqrt_alpha_bar = torch.cumprod(1.0 - betas, dim=0).sqrt()

    # Remember the endpoints before modifying anything.
    first = sqrt_alpha_bar[0].clone()
    last = sqrt_alpha_bar[-1].clone()

    # Shift so the last timestep hits zero, then scale so the first is unchanged.
    sqrt_alpha_bar -= last
    sqrt_alpha_bar *= first / (first - last)

    # Undo the sqrt and the cumulative product to recover per-step alphas.
    alpha_bar = sqrt_alpha_bar**2
    step_alphas = alpha_bar[1:] / alpha_bar[:-1]
    step_alphas = torch.cat([alpha_bar[0:1], step_alphas])

    return 1 - step_alphas


class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Euler scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        beta_start (`float`, defaults to 0.0001):
            The starting `beta` value of inference.
        beta_end (`float`, defaults to 0.02):
            The final `beta` value.
        beta_schedule (`str`, defaults to `"linear"`):
            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear` or `scaled_linear`.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        interpolation_type(`str`, defaults to `"linear"`, *optional*):
            The interpolation type to compute intermediate sigmas for the scheduler denoising steps. Should be one of
            `"linear"` or `"log_linear"`.
        use_karras_sigmas (`bool`, *optional*, defaults to `False`):
            Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`,
            the sigmas are determined according to a sequence of noise levels {σi}.
        timestep_spacing (`str`, defaults to `"linspace"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps, as required by some model families.
        rescale_betas_zero_snr (`bool`, defaults to `False`):
            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
            dark samples instead of limiting it to samples with medium brightness. Loosely related to
            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,  # number of diffusion steps used to train the model
        beta_start: float = 0.0001,  # starting beta value
        beta_end: float = 0.02,  # final beta value
        beta_schedule: str = "linear",  # how betas vary across the schedule
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,  # pre-computed betas; bypasses beta_start/end
        prediction_type: str = "epsilon",  # what the model output represents
        interpolation_type: str = "linear",  # how intermediate sigmas are interpolated
        use_karras_sigmas: Optional[bool] = False,  # whether to use Karras et al. sigmas
        sigma_min: Optional[float] = None,  # optional lower bound for Karras sigmas
        sigma_max: Optional[float] = None,  # optional upper bound for Karras sigmas
        timestep_spacing: str = "linspace",  # how timesteps are spaced
        timestep_type: str = "discrete",  # "discrete" or "continuous"
        steps_offset: int = 0,  # offset added to inference timesteps
        rescale_betas_zero_snr: bool = False,  # whether to rescale betas for zero terminal SNR
    ):
        # Build the beta schedule either from the provided betas or the requested schedule type.
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            # Fixed grammar of the error message ("does is not implemented").
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        # Optionally rescale betas so the terminal SNR is zero.
        if rescale_betas_zero_snr:
            self.betas = rescale_zero_terminal_snr(self.betas)

        # alphas / alphas_cumprod are used throughout diffusion and sampling.
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        if rescale_betas_zero_snr:
            # Close to 0 without being 0 so the first sigma is not infinity;
            # 2**-24 is the smallest positive normal-ish value that works in FP16.
            self.alphas_cumprod[-1] = 2**-24

        # Sampling sigmas, highest noise first.
        sigmas = (((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5).flip(0)
        # Training timesteps, descending.
        timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
        timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32)

        # settable values
        self.num_inference_steps = None

        # For continuous timesteps with v_prediction, timesteps are 0.25 * log(sigma).
        if timestep_type == "continuous" and prediction_type == "v_prediction":
            self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas])
        else:
            self.timesteps = timesteps

        # Append a terminal zero sigma. `Tensor.device` is always a `torch.device`
        # (never an int), so the previous `isinstance(..., int)` NPU rewrite was dead code.
        self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
        self.is_scale_input_called = False
        self.use_karras_sigmas = use_karras_sigmas

        # indices used for internal step tracking
        self._step_index = None
        self._begin_index = None

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if isinstance(self.sigmas, list):
            max_sigma = max(self.sigmas)
        else:
            max_sigma = self.sigmas.max()

        if self.config.timestep_spacing not in ["linspace", "trailing"]:
            return (max_sigma**2 + 1) ** 0.5
        return max_sigma

    @property
    def step_index(self):
        """
        The index counter for the current timestep. It will increase by 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index of the first timestep. It should be set from the pipeline with the `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from the pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        """
        Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm.

        Ensures interchangeability with schedulers that need to scale the denoising model input
        depending on the current timestep.

        Args:
            sample (`torch.FloatTensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.

        Returns:
            `torch.FloatTensor`:
                A scaled input sample.
        """
        # Lazily initialize the step index on first use.
        # (Removed per-call logger.info statements: this runs once per denoising step,
        # and the eager f-string logging added avoidable hot-loop overhead.)
        if self.step_index is None:
            self._init_step_index(timestep)

        sigma = self.sigmas[self.step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)

        # Remember that scaling happened so `step` can warn when it was skipped.
        self.is_scale_input_called = True
        return sample


    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        """
        self.num_inference_steps = num_inference_steps

        # Lay out the timesteps according to the configured spacing strategy.
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(
                0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32
            )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = self.config.num_train_timesteps // self.num_inference_steps
            # Integer multiples of step_ratio, shifted by the configured offset.
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = self.config.num_train_timesteps / self.num_inference_steps
            timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        # Sigma for every training timestep; log-sigmas are needed for the Karras conversion.
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)

        # Interpolate the per-training-step sigmas down to the inference timesteps.
        if self.config.interpolation_type == "linear":
            sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        elif self.config.interpolation_type == "log_linear":
            sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp().numpy()
        else:
            raise ValueError(
                f"{self.config.interpolation_type} is not implemented. Please specify interpolation_type to either"
                " 'linear' or 'log_linear'"
            )

        # Optionally remap to the Karras et al. (2022) noise schedule.
        if self.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device)

        # For continuous timesteps with v_prediction, timesteps are 0.25 * log(sigma).
        if self.config.timestep_type == "continuous" and self.config.prediction_type == "v_prediction":
            self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]).to(device)
        else:
            self.timesteps = torch.from_numpy(timesteps.astype(np.float32)).to(device)

        # Append a terminal zero sigma and reset the tracking indices.
        # `Tensor.device` is always a `torch.device` (never an int), so the previous
        # `isinstance(sigmas.device, int)` NPU rewrite here was dead code.
        self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
        self._step_index = None
        self._begin_index = None

        # Keep sigmas on CPU to avoid excessive host/device communication while stepping.
        self.sigmas = self.sigmas.to("cpu")

    def _sigma_to_t(self, sigma, log_sigmas):
        """Map a sigma back to a (fractional) timestep by piecewise-linear interpolation in log space."""
        # log of the query sigma, floored to avoid log(0)
        log_sigma = np.log(np.maximum(sigma, 1e-10))

        # distance of the query to every schedule log-sigma
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # index of the bracketing pair: last entry not above the query,
        # clipped so high_idx stays in range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low, high = log_sigmas[low_idx], log_sigmas[high_idx]

        # interpolation weight inside [low, high], clamped to [0, 1]
        w = np.clip((low - log_sigma) / (low - high), 0, 1)

        # blend the two neighboring indices into a fractional timestep
        t = (1 - w) * low_idx + w * high_idx
        return t.reshape(sigma.shape)

    # Copied from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        # Hack to make sure that other schedulers which copy this function don't break
        # TODO: Add this logic to the other schedulers
        sigma_min = getattr(self.config, "sigma_min", None)
        sigma_max = getattr(self.config, "sigma_max", None)

        # Fall back to the endpoints of the incoming sigma schedule.
        if sigma_min is None:
            sigma_min = in_sigmas[-1].item()
        if sigma_max is None:
            sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Return the schedule index matching `timestep` (second match when duplicated)."""
        schedule_timesteps = self.timesteps if schedule_timesteps is None else schedule_timesteps

        matches = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        return matches[1 if len(matches) > 1 else 0].item()

    def _init_step_index(self, timestep):
        """
        Initialize the internal step index from a timestep.

        When `begin_index` has not been set, the timestep is located in `self.timesteps`
        (after being moved to the same device); otherwise `begin_index` is used directly.

        Args:
            timestep: The current timestep, either a `torch.Tensor` or a plain number.

        Sets `self._step_index`; returns nothing.
        """
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                # `Tensor.device` is always a `torch.device` (never an int), so the
                # previous `f'npu:{...}'` rewrite here was dead code; a plain `.to` suffices.
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            # begin_index was set explicitly by the pipeline; use it as-is.
            self._step_index = self._begin_index

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        s_churn: float = 0.0,
        s_tmin: float = 0.0,
        s_tmax: float = float("inf"),
        s_noise: float = 1.0,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[EulerDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from the learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            s_churn (`float`):
                Amount of extra noise injected into the sample for data-augmentation purposes.
            s_tmin (`float`):
                Minimum sigma at which `s_churn` is applied.
            s_tmax (`float`):
                Maximum sigma at which `s_churn` is applied.
            s_noise (`float`, defaults to 1.0):
                Scaling factor for the noise added to the sample.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            return_dict (`bool`):
                Whether or not to return an `EulerDiscreteSchedulerOutput` or a tuple.

        Returns:
            `EulerDiscreteSchedulerOutput` or `tuple`:
                `EulerDiscreteSchedulerOutput` if `return_dict` is `True`, otherwise a tuple whose first element is
                the sample tensor.
        """
        # Reject integer timesteps (e.g. from `enumerate(timesteps)`), which would index incorrectly.
        # (Removed the per-call logger.info statements: this runs once per denoising step and
        # the eager f-string logging added avoidable hot-loop overhead.)
        if (
            isinstance(timestep, int)
            or isinstance(timestep, torch.IntTensor)
            or isinstance(timestep, torch.LongTensor)
        ):
            raise ValueError(
                (
                    "传递整数索引（例如来自 `enumerate(timesteps)`）作为时间步到 `EulerDiscreteScheduler.step()` 不被支持。"
                    "确保传递 `scheduler.timesteps` 中的一个时间步。"
                ),
            )

        # Warn when `scale_model_input` was skipped — the result would be wrongly scaled.
        if not self.is_scale_input_called:
            logger.warning(
                "在调用 `step` 之前应该调用 `scale_model_input` 函数以确保正确的去噪。"
                "参见 `StableDiffusionPipeline` 以获取使用示例。"
            )

        # Lazily initialize the step index on first use.
        if self.step_index is None:
            self._init_step_index(timestep)

        # Upcast to avoid precision issues when computing prev_sample.
        sample = sample.to(dtype=torch.float32)

        sigma = self.sigmas[self.step_index]

        # gamma controls how much extra "churn" noise is injected at this step.
        gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0

        # Noise is drawn unconditionally so the generator stream does not depend on gamma.
        noise = randn_tensor(
            model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator
        )

        eps = noise * s_noise
        sigma_hat = sigma * (gamma + 1)

        # Apply churn noise only when gamma is active.
        if gamma > 0:
            sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5

        # Predict the original sample (x_0) according to the configured prediction type.
        if self.config.prediction_type == "original_sample" or self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "epsilon":
            pred_original_sample = sample - sigma_hat * model_output
        elif self.config.prediction_type == "v_prediction":
            # denoised = model_output * c_out + input * c_skip
            pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
        else:
            raise ValueError(
                f"prediction_type 给定为 {self.config.prediction_type} 必须是 `epsilon` 或 `v_prediction` 中的一个"
            )

        # Euler ODE step: derivative at sigma_hat, stepped to the next sigma.
        derivative = (sample - pred_original_sample) / sigma_hat
        dt = self.sigmas[self.step_index + 1] - sigma_hat
        prev_sample = sample + derivative * dt

        # Cast back to the model-compatible dtype. `Tensor.dtype` is a `torch.dtype`
        # (never an int), so the previous `f'npu:{model_output.dtype}'` rewrite was dead code.
        prev_sample = prev_sample.to(model_output.dtype)

        # Advance the internal step counter.
        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Add noise to the original samples at the given timesteps: `noisy = original + noise * sigma(t)`.

        Used to create noisy training targets, and to create the initial latents for
        image-to-image / inpainting pipelines.

        Args:
            original_samples (`torch.FloatTensor`): The clean samples.
            noise (`torch.FloatTensor`): The noise to add, broadcastable to `original_samples`.
            timesteps (`torch.FloatTensor`): The timesteps selecting the sigma for each sample.

        Returns:
            `torch.FloatTensor`: The noisy samples.
        """
        # BUGFIX: always materialize `sigmas` on the samples' device/dtype. The previous
        # code only assigned `sigmas` inside `if self.sigmas.device != original_samples.device:`,
        # so a same-device call crashed later with a NameError. The `isinstance(device, int)`
        # NPU rewrites were dead code (`Tensor.device` is always a `torch.device`).
        sigmas = self.sigmas.to(
            device=original_samples.device, dtype=original_samples.dtype, non_blocking=True
        )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device, non_blocking=True)
            timesteps = timesteps.to(original_samples.device, non_blocking=True)

        # self.begin_index is None when the scheduler is used for training, or the pipeline
        # does not implement set_begin_index.
        if self.begin_index is None:
            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        elif self.step_index is not None:
            # add_noise is called after the first denoising step (for inpainting)
            step_indices = [self.step_index] * timesteps.shape[0]
        else:
            # add_noise is called before the first denoising step to create the initial latent (img2img)
            step_indices = [self.begin_index] * timesteps.shape[0]

        sigma = sigmas[step_indices].flatten()
        # Broadcast sigma over the samples' trailing dimensions.
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        return original_samples + noise * sigma

    def __len__(self):
        """Return the number of training timesteps from the configuration."""
        return self.config.num_train_timesteps
