
from diffusers.schedulers.scheduling_utils import SchedulerMixin, KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput, logging, randn_tensor
from diffusers.configuration_utils import ConfigMixin, register_to_config
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import numpy as np

# NOTE: the installed oneflow-diffusers version is old; `randn_tensor` and
# `KarrasDiffusionSchedulers` were not implemented there, so for compatibility
# they were copied into this project instead of being imported from it.

from diffusers import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
# Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput
class EulerDummySchedulerOutput(BaseOutput):
    """
    Output container returned by the scheduler's `step` method.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The sample at the previous timestep (x_{t-1}); feed this back into
            the model as the input for the next denoising iteration.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The model's current estimate of the fully denoised sample (x_0).
            Useful for previewing progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class EulerDummyScheduler(SchedulerMixin, ConfigMixin):
    """
    Minimal Euler scheduler for rectified-flow style models.

    The model output is treated directly as a velocity/derivative and the
    sample is advanced with a plain Euler step ``x_{t+dt} = x_t + v * dt``.
    No sigma-based input scaling is performed.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        eps: float = 1e-3,
        T: float = 1.,
        reflow_flag: bool = True,
        overide_timesteps: Optional[List[float]] = None,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        interpolation_type: str = "linear",
        *args,
        **kwargs,
    ):
        """
        Args:
            eps: start of the continuous time interval (avoids t == 0 exactly).
            T: end of the continuous time interval.
            reflow_flag: controls the direction of the flow (forward vs. reverse).
            overide_timesteps: experimental — manually specify a small set of
                exact t values to use instead of a uniform grid. (Name kept
                as-is, including the spelling, for config backward compatibility.)
            num_train_timesteps: number of training timesteps (used by `__len__`).
            beta_start / beta_end / beta_schedule / trained_betas /
            prediction_type / interpolation_type: accepted for API compatibility
                with other diffusers schedulers; not used by this scheduler.
        """
        self.eps = eps
        self.T = T
        self.reflow_flag = reflow_flag  # controls forward/reverse flow direction
        self.num_train_timesteps = num_train_timesteps

        # Kept so pipelines that multiply by `init_noise_sigma` are unaffected.
        self.init_noise_sigma = 1.  # not used

        # ! experimental only; for manually specifying a few precise t values
        self.overide_timesteps = overide_timesteps

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        """
        No-op input scaling, kept for API compatibility with other schedulers.

        Unlike `EulerDiscreteScheduler`, this scheduler performs no sigma-based
        scaling and returns `sample` unchanged.

        Args:
            sample (`torch.FloatTensor`): input sample
            timestep (`float` or `torch.FloatTensor`): current timestep (unused)

        Returns:
            `torch.FloatTensor`: the input sample, unmodified
        """
        self.is_scale_input_called = True
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Build the inference time grid.

        Produces `num_inference_steps` uniformly spaced continuous times in
        [eps, T) (or the user-supplied `overide_timesteps`), stores them with
        the endpoint T appended as `timesteps_original` (used for dt in `step`),
        and exposes `timesteps` scaled by 999 to mimic the familiar 0-999
        discrete range expected by pipelines.
        """
        Ts, Te = self.eps, self.T

        if self.overide_timesteps:
            # `assert` would be stripped under `python -O`; raise explicitly.
            if len(self.overide_timesteps) != num_inference_steps:
                raise ValueError(
                    f"overide_timesteps has {len(self.overide_timesteps)} entries, "
                    f"expected num_inference_steps={num_inference_steps}"
                )
            timesteps = torch.tensor(self.overide_timesteps, dtype=torch.float)
        else:
            # linspace guarantees exactly num_inference_steps points on the grid
            # Ts + k*(Te-Ts)/N; float `arange` can over/undershoot by one
            # element due to rounding of the step size.
            timesteps = torch.linspace(Ts, Te, num_inference_steps + 1)[:-1]

        self.num_inference_steps = num_inference_steps
        # Append the endpoint so step() can always form dt = t[i+1] - t[i].
        timesteps_original = torch.tensor(timesteps.tolist() + [Te])
        timesteps = (999 * timesteps_original).to(device=device)
        self.timesteps_original = timesteps_original
        self.timesteps = timesteps
        if not self.reflow_flag:  # TODO: reverse-flow logic — timesteps need to be flipped
            ...

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        s_churn: float = 0.0,
        s_tmin: float = 0.0,
        s_tmax: float = float("inf"),
        s_noise: float = 1.0,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[EulerDummySchedulerOutput, Tuple]:
        """
        One Euler integration step: x_{t+dt} = x_t + model_output * dt.

        Args:
            model_output: the model prediction, interpreted directly as the
                velocity field dX/dt (no epsilon/v-prediction conversion).
            timestep: the current (scaled) timestep; must match an entry of
                `self.timesteps` exactly.
            sample: the current sample x_t.
            s_churn / s_tmin / s_tmax / s_noise / generator: accepted for API
                compatibility; unused by this deterministic scheduler.
            return_dict: if False, return a plain tuple `(prev_sample,)`.

        Returns:
            `EulerDummySchedulerOutput` or tuple with the advanced sample.
        """
        # NOTE(review): relies on exact float equality with self.timesteps;
        # .item() raises if the timestep matches zero or multiple entries.
        step_index = (self.timesteps == timestep).nonzero().item()
        # dt is taken from the unscaled grid, which has the endpoint appended.
        dt = self.timesteps_original[step_index + 1] - self.timesteps_original[step_index]

        derivative = model_output
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return EulerDummySchedulerOutput(prev_sample=prev_sample)

    def __len__(self):
        return self.num_train_timesteps
