# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging, randn_tensor
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class ReflowHighOrderScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler meant to be integrated by an external high-order ODE solver
    (``scipy.integrate.solve_ivp``) instead of the usual per-step loop.

    During training, ``t`` is effectively sampled continuously, but it is
    rescaled to the (0, 1000) range for consistency with discrete-timestep
    schedulers.

    Note:
        `step()` is intentionally unimplemented — integration must be driven
        externally via `solve_ivp`; `scale_model_input` and `init_noise_sigma`
        are identity placeholders kept for pipeline-API compatibility.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        t_min: float = 0.0,
        t_max: float = 1.0,
        t_eps: float = 1e-3,
        use_scipy_ode_solver: bool = True,
        ode_method: str = "RK45",
        ode_kwargs: dict = {},
        **kwargs,
    ):
        """
        Args:
            num_train_timesteps (`int`): number of discrete timesteps the
                continuous time axis is rescaled to (kept for compatibility
                with discrete schedulers; see `__len__`).
            t_min (`float`): lower bound of the continuous integration time.
            t_max (`float`): upper bound of the continuous integration time.
            t_eps (`float`): small epsilon, presumably used to keep the solver
                away from the exact time endpoints (consumed by the caller).
            use_scipy_ode_solver (`bool`): whether integration is delegated to
                `scipy.integrate.solve_ivp` (the only supported mode here).
            ode_method (`str`): solver name forwarded to `solve_ivp`; one of
                ["RK45", "RK23", "DOP853"].
            ode_kwargs (`dict`): extra solver options, e.g. `rtol`, `atol`.
        """
        self.use_scipy_ode_solver = use_scipy_ode_solver
        self.t_min = t_min
        self.t_max = t_max
        self.t_eps = t_eps
        # NOTE: the declared default `{}` is a shared mutable object; we keep
        # it (because `register_to_config` records the declared default in the
        # serialized config) but take a defensive copy so mutations on one
        # instance cannot leak into other instances or future constructions.
        self.ode_kwargs = dict(ode_kwargs)
        self.ode_method = ode_method

        # setable values
        self.num_train_timesteps = num_train_timesteps
        self.num_inference_steps = None
        self.is_scale_input_called = False

    @property
    def init_noise_sigma(self):
        """
        Placeholder for pipeline-API compatibility: initial noise is not
        rescaled by this scheduler.
        """
        return 1.0

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        """
        Placeholder for pipeline-API compatibility: returns `sample` unchanged.

        Args:
            sample (`torch.FloatTensor`): current sample in the diffusion chain.
            timestep (`float` or `torch.FloatTensor`): current timestep (unused).

        Returns:
            `torch.FloatTensor`: the input `sample`, untouched.
        """
        # Record the call so downstream code relying on this flag keeps working.
        self.is_scale_input_called = True
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        generator: Optional[torch.Generator] = None,
    ):
        """
        No-op: the external ODE solver chooses its own (adaptive) time grid,
        so no discrete timestep schedule is built. `self.timesteps` is reset
        to `None` to make that explicit.
        """
        self.timesteps = None

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        s_churn: float = 0.0,
        s_tmin: float = 0.0,
        s_tmax: float = float("inf"),
        s_noise: float = 1.0,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> None:
        """
        Not supported by this scheduler: sample propagation is performed
        entirely by the external `scipy.integrate.solve_ivp` call, so there is
        no per-step update to apply here. The signature mirrors
        `EulerDiscreteScheduler.step` for API compatibility only.

        Raises:
            NotImplementedError: always.
        """
        # The former Euler-style update body below this raise was unreachable
        # dead code (and referenced an undefined `ReflowEulerSchedulerOutput`);
        # it has been removed.
        raise NotImplementedError("use outer scipy solve_ivp only")

    def __len__(self):
        # Length convention shared with other diffusers schedulers: the number
        # of training timesteps registered in the config.
        return self.config.num_train_timesteps
