import numpy as np
import torch
from loguru import logger
from einops import rearrange

from reflow.utils import get_model_fn, from_flattened_numpy, to_flattened_numpy
from reflow.loss import pred_score


def get_sampling_fn(config, sde, shape, inverse_scaler=None,):
    """Create a sampling function.

    Args:
      config: A `ml_collections.ConfigDict` object that contains all configuration
        information; only `config.sampling.method` is read here.
      sde: A `sde_lib.SDE` object that represents the forward SDE.
      shape: A sequence of integers representing the expected shape of a batch of samples.
      inverse_scaler: The inverse data normalizer function, forwarded to the
        sampler factory.

    Returns:
      A sampling function `(model, ...) -> (samples, nfe[, traj])` produced by
      `get_rectified_flow_sampler`.

    Raises:
      ValueError: If `config.sampling.method` is not a recognized sampler name.
    """

    sampler_name = config.sampling.method
    if sampler_name.lower() == 'rectified_flow':
        # Forward inverse_scaler — it was previously accepted but silently dropped.
        sampling_fn = get_rectified_flow_sampler(
            sde=sde, shape=shape, inverse_scaler=inverse_scaler)
    else:
        raise ValueError(f"Sampler name {sampler_name} unknown.")

    return sampling_fn



def get_rectified_flow_sampler(sde, shape, inverse_scaler=None):
    """Build a rectified-flow sampler (Euler or black-box RK45).

    Args:
      sde: Rectified-flow SDE object. Must provide `eps`, `T`, `sample_N`,
        `sigma_t`, `noise_scale`, `get_z0`, `ode_tol` and `use_ode_sampler`.
      shape: Shape of a full batch of samples, e.g. `(b, c, h, w)`.
      inverse_scaler: Inverse data normalizer. NOTE(review): currently unused —
        both samplers return un-descaled samples; re-enable deliberately if
        callers expect de-normalized output.

    Returns:
      A sampling function that returns samples and the number of function
      evaluations during sampling (plus the trajectory when requested).
    """
    def euler_sampler(model, z=None, condition=None, return_traj=False, uncond_condition=None, guidance_scale=1.0,):
        """The probability flow ODE sampler with simple Euler discretization.

        Args:
          model: A velocity model (diffusers-style: called with `timestep=`,
            returns an object with a `.sample` attribute).
          z: If present, generate samples from latent code `z`; otherwise the
            initial latent is drawn from the SDE prior.
          condition: Dict of conditioning kwargs splatted into the model call.
          return_traj: If True, also return the full trajectory.
          uncond_condition: Dict of unconditional kwargs, used for
            classifier-free guidance when `guidance_scale > 1.0`.
          guidance_scale: CFG scale; values > 1.0 enable guidance.

        Returns:
          `(samples, nfe)` or `(samples, nfe, traj)` where `traj` is stacked
          as (b, l, c, h, w). NOTE(review): `nfe` counts Euler steps only and
          does not double when guidance issues a second model call per step.
        """
        device = model.device
        # Guard None defaults: `**None` would raise TypeError below.
        condition = condition if condition is not None else {}
        uncond_condition = uncond_condition if uncond_condition is not None else {}
        with torch.no_grad():
            # Initial sample
            if z is None:
                z0 = sde.get_z0(torch.zeros(
                    shape, device=device), train=False)
                x = z0.detach().clone()
            else:
                x = z
            if return_traj:
                traj = [x]

            model_fn = get_model_fn(model, train=False)

            # Uniform step over the integration interval [eps, sde.T].
            # Was (1. - eps)/sample_N, which matches num_t below only when
            # sde.T == 1; (sde.T - eps) keeps dt consistent for any T.
            eps = sde.eps
            dt = (sde.T - eps) / sde.sample_N
            for i in range(sde.sample_N):
                num_t = i / sde.sample_N * (sde.T - eps) + eps
                t = torch.ones(shape[0], device=device) * num_t
                # Scale continuous time to the [0, 999] timestep range that
                # diffusers models expect.
                t = (999*t)
                pred = model_fn(x, timestep=t, **condition).sample
                if guidance_scale > 1.0:
                    # Classifier-free guidance: extrapolate from the
                    # unconditional prediction toward the conditional one.
                    uncond_pred = model_fn(x, timestep=t, **uncond_condition).sample
                    pred = uncond_pred + guidance_scale * (pred - uncond_pred)

                # Convert to a diffusion (stochastic) update when
                # sigma_t > 0.0 while preserving the marginal probability.
                sigma_t = sde.sigma_t(num_t)
                if sigma_t > 0.0:
                    pred_sigma = pred + (sigma_t**2)/(2*(sde.noise_scale**2)*((1.-num_t)**2)) * (
                        0.5 * num_t * (1.-num_t) * pred - 0.5 * (2.-num_t)*x.detach().clone())
                    x = x.detach().clone() + pred_sigma * dt + sigma_t * np.sqrt(dt) * torch.randn_like(pred_sigma, device=device)
                else:
                    x = x.detach().clone() + pred * dt

                if return_traj:
                    traj.append(x)

            # NOTE(review): inverse_scaler is deliberately not applied here
            # (matches prior behavior); samples are returned in model space.
            nfe = sde.sample_N
            if return_traj:
                traj = torch.stack(traj).transpose(0, 1)  # (b, l, c, h, w)
                return x, nfe, traj
            return x, nfe

    def rk45_sampler(model, z=None, condition=None, return_traj=False, uncond_condition=None, guidance_scale=1.0,):
        """The probability flow ODE sampler with black-box ODE solver.

        Args:
          model: A velocity model (diffusers-style, see `euler_sampler`).
          z: If present, generate samples from latent code `z`.
          condition: Dict of conditioning kwargs splatted into the model call.
          return_traj: If True, also return the solver's trajectory,
            rearranged to (b, l, c, h, w). NOTE(review): this path assumes a
            4-D `shape` (b, c, h, w) — confirm for non-image data.
          uncond_condition: Unconditional kwargs for classifier-free guidance.
          guidance_scale: CFG scale; values > 1.0 enable guidance.

        Returns:
          `(samples, nfe)` or `(samples, nfe, traj)`; `nfe` is the solver's
          reported number of function evaluations.
        """
        device = model.device
        # Guard None defaults: `**None` would raise TypeError below.
        condition = condition if condition is not None else {}
        uncond_condition = uncond_condition if uncond_condition is not None else {}
        with torch.no_grad():
            rtol = atol = sde.ode_tol
            method = 'RK45'
            eps = sde.eps

            # Initial sample
            if z is None:
                z0 = sde.get_z0(torch.zeros(
                    shape, device=device), train=False)
                x = z0.detach().clone()
            else:
                x = z

            model_fn = get_model_fn(model, train=False)

            def ode_func(t, x, condition):
                # scipy hands us a flat float64 numpy vector; round-trip it
                # through the model's device/dtype.
                x = from_flattened_numpy(x, shape).to(device).type(torch.float32)
                vec_t = torch.ones(shape[0], device=device) * t
                # Scale continuous time to the diffusers [0, 999] range.
                vec_t = (999*vec_t)
                drift = model_fn(x, timestep=vec_t, **condition).sample
                if guidance_scale > 1.0:
                    uncond_drift = model_fn(x, timestep=vec_t, **uncond_condition).sample
                    drift = uncond_drift + guidance_scale * (drift - uncond_drift)

                return to_flattened_numpy(drift)

            from scipy import integrate

            # Black-box ODE solver for the probability flow ODE,
            # integrated from eps to sde.T for numerical stability.
            solution = integrate.solve_ivp(ode_func, (eps, sde.T), to_flattened_numpy(x),
                                           rtol=rtol, atol=atol, method=method, args=(condition,))
            nfe = solution.nfev
            x = torch.tensor(solution.y[:, -1], dtype=torch.float32, device=device).reshape(shape)
            if return_traj:
                b, c, h, w = shape
                # solution.y is (flat_dim, n_steps); unflatten each column.
                traj = torch.tensor(solution.y, dtype=torch.float32, device=device)
                traj = rearrange(traj, '(b c h w) l -> b l c h w', b=b, c=c, h=h, w=w)
                return x, nfe, traj

            # NOTE(review): inverse_scaler is deliberately not applied here
            # (matches prior behavior); samples are returned in model space.
            return x, nfe

    logger.info(f'Type of Sampler: {sde.use_ode_sampler}')
    if sde.use_ode_sampler == 'rk45':
        return rk45_sampler
    elif sde.use_ode_sampler == 'euler':
        return euler_sampler
    else:
        assert False, 'Not Implemented!'