import torch
import parameters as p
import torch.nn.functional as F
import einops


####################### Schedulers #######################
# See more details about how the beta schedule improves the performance of
# diffusion models at https://arxiv.org/abs/2102.09672 .
#
#

def cosine_beta_schedule(timesteps, s=0.008):
    """Cosine beta schedule from Nichol & Dhariwal (arXiv:2102.09672).

    :param timesteps: number of diffusion steps T.
    :param s: small offset that keeps beta_t from being tiny near t = 0.
    :return: 1-D tensor of T betas, clipped to [0.0001, 0.9999].
    """
    steps = timesteps + 1
    x = torch.linspace(0, timesteps, steps)
    # f(t) = cos^2(((t/T + s) / (1 + s)) * pi/2); normalized below so abar_0 = 1.
    alphas_cumprod = torch.cos(
        ((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    # beta_t = 1 - abar_t / abar_{t-1}; clip to avoid the singularity at t = T.
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0.0001, 0.9999)


def linear_beta_schedule(timesteps, device=None):
    """Linearly spaced betas from 1e-4 up to 2e-2 over `timesteps` steps.

    :param timesteps: number of diffusion steps T.
    :param device: optional device for the returned tensor.
    :return: 1-D tensor of T betas.
    """
    return torch.linspace(1e-4, 2e-2, timesteps, device=device)


def quadratic_beta_schedule(timesteps):
    """Betas whose square roots are linearly spaced, i.e. a quadratic
    ramp from 1e-4 to 2e-2 over `timesteps` steps.

    :param timesteps: number of diffusion steps T.
    :return: 1-D tensor of T betas.
    """
    lo, hi = 0.0001, 0.02
    return torch.linspace(lo ** 0.5, hi ** 0.5, timesteps) ** 2


def sigmoid_beta_schedule(timesteps):
    """Sigmoid-shaped ramp of betas between 1e-4 and 2e-2.

    The logits run linearly from -6 to 6, so the schedule is flat at both
    ends and steep in the middle.

    :param timesteps: number of diffusion steps T.
    :return: 1-D tensor of T betas.
    """
    lo, hi = 0.0001, 0.02
    logits = torch.linspace(-6, 6, timesteps)
    return lo + (hi - lo) * torch.sigmoid(logits)


####################### preliminary variables #######################

# Pick the beta schedule here; linear is the DDPM default, and the cosine /
# quadratic / sigmoid variants above are drop-in alternatives.
# NOTE(review): the device is hard-coded to 'cuda', so merely importing this
# module on a CPU-only machine will fail here — consider making it configurable.
betas = linear_beta_schedule(timesteps=p.T,device='cuda')


# alpha_t = 1 - beta_t, and abar_t = prod_{s<=t} alpha_s (cumulative product).
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
# abar_{t-1}: shift right by one with abar_{-1} := 1, then truncate to length T.
alphas_cumprod_prev = F.pad(alphas_cumprod, pad=(1, 0), value=1.)[:p.T]

sqrt_recip_alphas = torch.sqrt(1./alphas)  # 1 / √a_t
sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)  # √ a_t_bar
sqrt_one_minus_alphas_cumprod = torch.sqrt(
    1. - alphas_cumprod)  # √ 1 - a_t_bar


# The variance of q(x_{t-1} | x_t, x_0) — the so-called beta_tilde_t in the
# paper — is derived from the quantities above inside sample() below.



def extract(variable_bar: torch.Tensor, t) -> torch.Tensor:
    '''
    Gather the per-sample values of a 1-D schedule tensor at timesteps ``t``.

    Note: this returns ONLY the values at the indices in ``t`` (one per batch
    element), not "t and all previous" values.

    :param variable_bar: 1-D tensor of length T, e.g. sqrt_alphas_cumprod.
    :param t: long tensor of timestep indices, shape [batch] (or [1]).
    :return: tensor of shape [batch, 1, 1, 1], broadcastable over [B, C, H, W].
    '''
    out = variable_bar.gather(-1, t)
    # Append three singleton dims so the value broadcasts across C, H, W.
    return out.reshape(-1, 1, 1, 1)

# forward diffusion


# Forward diffusion: implementation of x_t(x_0, t).
def inject_noise(x_start, t: torch.Tensor, noise=None, device=None) -> torch.Tensor:
    '''
    Given x_0, sample from q(x_t | x_0) at step t — the forward process of the
    diffusion model: x_t = √abar_t · x_0 + √(1 - abar_t) · eps, eps ~ N(0, I).

    :param x_start: tensor of shape [batch, C, H, W] or [C, H, W].
    :param t: long tensor of shape [1] or [batch] with timestep indices.
    :param noise: optional pre-drawn eps with the same shape as x_start; drawn
                  internally when None. Accepting it lets callers (e.g. the
                  training loss) reuse the exact eps that was injected.
    :param device: device for the noise and schedule coefficients.
    '''
    if noise is None:
        noise = torch.randn_like(x_start, device=device)

    sqrt_alphas_cumprod_t = extract(sqrt_alphas_cumprod, t).to(device=device)
    sqrt_one_minus_alpha_cumprod_t = extract(sqrt_one_minus_alphas_cumprod, t).to(device=device)

    return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alpha_cumprod_t * noise


def Criterion(denoise_model, x_start, t):
    '''
    DDPM training loss: noise x_start to step t, let `denoise_model` predict
    the injected noise, and return the MSE between true and predicted noise.

    :param denoise_model: network mapping (x_t, t) -> predicted noise.
    :param x_start: clean images, shape [batch, C, H, W].
    :param t: long tensor of timestep indices, shape [batch] or [1].
    :return: scalar MSE loss tensor.
    '''
    noise = torch.randn_like(x_start)
    # Build x_t = √abar_t·x_0 + √(1-abar_t)·eps directly so the SAME eps that
    # is injected is also the regression target. (The previous code passed
    # `noise` into inject_noise's `device` parameter, which discarded it.)
    sqrt_ac_t = extract(sqrt_alphas_cumprod, t).to(x_start.device)
    sqrt_om_t = extract(sqrt_one_minus_alphas_cumprod, t).to(x_start.device)
    x_t = sqrt_ac_t * x_start + sqrt_om_t * noise

    predict_noise = denoise_model(x_t, t)
    return F.mse_loss(noise, predict_noise)


@torch.no_grad()
def sample(model, device=None):
    '''
    DDPM ancestral sampling (Algorithm 2): start from x_T ~ N(0, I) and apply
    the learned reverse transitions p(x_{t-1} | x_t) down to t = 0.

    :param model: noise-prediction network mapping (x_t, t) -> eps.
    :param device: device to sample on.
    :return: generated sample of shape p.TENSOR_SHAPE.
    '''
    x_t = torch.randn(p.TENSOR_SHAPE, device=device)

    # Loop-invariant schedules, computed once instead of on every step.
    # Posterior variance beta_tilde_t = beta_t * (1 - abar_{t-1}) / (1 - abar_t);
    # its t = 0 entry is degenerate (0/0), so it is replaced by the t = 1 value.
    posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
    var = torch.cat([posterior_variance[1:2], posterior_variance[1:]])
    # Posterior mean coefficients: mean = c1 * x_t - c2 * eps, with
    # c1 = 1/√a_t and c2 = c1 * beta_t / √(1 - abar_t). Kept as 1-D schedules
    # so extract() can gather them per step.
    coeff2_schedule = sqrt_recip_alphas * (1. - alphas) / torch.sqrt(1. - alphas_cumprod)

    print("sampling")
    for step in reversed(range(p.T)):
        t = torch.tensor([step], device=device)
        # No noise is added at the final (t == 0) step.
        z = (torch.randn(p.TENSOR_SHAPE, device=device) if step > 0
             else torch.zeros(p.TENSOR_SHAPE, device=device))

        eps = model(x_t, t)

        coeff1 = extract(sqrt_recip_alphas, t).to(device=device)
        coeff2 = extract(coeff2_schedule, t).to(device=device)
        mean = coeff1 * x_t - coeff2 * eps

        variance = extract(var, t).to(device=device)
        x_t = mean + torch.sqrt(variance) * z
    return x_t


if __name__ == '__main__':
    # Smoke test: build an (untrained) U-Net and run the full reverse process.
    # NOTE(review): hard-codes 'cuda'; fails on CPU-only machines.
    from model import Unet
    denoise_model = Unet(isGrayImage=True).to('cuda')
    sample(denoise_model,'cuda')