import numpy as np
import torch.nn as nn 

from monai.utils import set_determinism
from ddim_seg.basic_unet_denose import BasicUNetDe
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
import argparse
import torch 
from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
# Fix global RNG seeds (torch / numpy / random) via MONAI so runs are reproducible.
set_determinism(123)
import os

class DiffUNet(nn.Module):
    """Diffusion-based 3D segmentation network.

    Wraps a ``BasicUNetDe`` denoiser (3 spatial dims, 17 input channels,
    16 output channels) with two ``SpacedDiffusion`` schedules:

    * ``self.diffusion`` — full 1000-step linear schedule, used for the
      training-time forward process (``q_sample``) and denoising loss.
    * ``self.sample_diffusion`` — 5-step respaced DDIM schedule, used for
      fast sampling at inference time.

    The ``norm`` constructor argument is currently unused; it is kept for
    interface compatibility with callers.
    """

    def __init__(self, norm="ins") -> None:
        super().__init__()

        # Denoiser: 3D UNet taking 17 channels (16 noisy label channels +
        # 1 image channel) and predicting 16 segmentation channels.
        self.model = BasicUNetDe(3, 17, 16, [32, 32, 64, 128, 256, 32],
                                 act=("LeakyReLU", {"negative_slope": 0.1, "inplace": True}))

        betas = get_named_beta_schedule("linear", 1000)

        # Full schedule for training; the model predicts x_0 directly
        # (ModelMeanType.START_X) with fixed-large variance.
        self.diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [1000]),
                                         betas=betas,
                                         model_mean_type=ModelMeanType.START_X,
                                         model_var_type=ModelVarType.FIXED_LARGE,
                                         loss_type=LossType.MSE,
                                         )

        # Respaced 5-step schedule for fast DDIM sampling at inference.
        self.sample_diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [5]),
                                                betas=betas,
                                                model_mean_type=ModelMeanType.START_X,
                                                model_var_type=ModelVarType.FIXED_LARGE,
                                                loss_type=LossType.MSE,
                                                )

        # Uniform timestep sampler over the 1000 training steps.
        self.sampler = UniformSampler(1000)

    def forward(self, image=None, x=None, pred_type=None, step=None, embedding=None):
        """Dispatch on ``pred_type`` to one of four modes.

        Args:
            image: conditioning image tensor, shape ``(B, C, D, H, W)``
                (assumed 3D volumes — spatial dims are read from it for
                sampling modes).
            x: label / noisy-label tensor for the training modes.
            pred_type: one of ``"q_sample"``, ``"denose"`` (historical
                misspelling, ``"denoise"`` also accepted), ``"ddim_sample"``,
                or ``"sample_one_step"``.
            step: diffusion timestep tensor, used by the denoising mode.
            embedding: unused; kept for interface compatibility.

        Returns:
            Mode-dependent: ``(x_t, t, noise)`` for ``"q_sample"``; the
            denoiser output for ``"denose"``/``"sample_one_step"``; the sum
            of all DDIM step predictions for ``"ddim_sample"``.

        Raises:
            ValueError: if ``pred_type`` is not a supported mode.
        """
        if pred_type == "q_sample":
            # randn_like already allocates on x's device; no .to() needed.
            noise = torch.randn_like(x)
            # The sampler also returns importance weights, which are only
            # needed for importance-sampled losses — discard them here.
            t, _ = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        # Accept the historical misspelling "denose" alongside "denoise".
        if pred_type in ("denose", "denoise"):
            return self.model(x, t=step, image=image)

        if pred_type == "ddim_sample":
            b = image.shape[0]
            # Derive spatial dims from the conditioning image instead of
            # hard-coding 128^3, so other volume sizes work as well.
            shape = (b, 16, *image.shape[2:])
            sample_out = self.sample_diffusion.ddim_sample_loop(
                self.model, shape, model_kwargs={"image": image})
            # Accumulate (sum) the x_0 predictions from every DDIM step.
            sample_return = torch.zeros(shape, device=image.device)
            for sample in sample_out["all_samples"]:
                sample_return += sample
            return sample_return

        if pred_type == "sample_one_step":
            b = image.shape[0]
            # Denoise a single step from pure noise at the final timestep.
            t = torch.tensor([999], device=image.device)
            x_t = torch.randn(b, 16, *image.shape[2:], device=image.device)
            return self.model(x_t, t, image=image)

        # Previously an unknown mode silently returned None; fail loudly.
        raise ValueError(f"Unknown pred_type: {pred_type!r}")