import numpy as np
from light_training.dataloading.dataset import get_loader
# from dataset.brats_data_utils_resample128 import get_loader_brats
import torch 
import torch.nn as nn 
from ddim_seg.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from models.uent2d import UNet2D
from models.uent3d import UNet3D
from monai.networks.nets.segresnet import SegResNet
from ddim_seg.unet3d import DiffusionUNet
from ddim_seg.ddim import DDIM
from ddim_seg.nnunet3d_raw import Generic_UNet
from ddim_seg.basic_unet_denose import BasicUNetDe
from ddim_seg.basic_unet import BasicUNetEncoder
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
import argparse
from monai.losses.dice import DiceLoss
# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal

from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
set_determinism(123)
import os

class DiffUNet(nn.Module):
    """Diffusion-based 3D segmentation network.

    Wraps an nnU-Net encoder (conditioning features) and an nnU-Net
    denoiser inside a DDPM/DDIM diffusion wrapper.  A single ``forward``
    dispatches on ``pred_type``:

    * ``"q_sample"``    -- draw a random timestep and noise the target mask
                           (training forward process).
    * ``"denose"``      -- predict x_0 from a noisy mask conditioned on the
                           image (training reverse step; spelling kept for
                           caller compatibility).
    * ``"ddim_sample"`` -- run the short DDIM schedule and return the sum of
                           all intermediate samples (inference).
    """

    def __init__(self, timesteps: int = 1000, sample_steps: int = 2,
                 image_size: int = 128) -> None:
        """Build encoder, denoiser and diffusion schedules.

        Args:
            timesteps: length of the full training noise schedule.
            sample_steps: number of DDIM steps used at inference.
            image_size: cubic volume edge length (D = H = W).
        """
        super().__init__()
        # Local imports kept as in the original to avoid import-time cost
        # for users that never construct this model.
        from models.nnunet_denoise.get_unet3d_denoise import get_nnunet3d
        from models.nnunet_denoise.get_unet3d_encoder import get_nnunet3d_encoder

        self.image_size = image_size
        self.sample_steps = sample_steps

        # Image encoder produces the conditioning embeddings; the denoiser
        # takes (noisy mask + image) = 2 input channels.
        self.embed_model = get_nnunet3d_encoder(in_chans=1, out_chans=1)
        self.model = get_nnunet3d(in_chans=2, out_chans=1)

        betas = get_named_beta_schedule("linear", timesteps)
        # Full schedule: used only for q_sample during training.
        self.diffusion = SpacedDiffusion(
            use_timesteps=space_timesteps(timesteps, [timesteps]),
            betas=betas,
            model_mean_type=ModelMeanType.START_X,
            model_var_type=ModelVarType.FIXED_LARGE,
            loss_type=LossType.MSE,
        )
        # Respaced short schedule: fast DDIM sampling at inference.
        self.sample_diffusion = SpacedDiffusion(
            use_timesteps=space_timesteps(timesteps, [sample_steps]),
            betas=betas,
            model_mean_type=ModelMeanType.START_X,
            model_var_type=ModelVarType.FIXED_LARGE,
            loss_type=LossType.MSE,
        )
        self.sampler = UniformSampler(timesteps)

    def forward(self, image=None, x=None, pred_type=None, step=None):
        """Dispatch on ``pred_type``; see class docstring.

        Raises:
            ValueError: if ``pred_type`` is not one of the known modes
                (the original silently returned ``None`` here).
        """
        if pred_type == "q_sample":
            # randn_like already allocates on x.device — no .to() needed.
            noise = torch.randn_like(x)
            t, _ = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        elif pred_type == "denose":
            embeddings = self.embed_model(image)
            return self.model(x, t=step, image=image, embeddings=embeddings)

        elif pred_type == "ddim_sample":
            embeddings = self.embed_model(image)
            b = image.shape[0]
            s = self.image_size
            sample_out = self.sample_diffusion.ddim_sample_loop(
                self.model, (b, 1, s, s, s),
                model_kwargs={"image": image, "embeddings": embeddings})

            # Sum (deliberately not average) the predicted samples from
            # every DDIM step, matching the original behavior.
            sample_return = torch.zeros((b, 1, s, s, s), device=image.device)
            for index in range(self.sample_steps):
                sample_return += sample_out["all_samples"][index]

            return sample_return

        raise ValueError(f"unknown pred_type: {pred_type!r}")

