import numpy as np
from light_training.dataloading.dataset import get_train_val_test_loader_seperate
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.files_helper import save_new_model_and_delete_last
from models.uent3d import UNet3D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from einops import rearrange
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.attentionunet import AttentionUnet
from light_training.loss.compound_losses import DC_and_CE_loss
from light_training.loss.dice import MemoryEfficientSoftDiceLoss
from light_training.evaluation.metric import dice
from monai.losses.dice import DiceLoss
set_determinism(123)
import os
from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
from medpy import metric

from models.bit_diffusion import decimal_to_bits, bits_to_decimal


# Restrict this process to GPUs 4-7 (matches num_gpus = 4 below).
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
# data_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0-monai-resample35-15-15/"
# data_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0-nnunet-fullres/"
# Preprocessed full-resolution dataset splits consumed by
# get_train_val_test_loader_seperate in __main__.
train_dir = "./data/fullres/train"
val_dir = "./data/fullres/val"
test_dir = "./data/fullres/test"

# Experiment output directory (logs + checkpoints; see model_save_path).
logdir = "./logs/diffunet_init_gpu4_bitlabels/"

env = "DDP"  # trainer environment type (distributed data parallel)
model_save_path = os.path.join(logdir, "model")
max_epoch = 1000
batch_size = 2
val_every = 5  # presumably "validate every N epochs" — confirm in Trainer
num_gpus = 4
device = "cuda:0"
patch_size = [128, 128, 128]  # 3-D patch size; matches sampling shapes below

class CEAndDiceLoss(nn.Module):
    """Sum of BCE-with-logits and soft Dice loss on raw logits.

    Note: despite the "CE" in the name, the cross-entropy term is binary
    (BCEWithLogitsLoss), matching the multi-channel bit-label encoding.
    """

    def __init__(self) -> None:
        super().__init__()
        self.loss_func = nn.BCEWithLogitsLoss()
        # DiceLoss applies sigmoid internally; reduces over the batch jointly.
        self.dice_loss = DiceLoss(sigmoid=True, batch=True)

    def forward(self, pred, label):
        """Return bce(pred, label) + dice(pred, label) as a scalar tensor."""
        bce_term = self.loss_func(pred, label)
        dice_term = self.dice_loss(pred, label)
        return bce_term + dice_term

class DiffUNetInit(nn.Module):
    """Diffusion-model wrapper around a 3-D denoising nnU-Net.

    ``forward`` dispatches on ``pred_type``:
      * ``"q_sample"``        – diffuse clean bit-labels ``x`` to a uniformly
                                sampled timestep; returns (x_t, t, noise).
      * ``"denose"``          – predict x_0 from noisy input ``x`` at ``step``
                                conditioned on ``image`` (key spelling kept
                                for compatibility with existing callers).
      * ``"ddim_sample"``     – DDIM sampling with the 2-step respaced schedule.
      * ``"sample_one_step"`` – single denoising pass from pure noise at t=999.
    Unknown ``pred_type`` values return ``None`` (unchanged behavior).
    """

    def __init__(self, bit_channels=16, sample_size=128) -> None:
        """Defaults reproduce the original hard-coded configuration:
        16 bit-label channels and 128^3 sampling volumes."""
        super().__init__()
        from models.nnunet_denoise.get_unet3d_denoise import get_nnunet3d

        self.bit_channels = bit_channels
        self.sample_size = sample_size
        # NOTE(review): first argument (17) presumably relates to the number of
        # label classes expected by get_nnunet3d — confirm against its signature.
        self.model = get_nnunet3d(17, bit_channels)

        betas = get_named_beta_schedule("linear", 1000)
        # Training-time diffusion: the full 1000-step schedule.
        self.diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [1000]),
                                         betas=betas,
                                         model_mean_type=ModelMeanType.START_X,
                                         model_var_type=ModelVarType.FIXED_LARGE,
                                         loss_type=LossType.MSE,
                                         )
        # Sampling-time diffusion: respaced down to 2 steps for fast inference.
        self.sample_diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [2]),
                                                betas=betas,
                                                model_mean_type=ModelMeanType.START_X,
                                                model_var_type=ModelVarType.FIXED_LARGE,
                                                loss_type=LossType.MSE,
                                                )
        self.sampler = UniformSampler(1000)

    def _sample_shape(self, batch):
        """Shape of a bit-label volume for ``batch`` samples."""
        s = self.sample_size
        return (batch, self.bit_channels, s, s, s)

    def forward(self, image=None, x=None, pred_type=None, step=None, embedding=None):
        if pred_type == "q_sample":
            # randn_like already allocates on x's device/dtype; the original
            # extra .to(x.device) was a no-op and has been dropped.
            noise = torch.randn_like(x)
            t, _weight = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        elif pred_type == "denose":
            return self.model(x, t=step, image=image)

        elif pred_type == "ddim_sample":
            shape = self._sample_shape(image.shape[0])
            sample_out = self.sample_diffusion.ddim_sample_loop(
                self.model, shape, model_kwargs={"image": image})
            # Accumulate the two intermediate samples produced by the
            # 2-step respaced schedule (matches space_timesteps(1000, [2])).
            sample_return = torch.zeros(shape, device=image.device)
            for index in range(2):
                sample_return += sample_out["all_samples"][index]
            return sample_return

        elif pred_type == "sample_one_step":
            # One denoising pass starting from pure Gaussian noise at t = 999.
            t = torch.tensor([999], device=image.device)
            x_t = torch.randn(self._sample_shape(image.shape[0]), device=image.device)
            return self.model(x_t, t, image=image)

class BraTSTrainer(Trainer):
    """Trainer for diffusion-based multi-organ segmentation with bit-encoded labels.

    Labels are converted to a multi-channel bit representation
    (``decimal_to_bits``); the diffusion model predicts x_0 from a noised
    version and is supervised with Dice + BCE + MSE. Validation samples a
    segmentation in one step and reports per-class dice (16 foreground classes).
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)

        # Imported lazily so the module can be imported without this dependency.
        from models.diffunet_monai import DiffUNet

        self.model = DiffUNet()
        self.patch_size = patch_size
        self.train_process = 16

        # Loss terms combined in training_step.
        self.bce = nn.BCEWithLogitsLoss()
        self.dice_loss = DiceLoss(sigmoid=True, batch=True)
        self.mse = nn.MSELoss()

        self.best_mean_dice = 0.0
        self.augmentation = False

        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1e-4, weight_decay=1e-5)
        self.scheduler_type = "poly"

    def training_step(self, batch):
        """Run one denoising training step and return the scalar loss."""
        image, label = self.get_input(batch)

        # Encode integer labels as bit channels, then map {0,1} -> {-1,1}
        # for the diffusion process.
        label = decimal_to_bits(label)
        x_start = label * 2 - 1

        x_t, t, noise = self.model(x=x_start, pred_type="q_sample")
        # "denose" is the (misspelled) dispatch key the model expects.
        pred_xstart = self.model(x=x_t, step=t, image=image, pred_type="denose")

        loss_dice = self.dice_loss(pred_xstart, label)
        loss_bce = self.bce(pred_xstart, label)
        # MSE between predicted bit probabilities and the {0,1} bit labels.
        loss_mse = self.mse(torch.sigmoid(pred_xstart), label)

        loss = loss_dice + loss_bce + loss_mse

        self.log("denoise_loss", loss, step=self.global_step)
        self.log("dice_loss", loss_dice, step=self.global_step)
        return loss

    def get_input(self, batch):
        """Extract the (image, float label) tensor pair from a loader batch."""
        image = batch["data"]
        label = batch["seg"].float()
        return image, label

    def convert_labels(self, labels):
        """One-hot encode foreground classes 1..16 along dim 1 (bool channels)."""
        channels = [labels == i for i in range(1, 17)]
        return torch.cat(channels, dim=1)

    def cal_metric(self, gt, pred, voxel_spacing=(1.0, 1.0, 1.0)):
        """Return np.array([dice, hd95]) for one binary (gt, pred) mask pair.

        hd95 computation is currently disabled; a placeholder value of 50 fills
        its slot. ``voxel_spacing`` is kept for API compatibility but is unused
        while hd95 is disabled. (Fixed: the default was a mutable list.)
        """
        if pred.sum() > 0 and gt.sum() > 0:
            return np.array([dice(pred, gt), 50])
        if gt.sum() == 0 and pred.sum() == 0:
            # Both masks empty: perfect agreement by convention.
            return np.array([1.0, 50])
        # Exactly one of the masks is empty: complete miss.
        return np.array([0.0, 50])

    def validation_step(self, batch):
        """Sample a segmentation in one denoising step; return per-class dice."""
        image, label = self.get_input(batch)

        output = self.model(image, pred_type="sample_one_step")
        # Threshold bit probabilities, then decode bits back to class indices.
        output = torch.sigmoid(output) > 0.5
        output = bits_to_decimal(output).cpu().numpy()

        target = label[:, 0].cpu().numpy()
        dices = []
        for i in range(1, 17):
            pred_c = output == i
            target_c = target == i
            cal_dice, _ = self.cal_metric(target_c, pred_c)  # hd slot unused
            dices.append(cal_dice)
        return dices

    def validation_end(self, mean_val_outputs, val_outputs):
        """Log per-class/mean dice and checkpoint best + final models."""
        dices = mean_val_outputs

        print(f"dices is {dices}")

        mean_dice = sum(dices) / len(dices)

        for index, d in enumerate(dices, start=1):
            self.log(f"dice_{index}", d, step=self.epoch)

        self.log("mean_dice", mean_dice, step=self.epoch)

        if mean_dice > self.best_mean_dice:
            self.best_mean_dice = mean_dice
            save_new_model_and_delete_last(self.model,
                                           os.path.join(model_save_path,
                                           f"best_model_{mean_dice:.4f}.pt"),
                                           delete_symbol="best_model")

        # Always refresh the "final" checkpoint regardless of score.
        save_new_model_and_delete_last(self.model,
                                       os.path.join(model_save_path,
                                       f"final_model_{mean_dice:.4f}.pt"),
                                       delete_symbol="final_model")

        print(f"mean_dice is {mean_dice}")

if __name__ == "__main__":

    # Build the trainer from the module-level experiment configuration.
    trainer = BraTSTrainer(env_type=env,
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir=logdir,
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17751,
                            training_script=__file__)
    
    # NOTE(review): test_dir is configured above but deliberately not passed
    # here (test_dir=None) — only train/val loaders are used in this script.
    train_ds, val_ds, test_ds = get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None)

    trainer.train(train_dataset=train_ds, val_dataset=val_ds)
