import numpy as np
from monai.utils import set_determinism
set_determinism(0)
from dataset.liver_data import get_loader_liver
# from dataset.btcv_transunet_datasetings import get_loader_btcv
import torch 
import torch.nn as nn 
from ddim_seg.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice, hausdorff_distance_95
from light_training.trainer import Trainer
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from models.uent2d import UNet2D
from models.uent3d import UNet3D
from monai.networks.nets.segresnet import SegResNet
from ddim_seg.unet3d import DiffusionUNet
from ddim_seg.ddim import DDIM
from ddim_seg.nnunet3d_raw import Generic_UNet
from ddim_seg.basic_unet_denose import BasicUNetDe
from ddim_seg.basic_unet import BasicUNetEncoder
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
import argparse
from monai.losses.dice import DiceLoss
import yaml
from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal

from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler

import os

# Expose two GPUs; must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Previous experiment output directories (kept for reference):
# logdir = "./logs_brats/diffusion_seg_multi_label_more_params_mse/"
# logdir = "./logs_brats/diffusion_seg_multi_label_more_params_bce_e300/"
# logdir = "./logs_brats/diffusion_seg_swinunetrde_bce_e300/"
# logdir = "./logs_brats/diffusion_seg_all_loss_e300_norm/"
# logdir = "./logs_brats/diffusion_seg_all_loss_e300_norm_fix_conv_bugs/"
# logdir = "./logs_brats/diffusion_seg_all_loss_e300_norm_fix_conv_bugs_embed/"
logdir = "logs_liver/diffusion_seg_base/"  # current run: liver diffusion-seg baseline

model_save_path = os.path.join(logdir, "model")  # checkpoints live under <logdir>/model
max_epoch = 1000   # total training epochs
batch_size = 2     # loader batch size (presumably per process under DDP — TODO confirm)
val_every = 50     # run validation every 50 epochs
env = "DDP"        # trainer environment type: distributed data parallel
num_gpus = 2
device = "cuda:0"

def parse_config(config_path):
    """Read a YAML file and return its contents as a nested argparse.Namespace.

    Nested mappings become nested Namespace objects, so config values are
    reachable with attribute access (e.g. ``cfg.model.depth``).
    """

    def _to_namespace(mapping):
        # Recursively turn dicts into attribute-style namespaces.
        ns = argparse.Namespace()
        for name, val in mapping.items():
            setattr(ns, name, _to_namespace(val) if isinstance(val, dict) else val)
        return ns

    with open(config_path, "r") as f:
        raw = yaml.safe_load(f)
    return _to_namespace(raw)


class FuseModel(nn.Module):
    """Diffusion segmentation model.

    Wraps a conditional denoising UNet together with two Gaussian-diffusion
    schedules: a full 1000-step schedule used for forward noising during
    training (``q_sample``) and a re-spaced 10-step schedule for fast DDIM
    sampling at inference time.
    """

    def __init__(self) -> None:
        super().__init__()
        # self.embed_model = BasicUNetEncoder(3, 1, 2, [64, 64, 128, 256, 512, 64])

        # Denoising UNet: args are (spatial_dims=3, in_channels=3, out_channels=2, features).
        # NOTE(review): in_channels=3 presumably = 2 noisy-label channels + 1 image
        # channel concatenated inside BasicUNetDe — confirm against its definition.
        self.model = BasicUNetDe(3, 3, 2, [64, 64, 128, 256, 512, 64], 
                                act = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}))

        betas = get_named_beta_schedule("linear", 1000)
        # Full 1000-step diffusion: used only for q_sample during training.
        self.diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [1000]),
                                            betas=betas,
                                            model_mean_type=ModelMeanType.START_X,
                                            model_var_type=ModelVarType.FIXED_LARGE,
                                            loss_type=LossType.MSE,
                                            )

        # Re-spaced 10-step diffusion: used for fast DDIM sampling at validation.
        self.sample_diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [10]),
                                            betas=betas,
                                            model_mean_type=ModelMeanType.START_X,
                                            model_var_type=ModelVarType.FIXED_LARGE,
                                            loss_type=LossType.MSE,
                                            )
        # Uniform timestep sampler over the 1000 training steps.
        self.sampler = UniformSampler(1000)


    def forward(self, image=None, x=None, pred_type=None, step=None, embedding=None):
        """Multi-purpose forward, dispatched on ``pred_type``.

        - "q_sample":    noise ``x`` at a uniformly sampled timestep;
                         returns ``(x_t, t, noise)``.
        - "denose":      predict x_start from ``x`` at timestep ``step``,
                         conditioned on ``image``; returns logits.
        - "ddim_sample": run the 10-step DDIM loop conditioned on ``image``;
                         returns the predicted x_start of shape (1, 2, 96, 96, 96).

        Raises:
            ValueError: for any unrecognized ``pred_type`` (previously this
                silently returned ``None``, hiding caller typos).
        """
        if pred_type == "q_sample":
            # randn_like already allocates on x's device; no .to() needed.
            noise = torch.randn_like(x)
            # Sampler also returns importance weights, unused here.
            t, _ = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        elif pred_type == "denose":
            return self.model(x, t=step, image=image)

        elif pred_type == "ddim_sample":
            # Shape is fixed to one 96^3 patch with 2 label channels; the
            # sliding-window inferer feeds patches of exactly this size.
            sample_out = self.sample_diffusion.ddim_sample_loop(self.model, (1, 2, 96, 96, 96), model_kwargs={"image": image})
            sample_out = sample_out["pred_xstart"]
            return sample_out

        raise ValueError(f"unknown pred_type: {pred_type!r}")

class BraTSTrainer(Trainer):
    """Trainer for diffusion-based liver segmentation (2 foreground classes).

    Training noises the (scaled) label map and asks the model to recover it,
    supervised by BCE + MSE + Dice. Validation runs 10-step DDIM sampling
    inside a sliding-window inferer and reports per-class Dice and HD95.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # Sliding-window inference over 96^3 patches with 50% overlap; matches
        # the fixed patch shape hard-coded in FuseModel's ddim_sample branch.
        self.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                        sw_batch_size=1,
                                        overlap=0.5)
        self.model = FuseModel()

        self.best_mean_dice = 0.0
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=2e-4, weight_decay=1e-3)
        # NOTE(review): self.ce is never used in this file; kept for compatibility.
        self.ce = nn.CrossEntropyLoss()
        self.mse = nn.MSELoss()
        self.scheduler = LinearWarmupCosineAnnealingLR(self.optimizer,
                                                  warmup_epochs=50,
                                                  max_epochs=max_epochs)

        self.bce = nn.BCEWithLogitsLoss()
        self.dice_loss = DiceLoss(sigmoid=True)

    def training_step(self, batch):
        """One diffusion-denoising training step; returns the scalar loss."""
        image, label = self.get_input(batch)
        # Scale binary labels from {0, 1} to {-1, 1} for the diffusion process.
        x_start = label * 2 - 1
        x_t, t, noise = self.model(x=x_start, pred_type="q_sample")
        # Model predicts x_start logits from the noised labels, conditioned on image.
        pred_xstart = self.model(x=x_t, step=t, image=image, pred_type="denose")

        # Dice and BCE operate on raw logits (both apply sigmoid internally).
        loss_dice = self.dice_loss(pred_xstart, label)
        loss_bce = self.bce(pred_xstart, label)

        # MSE is computed on sigmoid probabilities against the {0, 1} labels.
        pred_xstart = torch.sigmoid(pred_xstart)
        loss_mse = self.mse(pred_xstart, label)

        loss = loss_bce + loss_mse + loss_dice

        self.log("train_loss", loss, step=self.global_step)

        return loss

    def get_input(self, batch):
        """Extract (image, label) from a batch dict; labels become float one-hot channels."""
        image = batch["image"]
        label = batch["label"]

        label = self.convert_labels(label)
        label = label.float()
        return image, label

    def convert_labels(self, labels):
        """Convert an integer label map into 2 boolean channels.

        Output channel i-1 is the mask (labels == i) for i in {1, 2},
        concatenated along dim 1.
        """
        return torch.cat([labels == i for i in range(1, 3)], dim=1)

    def validation_end(self, mean_val_outputs):
        """Aggregate validation metrics; checkpoint on improved mean Dice.

        ``mean_val_outputs`` holds the epoch means of the four metrics returned
        by ``validation_step``: [dice_c1, dice_c2, hd95_c1, hd95_c2].
        """
        # BUG FIX: previously ALL four entries were averaged into "mean_dice",
        # mixing HD95 (lower-is-better, mm scale) into the model-selection
        # score. Only the first two entries are Dice scores.
        dices = mean_val_outputs[:2]
        print(mean_val_outputs)
        mean_dice = sum(dices) / len(dices)

        self.log("mean_dice", mean_dice, step=self.epoch)

        if mean_dice > self.best_mean_dice:
            self.best_mean_dice = mean_dice
            save_new_model_and_delete_last(self.model, 
                                            os.path.join(model_save_path, 
                                            f"best_model_{mean_dice:.4f}.pt"), 
                                            delete_symbol="best_model")

        # Always refresh the "final" checkpoint as well.
        save_new_model_and_delete_last(self.model, 
                                        os.path.join(model_save_path, 
                                        f"final_model_{mean_dice:.4f}.pt"), 
                                        delete_symbol="final_model")

        print(f" mean_dice is {mean_dice}")

    def validation_step(self, batch):
        """DDIM-sample a segmentation and return [dice_c1, dice_c2, hd95_c1, hd95_c2].

        Classes absent from the ground truth contribute NaN so they can be
        excluded from the epoch mean.
        """
        image, label = self.get_input(batch)

        output = self.window_infer(image, self.model, pred_type="ddim_sample")

        output = torch.sigmoid(output)
        output = (output > 0.5).float().cpu().numpy()

        target = label.cpu().numpy()
        dices = []
        hd = []
        num_classes = 2
        for i in range(num_classes):
            pred_c = output[:, i]
            target_c = target[:, i]

            if target_c.sum() == 0:
                # Class absent in ground truth: metrics undefined.
                dices.append(float("nan"))
                hd.append(float("nan"))
            else:
                dices.append(dice(pred_c, target_c))
                hd.append(hausdorff_distance_95(pred_c, target_c))

        # Dice metrics first, then HD95 — validation_end relies on this order.
        return dices + hd
    
if __name__ == "__main__":

    # Build the DDP trainer; training_script=__file__ lets the launcher
    # re-invoke this exact script in worker processes.
    trainer = BraTSTrainer(env_type=env,
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            logdir=logdir,
                            val_every=val_every,
                            num_gpus=num_gpus,
                            master_port=17751,
                            training_script=__file__)

    # Fold 0 of the liver dataset; cache=True presumably enables MONAI's
    # in-memory caching — confirm in dataset.liver_data.
    train_ds, val_ds = get_loader_liver(batch_size=batch_size, fold=0, cache=True)

    trainer.train(train_dataset=train_ds, val_dataset=val_ds)
