import numpy as np
from dataset.liver_data import get_loader_liver
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from ddim_seg.basic_unet import BasicUNetEncoder
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from light_training.evaluation.metric import dice, hausdorff_distance_95, jaccard, recall, fscore
from models.uent2d import UNet2D
from models.uent3d import UNet3D
from models.uent25d import UNet25D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
import argparse
import yaml 
from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
from ddim_seg.unet3d import DiffusionUNet
from ddim_seg.basic_unet_denose import BasicUNetDe
from monai.networks.nets.vnet import VNet
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.attentionunet import AttentionUnet
from medpy.metric import dc 

from einops import rearrange


# Fix all random seeds (numpy / torch / python) for reproducible evaluation.
set_determinism(123)
import os

# Pin evaluation to a single physical GPU; "cuda:0" below then maps to it.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# NOTE(review): path points at BraTS2020 but this script evaluates liver
# models via get_loader_liver — confirm this constant is actually unused here.
data_dir = "/home/xingzhaohu/sharefs/datasets/brats2020/MICCAI_BraTS2020_TrainingData/"

max_epoch = 300      # trainer config (evaluation-only script; not trained here)
batch_size = 2       # loader batch size passed to get_loader_liver
val_every = 10       # validation interval expected by the Trainer base class
num_gpus = 2         # NOTE(review): trainer below is built with num_gpus=1 — confirm intended
device = "cuda:0"    # logical device after CUDA_VISIBLE_DEVICES remapping
thres = 0.7          # presumably a probability threshold; unused in the visible code

class FuseModel(nn.Module):
    """Denoising UNet paired with two diffusion schedules.

    Holds a single ``BasicUNetDe`` denoiser plus:
      * ``diffusion`` — the full 1000-step schedule used for training-time
        forward noising (``q_sample``);
      * ``sample_diffusion`` — a 10-step respaced schedule for fast DDIM
        sampling at inference time.

    ``forward`` dispatches on ``pred_type`` so the model can be driven through
    MONAI's ``SlidingWindowInferer`` with keyword arguments.
    """

    def __init__(self) -> None:
        super().__init__()
        # 3 spatial dims, 3 input channels, 2 output channels (liver / tumor).
        self.model = BasicUNetDe(3, 3, 2, [64, 64, 128, 256, 512, 64],
                                 act=("LeakyReLU", {"negative_slope": 0.1, "inplace": True}))

        betas = get_named_beta_schedule("linear", 1000)
        # Full schedule: all 1000 timesteps, predicting x_0 (START_X).
        self.diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [1000]),
                                         betas=betas,
                                         model_mean_type=ModelMeanType.START_X,
                                         model_var_type=ModelVarType.FIXED_LARGE,
                                         loss_type=LossType.MSE,
                                         )
        # Respaced schedule: only 10 timesteps, for cheap DDIM inference.
        self.sample_diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [10]),
                                               betas=betas,
                                               model_mean_type=ModelMeanType.START_X,
                                               model_var_type=ModelVarType.FIXED_LARGE,
                                               loss_type=LossType.MSE,
                                               )
        # Uniformly samples training timesteps t in [0, 1000).
        self.sampler = UniformSampler(1000)

    def forward(self, image=None, x=None, pred_type=None, step=None, embedding=None):
        """Dispatch on ``pred_type``.

        * ``"q_sample"``   — noise ``x`` at a uniformly sampled timestep;
          returns ``(noised_x, t, noise)``.
        * ``"denose"``     — run the denoiser on ``x`` at timestep ``step``
          conditioned on ``image``.
        * ``"ddim_sample"``— full 10-step DDIM loop; returns the predicted
          clean segmentation (``pred_xstart``) of shape (1, 2, 96, 96, 96).
        """
        if pred_type == "q_sample":
            # randn_like already allocates on x's device; the original
            # .to(x.device) call was a redundant no-op.
            noise = torch.randn_like(x)
            # Importance weight from the sampler is unused here.
            t, _ = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        elif pred_type == "denose":
            return self.model(x, t=step, image=image)

        elif pred_type == "ddim_sample":
            sample_out = self.sample_diffusion.ddim_sample_loop(
                self.model, (1, 2, 96, 96, 96), model_kwargs={"image": image})
            return sample_out["pred_xstart"]

class BraTSTrainer(Trainer):
    """Trainer subclass used here purely for sliding-window inference.

    ``self.model`` is swapped in from the outside (see the ``__main__`` loop),
    so it starts as ``None``; ``validation_step`` runs the current model over
    96^3 patches and post-processes according to ``pred_type``.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # Default inferer; overwritten per-model in the evaluation loop.
        self.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                                 sw_batch_size=1,
                                                 overlap=0.25)
        self.model = None

    def get_input(self, batch):
        """Extract the image tensor from a batch dict."""
        return batch["image"]

    def validation_step(self, batch, pred_type=None):
        """Run sliding-window inference and return a numpy prediction.

        With a ``pred_type`` (diffusion models): sigmoid + 0.5 threshold per
        channel. Without one (plain segmentation nets): channel argmax.
        """
        image = self.get_input(batch)
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if pred_type is not None:
            output = self.window_infer(image, self.model, pred_type=pred_type)
            output = (torch.sigmoid(output) > 0.5).float()
            output = output.cpu().numpy()
        else:
            output = self.window_infer(image, self.model).argmax(dim=1).cpu().numpy()

        return output

def convert_label(output):
    """Collapse a two-channel binary prediction into a single label volume.

    ``output`` has shape (b, 2, d, w, h): channel 0 flags liver voxels and
    channel 1 flags tumor voxels. Returns a float array of shape (b, d, w, h)
    with 0 = background, 1 = liver, 2 = tumor. Tumor is written first so that
    liver overwrites any voxel flagged by both channels.
    """
    batch = output.shape[0]
    depth, width, height = output.shape[2:5]
    label_map = np.zeros((batch, depth, width, height))
    label_map[output[:, 1] == 1] = 2  # tumor
    label_map[output[:, 0] == 1] = 1  # liver wins on overlap
    return label_map

if __name__ == "__main__":
    # Evaluation / visualization driver: runs each pretrained model over the
    # liver validation set, saves image/label/prediction volumes into one HDF5
    # file per case, and prints per-class Dice scores.
    from monai.data import DataLoader
    import time 
    import h5py
    plot_dir = "./logs_liver/diffusion_plots/"
    os.makedirs(plot_dir, exist_ok=True)

    def to_image_save(image):
        """Min-max normalize a volume to [0, 255] uint8 for storage.

        NOTE(review): divides by (max - min); a constant-valued volume would
        raise/produce NaN — presumably never happens for real scans. Confirm.
        """
        image = (image - image.min()) / (image.max() - image.min())
        image = image * 255.0
        image = image.astype(np.uint8)
        return image 
    
    def to_label_save(image):
        """Identity hook — labels are stored as-is (kept for symmetry)."""
        return image 

    # Only the validation split is used; caching disabled for a one-off pass.
    train_ds, val_ds = get_loader_liver(batch_size=batch_size, fold=0, cache=False)
    
    # Trainer is used purely as an inference harness; no training happens.
    trainer = BraTSTrainer(env_type="pytorch",
                            max_epochs=max_epoch,
                            batch_size=batch_size,
                            device=device,
                            val_every=val_every,
                            num_gpus=1,
                            master_port=17751,
                            training_script=__file__)
    

    # Baseline segmentation networks: 1 input channel, 3 output classes.
    swinunetr_model = SwinUNETR([96, 96, 96], 1, 3, feature_size=48)
    unetr_model = UNETR(1, 3, [96, 96, 96], mlp_dim=1024, pos_embed="conv", norm_name="instance")

    # TransBTS factory returns (args, model); only the model is kept.
    _, model = TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned")
    transbts_model = model
    segres_model = SegResNet(3, 16, 1, 3)
    modelsgen_model = UNet3DModelGen(3)
    attunet_model = AttentionUnet(3, 1, 3, channels=[32, 64, 128, 256], strides=[2, 2, 2, 2])

    # The diffusion-based model is handled specially below (DDIM sampling).
    diffusion_model = FuseModel()
    # names and models are parallel lists; order must match `checkpoints`.
    models_name = ["swinunetr_model", "unetr_model", "transbts_model", "segres_model",  "modelsgen_model", "attunet_model", "diffusion_model"]
    models = [swinunetr_model, unetr_model, transbts_model, segres_model, modelsgen_model, attunet_model, diffusion_model]

    # Hard-coded checkpoint paths, one per model, same order as `models`.
    checkpoints = [
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/swin_unetr/model/final_model_0.7208.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/unetr/model/best_model_0.6609.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/transbts/model/best_model_0.7005.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/segresnet/model/final_model_0.7042.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/modelsgenesis/model/final_model_0.7171.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/attentionUNet/model/final_model_0.7154.pt",
        "/home/xingzhaohu/jiuding_code/diffusion_liver/logs_liver/diffusion_seg_base/model/final_model_3.7883.pt"
    ]

    # One case per batch so every HDF5 file holds a single volume.
    val_loader = DataLoader(val_ds, batch_size=1, shuffle=False)
   
    data_index = 0
    for data in val_loader:
       
        h5f = h5py.File(os.path.join(plot_dir, f'liver_plot{data_index}.h5'), 'w')
        print(f"save path is liver_plot{data_index}.h5")

        image = data["image"].to(device)
        label = data["label"].cpu().numpy()

        h5f.create_dataset('image', data=to_image_save(image.cpu().numpy()))
        h5f.create_dataset('label', data=to_label_save(label))

        outputs = []
        for model_index, model in enumerate(models):
            p = checkpoints[model_index]
            # Swap the current model into the shared trainer harness,
            # load its checkpoint, and move it to the GPU for inference.
            trainer.model = model 
            trainer.load_state_dict(p)
            trainer.model.to(device)
            trainer.model.eval()
            pred_type = None
            # Diffusion model gets a higher-overlap inferer (0.5 vs 0.25),
            # presumably to smooth DDIM patch seams — confirm rationale.
            if "diffusion" in models_name[model_index]:
                trainer.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                        sw_batch_size=1,
                                        overlap=0.5)

            else :
                trainer.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                        sw_batch_size=1,
                                        overlap=0.25)
            with torch.no_grad():
                # "ddim_sample" routes FuseModel.forward to DDIM sampling;
                # None means plain forward + argmax (see validation_step).
                if "diffusion" in models_name[model_index]:
                    pred_type = "ddim_sample"
                else :
                    pred_type = None
                out = trainer.validation_step({"image": image}, pred_type)
            

            # Diffusion output is per-channel binary; collapse to label map.
            if "diffusion" in models_name[model_index]:
                out = convert_label(out)

            pred = to_label_save(out)
            h5f.create_dataset(f'{models_name[model_index]}', data=pred)

            # Per-class Dice via medpy.dc for labels 1 (liver) and 2 (tumor).
            dices = []
            for i in range(1, 3):
                label_i = (label == i)
                pred_i = (pred == i)
                dices.append(dc(pred_i, label_i))
            print(f"model_name: {models_name[model_index]}: dices is {dices}")

            # Free GPU memory before the next model is loaded.
            trainer.model.to("cpu")
        h5f.close()

        data_index += 1

