import numpy as np
from dataset.brats_data_utils import get_loader_brats
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from light_training.evaluation.metric import dice, hausdorff_distance_95, jaccard, recall, fscore
from models.uent2d import UNet2D
from models.uent3d import UNet3D
from models.uent25d import UNet25D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
import argparse
import yaml 
from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
from ddim_seg.unet3d import DiffusionUNet
from ddim_seg.basic_unet_denose import BasicUNetDe
from monai.networks.nets.vnet import VNet
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.attentionunet import AttentionUnet
from nnunet.network_architecture.generic_UNet import Generic_UNet
from medpy.metric import dc 
from models.unet_nested.unet_nested_3d import UNet_Nested3D

from einops import rearrange


# Fix all random seeds (MONAI helper) so evaluation order / any augmentation
# is reproducible across runs.
set_determinism(123)
import os

# Pin this process to physical GPU 2 before any CUDA context is created;
# "cuda:0" below therefore refers to that card.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Root directory of the BraTS 2020 training data on this machine.
data_dir = "/home/xingzhaohu/sharefs/datasets/brats2020/MICCAI_BraTS2020_TrainingData/"

max_epoch = 300  # max training epochs passed to the trainer
batch_size = 2  # loader batch size for get_loader_brats
val_every = 10  # validate every N epochs
num_gpus = 2  # NOTE(review): unused — the trainer below is created with num_gpus=1
device = "cuda:0"  # logical device 0 == physical GPU 2 (see CUDA_VISIBLE_DEVICES)
thres = 0.7  # NOTE(review): unused in this file — confirm before deleting

class FuseModelUNet(nn.Module):
    """Ensemble of a 3D, a 2.5D and a 2D UNet over the same input volume.

    ``forward`` either sums the three sub-model outputs (``pred_type="all"``)
    or dispatches to a single sub-model ("3d" / "25d" / "2d").
    """

    def __init__(self) -> None:
        super().__init__()
        self.model3d = UNet3D()
        self.model25d = UNet25D()
        self.model2d = UNet2D()

    def forward(self, x, pred_type="all"):
        """Run the requested sub-model(s) on ``x``.

        Args:
            x: input batch; presumably a 5D (B, C, D, H, W) volume — confirm
               against the data loader.
            pred_type: "all" (default, summed ensemble), "3d", "25d" or "2d".

        Returns:
            The selected sub-model's output, or the element-wise sum of all
            three when ``pred_type == "all"``.

        Raises:
            ValueError: for an unrecognized ``pred_type`` (the previous
                version silently returned ``None`` here).
        """
        if pred_type == "all":
            # Fuse by simple logit addition across the three views.
            return self.model3d(x) + self.model25d(x) + self.model2d(x)
        if pred_type == "3d":
            return self.model3d(x)
        if pred_type == "25d":
            return self.model25d(x)
        if pred_type == "2d":
            return self.model2d(x)
        raise ValueError(f"unknown pred_type: {pred_type!r}")

class FuseModel(nn.Module):
    """Ensemble of TransBTS (3D), a 2.5D UNet and a 2D SwinUNETR.

    Same dispatch contract as ``FuseModelUNet`` but with stronger backbones
    for the 3D and 2D branches.
    """

    def __init__(self) -> None:
        super().__init__()
        # TransBTS() returns a (config, model) pair; only the model is kept.
        _, model = TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned")
        self.model3d = model
        self.model25d = UNet25D()
        self.model2d = SwinUNETR2D()

    def forward(self, x, pred_type="all"):
        """Run the requested sub-model(s) on ``x``.

        Args:
            x: input batch; presumably a 5D (B, C, D, H, W) volume — confirm
               against the data loader.
            pred_type: "all" (default, summed ensemble), "3d", "25d" or "2d".

        Returns:
            The selected sub-model's output, or the element-wise sum of all
            three when ``pred_type == "all"``.

        Raises:
            ValueError: for an unrecognized ``pred_type`` (the previous
                version silently returned ``None`` here).
        """
        if pred_type == "all":
            # Fuse by simple logit addition across the three views.
            return self.model3d(x) + self.model25d(x) + self.model2d(x)
        if pred_type == "3d":
            return self.model3d(x)
        if pred_type == "25d":
            return self.model25d(x)
        if pred_type == "2d":
            return self.model2d(x)
        raise ValueError(f"unknown pred_type: {pred_type!r}")

class SwinUNETR2D(nn.Module):
    """Applies a 2D SwinUNETR slice-by-slice along the depth axis of a 3D volume."""

    def __init__(self) -> None:
        super().__init__()
        # 96x96 input patch, 4 input channels, 4 output classes, 2D variant.
        self.model = SwinUNETR([96, 96], 4, 4, spatial_dims=2)

    def forward(self, x):
        batch, _, depth, _, _ = x.shape
        # Fold the depth axis into the batch so every slice is a 2D image.
        slices = rearrange(x, "b c d w h -> (b d) c w h")
        seg = self.model(slices)
        # Restore the original 5D (B, C, D, W, H) layout.
        return rearrange(seg, "(b d) c w h -> b c d w h", b=batch, d=depth)

class BraTSTrainer(Trainer):
    """Thin Trainer subclass used here only for sliding-window inference."""

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # Default inferer; the evaluation loop replaces it per model.
        self.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                                sw_batch_size=1,
                                                overlap=0.1)
        # Set externally before each evaluation pass.
        self.model = None

    def get_input(self, batch):
        # Inference needs only the image tensor from the batch dict.
        return batch["image"]

    def validation_step(self, batch, pred_type=None):
        """Sliding-window inference; returns the argmax label map as a numpy array."""
        image = self.get_input(batch)
        # Forward pred_type to the model only when explicitly requested.
        extra = {} if pred_type is None else {"pred_type": pred_type}
        logits = self.window_infer(image, self.model, **extra)
        return logits.argmax(dim=1).cpu().numpy()


if __name__ == "__main__":
    import json
    from monai.data import DataLoader
    # Hoisted: previously this was re-executed inside the per-model loop on
    # every batch, and compute_hd below only worked because that in-loop
    # import happened to run before its first call.
    from medpy import metric

    def to_image_save(image):
        """Min-max scale an image to uint8 [0, 255] (for visual dumps)."""
        image = (image - image.min()) / (image.max() - image.min())
        image = image * 255.0
        image = image.astype(np.uint8)
        return image

    def to_label_save(image):
        """Labels need no rescaling; kept for symmetry with to_image_save."""
        return image

    def compute_hd(pred_i, label_i):
        """HD95 between two binary masks; 0 when either mask is empty
        (medpy's hd95 is undefined for an empty mask)."""
        if pred_i.sum() > 0 and label_i.sum() > 0:
            return metric.binary.hd95(pred_i, label_i)
        return 0

    def mean_hd95(pred, label):
        """Average HD95 over the three BraTS foreground classes (1, 2, 3)."""
        per_class = [compute_hd(pred == i, label == i) for i in range(1, 4)]
        return sum(per_class) / len(per_class)

    train_ds, val_ds, test_ds = get_loader_brats(data_dir=data_dir, batch_size=batch_size, fold=0)

    trainer = BraTSTrainer(env_type="pytorch",
                           max_epochs=max_epoch,
                           batch_size=batch_size,
                           device=device,
                           val_every=val_every,
                           num_gpus=1,
                           master_port=17751,
                           training_script=__file__)

    # Instantiate every architecture under comparison (all 4-channel input,
    # 4-class output on 96^3 patches).
    swinunetr_model = SwinUNETR([96, 96, 96], 4, 4)
    unetr_model = UNETR(4, 4, [96, 96, 96])
    _, model = TransBTS(dataset='brats', _conv_repr=True, _pe_type="learned")
    transbts_model = model
    fuse_model = FuseModel()
    segres_model = SegResNet(3, 16, 4, 4)
    swinunet2d_model = SwinUNETR2D()
    modelsgen_model = UNet3DModelGen(4)
    attunet_model = AttentionUnet(3, 4, 4, channels=[32, 64, 128, 256], strides=[2, 2, 2, 2])
    nnunet_model = BasicUNet(spatial_dims=3,
                             in_channels=4,
                             out_channels=4,
                             features=[32, 32, 64, 128, 256, 32])

    fuse_unet = FuseModelUNet()
    individual_unet = FuseModelUNet()
    unet_plus = UNet_Nested3D(in_channels=4, n_classes=4)

    models_name = ["swinunetr_model", "unetr_model", "transbts_model", "segres_model", "swinunet2d_model", "modelsgen_model", "attunet_model", "nnunet_model", "fuse_model", "fuse_unet", "individual_unet", "unet_plus"]
    models = [swinunetr_model, unetr_model, transbts_model, segres_model, swinunet2d_model, modelsgen_model, attunet_model, nnunet_model, fuse_model, fuse_unet, individual_unet, unet_plus]

    extend_models = ["unet2d", "unet25d", "unet3d", "unet2d_ml", "unet25d_ml", "unet3d_ml", "transbts_ml", "unet25d_cross_ml", "swinunet2d_ml"]

    # Per-model lists of mean HD95 values, one entry per test volume.
    # (Renamed from `return_dices` — the metric computed here is HD95.)
    return_hd95 = {k: [] for k in models_name + extend_models}

    # Checkpoint paths, aligned index-for-index with `models`.
    checkpoints = [
        "./logs_brats/swinunetr/model/final_model_0.8455.pt",
        "./logs_brats/unetr/model/best_model_0.8545.pt",
        "./logs_brats/tranbts/model/best_model_0.8394.pt",
        "./logs_brats/segresnet/model/final_model_0.8434.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/swinunet2d/model/best_model_0.8530.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/modelsgenesis/model/best_model_0.8675.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/attentionUNet/model/best_model_0.8346.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats_nnunet_new/nnunet_model_e1000.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/cross_unet_mutual_learning_2_e1000/model/final_model_0.8779.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_mutual_learning_2/model/final_model_0.8571.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_mutual_learning_2_no_softloss/model/best_model_0.8536.pt",
        "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_plus/model/best_model_0.8268.pt"
    ]

    val_loader = DataLoader(test_ds, batch_size=1, shuffle=False)

    for data in val_loader:
        image = data["image"].to(device)
        label = data["label"].cpu()
        # BraTS label 4 (enhancing tumour) is remapped to 3 for contiguous classes.
        label[label == 4] = 3
        if len(label.shape) == 5:
            label = label.squeeze(dim=1)
        label = label.numpy()

        for model_index, model in enumerate(models):
            name = models_name[model_index]
            trainer.model = model
            trainer.load_state_dict(checkpoints[model_index])
            trainer.model.to(device)
            trainer.model.eval()

            # Fused models use a larger sliding-window overlap.
            overlap = 0.6 if name in ("fuse_model", "fuse_unet") else 0.25
            trainer.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                                        sw_batch_size=1,
                                                        overlap=overlap)

            # Models without mutual learning need only a single prediction.
            with torch.no_grad():
                pred = trainer.validation_step({"image": image})

            hd = mean_hd95(pred, label)
            print(f"model is {name}, cur hd95 is {hd}")
            return_hd95[name].append(hd)

            if name in ("fuse_model", "fuse_unet", "individual_unet"):
                # Also evaluate each sub-model of the fused architectures.
                for pred_type in ("3d", "25d", "2d"):
                    with torch.no_grad():
                        pred = trainer.validation_step({"image": image}, pred_type=pred_type)

                    hd = mean_hd95(pred, label)
                    print(f"model is {name} ({pred_type}), cur hd95 is {hd}")
                    return_hd95.setdefault(f"{name}_{pred_type}", []).append(hd)

            # Free GPU memory before loading the next model.
            trainer.model.to("cpu")

    with open("./save_HDS.txt", mode="w") as f:
        f.write(json.dumps(return_hd95))