import numpy as np
from dataset.brats_data_utils import get_loader_brats
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from light_training.evaluation.metric import dice, hausdorff_distance_95, jaccard, recall, fscore
from models.uent2d import UNet2D
from models.uent3d import UNet3D
from models.uent25d import UNet25D
from monai.networks.nets.segresnet import SegResNet
from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
import argparse
import yaml 
from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType
from guided_diffusion.respace import SpacedDiffusion, space_timesteps
from guided_diffusion.resample import UniformSampler
from ddim_seg.unet3d import DiffusionUNet
from ddim_seg.basic_unet_denose import BasicUNetDe
from monai.networks.nets.vnet import VNet
from models.modelgenesis.unet3d import UNet3DModelGen
from models.transvw.models.ynet3d import UNet3DTransVW
from monai.networks.nets.attentionunet import AttentionUnet
from nnunet.network_architecture.generic_UNet import Generic_UNet
from models.unet_nested.unet_nested_3d import UNet_Nested3D

from einops import rearrange


# Fix all random seeds (MONAI helper) so runs are reproducible.
set_determinism(123)
import os

# Pin this process to physical GPU index 2 on the host.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Root directory of the BraTS 2020 training dataset.
data_dir = "/home/xingzhaohu/sharefs/datasets/brats2020/MICCAI_BraTS2020_TrainingData/"

max_epoch = 300   # total training epochs
batch_size = 2    # per-step batch size
val_every = 10    # run validation every N epochs
num_gpus = 2      # NOTE(review): apparently unused — __main__ passes num_gpus=1; confirm intent
device = "cuda:0"
thres = 0.7       # Dice threshold for the per-case pass/fail (f1) flags in validation_step

def parse_config(config_path):
    """Load a YAML config file and return it as nested argparse.Namespace objects.

    Every dict in the YAML document (at any depth) becomes a Namespace, so
    values can be read with attribute access (``cfg.model.channels``).
    """

    def _to_namespace(mapping):
        # Recursively convert nested dicts into Namespace objects.
        ns = argparse.Namespace()
        for name, val in mapping.items():
            setattr(ns, name, _to_namespace(val) if isinstance(val, dict) else val)
        return ns

    with open(config_path, "r") as f:
        raw = yaml.safe_load(f)

    return _to_namespace(raw)


class FuseModel(nn.Module):
    """Bundle of the 3D / 2.5D / 2D UNet variants behind one forward().

    ``pred_type`` selects which sub-model runs; "all" runs every variant and
    returns their predictions as a tuple.
    """

    def __init__(self) -> None:
        super().__init__()
        self.model3d = UNet3D()
        self.model25d = UNet25D()
        self.model2d = UNet2D()

    def forward(self, x, pred_type="all"):
        """Run the selected sub-model(s) on x.

        Returns a (pred_3d, pred_25d, pred_2d) tuple for "all", a single
        prediction for "3d"/"25d"/"2d", and None for any other value.
        """
        if pred_type == "all":
            return self.model3d(x), self.model25d(x), self.model2d(x)

        # Dispatch table replaces the original if/elif chain.
        branches = {
            "3d": self.model3d,
            "25d": self.model25d,
            "2d": self.model2d,
        }
        branch = branches.get(pred_type)
        return branch(x) if branch is not None else None

class SwinUNETR2D(nn.Module):
    """Apply a 2D SwinUNETR slice-wise to a 5D volume of shape (b, c, d, w, h)."""

    def __init__(self) -> None:
        super().__init__()
        # 2D SwinUNETR: 96x96 input, 4 in-channels, 4 out-channels.
        self.model = SwinUNETR([96, 96], 4, 4, spatial_dims=2)

    def forward(self, x):
        batch, _, depth, _, _ = x.shape
        # Fold the depth axis into the batch so every slice is an independent 2D image.
        slices = rearrange(x, "b c d w h -> (b d) c w h")
        out = self.model(slices)
        # Unfold back to the original volumetric layout.
        return rearrange(out, "(b d) c w h -> b c d w h", b=batch, d=depth)

class DiffusionModel(nn.Module):
    """Diffusion-based segmentation wrapper.

    Couples a denoising UNet with two SpacedDiffusion schedules: the full
    1000-step schedule for training (q_sample / denoising loss) and a
    20-step respaced schedule for DDIM sampling at inference time.
    The ``pred_type`` argument of forward() selects the operation.
    """

    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): config is only consumed by the commented-out
        # DiffusionUNet variant below; currently unused — confirm.
        config = parse_config("./brats_diffusion.yaml")
        # self.model = DiffusionUNet(config)
        # Denoising network: spatial_dims=3, 6 in-channels, 2 out-channels.
        self.model = BasicUNetDe(3, 6, 2, [32, 32, 64, 128, 256, 32])
        betas = get_named_beta_schedule("linear", 1000)
        # Training schedule: all 1000 steps; the model predicts x_0 (START_X).
        self.diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [1000]),
                                            betas=betas,
                                            model_mean_type=ModelMeanType.START_X,
                                            model_var_type=ModelVarType.FIXED_LARGE,
                                            loss_type=LossType.MSE,
                                            )

        # Inference schedule: same betas respaced down to 20 DDIM steps.
        self.sample_diffusion = SpacedDiffusion(use_timesteps=space_timesteps(1000, [20]),
                                            betas=betas,
                                            model_mean_type=ModelMeanType.START_X,
                                            model_var_type=ModelVarType.FIXED_LARGE,
                                            loss_type=LossType.MSE,
                                            )
        # Uniform timestep sampler over the 1000 training steps.
        self.sampler = UniformSampler(1000)
    

    def forward(self, image=None, x=None, pred_type=None, step=None, embedding=None):
        """Multi-purpose entry point; behaviour is selected by ``pred_type``.

        - "q_sample": diffuse ground truth ``x`` with fresh noise at a
          uniformly sampled timestep; returns (noisy_x, t, noise).
        - "denose": run the denoising network at timestep ``step``,
          conditioned on ``image`` and ``embedding``. (sic: "denose")
        - "ddim_sample": 20-step DDIM sampling of a (1, 2, 96, 96, 96)
          segmentation volume conditioned on ``image``; returns pred_xstart.
        - "seg" / "embedding": delegate to self.seg_unet.
          NOTE(review): seg_unet is never defined in this class, so these
          branches raise AttributeError as written — presumably assigned
          externally or dead code; confirm before use.
        Any other value falls through and returns None.
        """
        if pred_type == "q_sample":
            noise = torch.randn_like(x).to(x.device)
            # weight from the sampler is unused here (uniform sampling).
            t, weight = self.sampler.sample(x.shape[0], x.device)
            return self.diffusion.q_sample(x, t, noise=noise), t, noise

        elif pred_type == "denose":
            # return self.model(x, t=step, image=image, embed=embed)
            return self.model(x, t=step, image=image, embedding=embedding)

        elif pred_type == "ddim_sample":
            # sample_out = self.diffusion.ddim_sample(self.model, shape=[1, 1, 96, 96, 96], stride=20, eta=0, embeddings=embed, image=image)
            # sample_out = self.diffusion.ddim_sample(self.model, shape=[1, 1, 96, 96, 96], stride=20, eta=0, image=image)
            sample_out = self.sample_diffusion.ddim_sample_loop(self.model, (1, 2, 96, 96, 96), model_kwargs={"image": image})
            sample_out = sample_out["pred_xstart"]
            return sample_out

        elif pred_type == "seg":
            return self.seg_unet(image)[0]
        elif pred_type == "embedding":
            return self.seg_unet(image)

class BraTSTrainer(Trainer):
    """Trainer/evaluator for 4-class BraTS segmentation.

    Inference runs sliding-window over 96^3 patches. validation_step reports,
    per case, Dice / HD95 / recall for the three standard BraTS regions —
    whole tumour (WT: any non-background class), tumour core (TC: classes
    1 and 3) and enhancing tumour (ET: class 3) — plus binary flags marking
    whether the Dice exceeds the module-level ``thres`` and 0.9.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # 96^3 windows with 50% overlap, one window per forward pass.
        self.window_infer = SlidingWindowInferer(roi_size=[96, 96, 96],
                                        sw_batch_size=1,
                                        overlap=0.5)

        # Model under evaluation. Earlier experiments swapped in SwinUNETR,
        # UNETR, TransBTS, SegResNet, VNet, AttentionUnet, Generic_UNet
        # (nnU-Net), FuseModel and DiffusionModel here; the checkpoint loaded
        # in __main__ must match whichever model is built.
        self.model = UNet_Nested3D(in_channels=4, n_classes=4)

        self.best_mean_dice = 0.0
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1e-4, weight_decay=1e-3)

        self.loss_func = nn.CrossEntropyLoss()

    def get_input(self, batch):
        """Extract (image, label) from a batch dict and normalise the label.

        BraTS label 4 (enhancing tumour) is remapped to 3 so the classes form
        the contiguous set {0, 1, 2, 3}. NOTE: the remap mutates the batch's
        label tensor in place. A singleton channel axis, if present, is
        squeezed away and the label is cast to long for CrossEntropyLoss.
        """
        image = batch["image"]
        label = batch["label"]

        label[label == 4] = 3
        if len(label.shape) == 5:
            label = label[:, 0]
        label = label.long()
        return image, label

    def _region_metrics(self, o, t, empty_target_is_hit=False):
        """Compute metrics for one binary region.

        Args:
            o: boolean prediction mask (numpy array).
            t: boolean ground-truth mask (numpy array).
            empty_target_is_hit: when True and the ground truth is empty,
                the pass/fail flags are forced to 1 (used for ET, where an
                absent region should not count as a failure).

        Returns:
            (dice, hd95, recall, f1_at_thres, f1_at_0.9)
        """
        d = dice(o, t)
        hd = hausdorff_distance_95(o, t)
        rc = recall(o, t)
        f1 = 1 if d > thres else 0
        f1_9 = 1 if d > 0.9 else 0
        if empty_target_is_hit and t.sum() == 0:
            f1 = 1
            f1_9 = 1
        return d, hd, rc, f1, f1_9

    def validation_step(self, batch):
        """Segment one case and return the 15 per-case metrics.

        Returns [wt, tc, et, wt_hd, tc_hd, et_hd, wt_recall, tc_recall,
        et_recall, wt_f1, tc_f1, et_f1, wt_f1_9, tc_f1_9, et_f1_9].
        """
        image, label = self.get_input(batch)

        output = self.window_infer(image, self.model).argmax(dim=1).cpu().numpy()
        target = label.cpu().numpy()

        # Whole tumour: any non-background voxel.
        wt, wt_hd, wt_recall, wt_f1, wt_f1_9 = self._region_metrics(
            output > 0, target > 0)

        # Tumour core: necrotic core (1) plus enhancing tumour (3).
        tc, tc_hd, tc_recall, tc_f1, tc_f1_9 = self._region_metrics(
            (output == 1) | (output == 3), (target == 1) | (target == 3))

        # Enhancing tumour: class 3; an empty ground truth counts as a hit.
        et, et_hd, et_recall, et_f1, et_f1_9 = self._region_metrics(
            output == 3, target == 3, empty_target_is_hit=True)

        return [wt, tc, et, wt_hd, tc_hd, et_hd, wt_recall, tc_recall, et_recall, wt_f1, tc_f1, et_f1, wt_f1_9, tc_f1_9, et_f1_9]

if __name__ == "__main__":

    # Build the BraTS loaders for fold 0 (train/val/test splits).
    train_ds, val_ds, test_ds = get_loader_brats(data_dir=data_dir, batch_size=batch_size, fold=0)

    trainer = BraTSTrainer(env_type="pytorch",
                           max_epochs=max_epoch,
                           batch_size=batch_size,
                           device=device,
                           val_every=val_every,
                           num_gpus=1,
                           master_port=17751,
                           training_script=__file__)

    # Checkpoint to evaluate — must match the model built in
    # BraTSTrainer.__init__. Checkpoints for the other model variants live
    # under ./logs_brats/<variant>/model/ and the absolute paths used in
    # earlier runs of this project.
    logdir = "/home/xingzhaohu/jiuding_code/mutual_learning/logs_brats/unet_plus/model/best_model_0.8268.pt"
    trainer.load_state_dict(logdir)

    # Evaluate on the held-out test split.
    v_mean, v_out = trainer.validation_single_gpu(val_dataset=test_ds)

    print(f"v_mean is {v_mean}")

    # The last six entries of every per-case row are the binary Dice-above-
    # threshold flags, in order: wt_f1, tc_f1, et_f1, wt_f1_9, tc_f1_9, et_f1_9.
    flag_columns = [np.array([case[idx] for case in v_out]) for idx in range(-6, 0)]

    # Recall against an all-ones target equals the fraction of cases whose
    # flag is set.
    label = np.ones_like(flag_columns[0])
    wt_f1, tc_f1, et_f1, wt_f1_9, tc_f1_9, et_f1_9 = (
        recall(column, label) for column in flag_columns
    )

    print(f"wt_f1 is {wt_f1}, tc_f1 is {tc_f1}, et_f1 is {et_f1}, wt_f1_9 is {wt_f1_9}, tc_f1_9 is {tc_f1_9}, et_f1_9 is {et_f1_9}")