import numpy as np
from light_training.dataloading.dataset import get_train_val_test_loader_seperate
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
# from light_training.utils.files_helper import save_new_model_and_delete_last
# from models.uent2d import UNet2D
# from models.uent3d import UNet3D
# from models.uent25d import UNet25D
# from monai.networks.nets.segresnet import SegResNet
# from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS
# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal
import SimpleITK as sitk 
from scipy import ndimage
import skimage.measure as measure
from light_training.prediction import Predictor
set_determinism(123)
import os

# Pin this process to GPU 0 before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Pre-processed full-resolution dataset splits
# (presumably produced by the light_training preprocessing pipeline -- TODO confirm).
train_dir = "./data/fullres/train"
val_dir = "./data/fullres/val"
test_dir = "./data/fullres/test"

# Destination for the per-case prediction arrays written in validation_step.
save_dir = "./prediction_results/nnunet_test_best_model/predictTs"

device = "cuda:0"
# Sliding-window ROI size used by the inferer (D, H, W).
patch_size = [128, 128, 128]

os.makedirs(save_dir, exist_ok=True)

class MedicalTrainer(Trainer):
    """Inference-only trainer: runs an (optionally ensembled) nnU-Net over a
    dataset and saves argmax predictions to ``save_dir``.

    Training-related constructor arguments (max_epochs, batch_size, ...) are
    forwarded to the base ``Trainer`` but are unused here — only
    ``validation_step`` is exercised (via ``validation_single_gpu``).
    """

    # Checkpoint used by ``define_nnunet_model``; a class attribute so it can
    # be overridden without editing the method body.
    NNUNET_CKPT = "/home/zhaohu/diffunet-proj/word-project/logs/nnunet_gpu4_alltrain/model/best_model_0.8297.pt"

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # Lazily-built cache of (model, predictor) pairs so validation_step
        # does not reload the checkpoint from disk for every batch.
        self._model_cache = None

    def filte_state_dict(self, sd):
        """Strip DistributedDataParallel artifacts from a checkpoint dict.

        Handles both a wrapped checkpoint ``{"module": state_dict}`` and
        parameter keys prefixed with ``"module."`` (added by
        ``DataParallel`` / ``DistributedDataParallel``).

        Returns:
            dict: a new state dict with clean parameter names.
        """
        if "module" in sd:
            sd = sd["module"]
        # k[7:] drops the leading "module." prefix (7 characters).
        return {
            (str(k)[7:] if str(k).startswith("module") else str(k)): v
            for k, v in sd.items()
        }

    def define_nnunet_model(self):
        """Load the nnU-Net checkpoint and build its sliding-window predictor.

        Returns:
            tuple: ``(model, predictor)`` ready for
            ``predictor.maybe_mirror_and_predict``.
        """
        from models.nnunet3d import get_nnunet3d

        model = get_nnunet3d(1, 17)
        # map_location="cpu" avoids CUDA deserialization errors when the
        # checkpoint was saved on a different device. Note eval() is called
        # AFTER load_state_dict so the restored weights run in eval mode.
        state = torch.load(self.NNUNET_CKPT, map_location="cpu")
        model.load_state_dict(self.filte_state_dict(state))
        model.eval()

        window_infer = SlidingWindowInferer(roi_size=patch_size,
                                            sw_batch_size=2,
                                            overlap=0.5,
                                            progress=True,
                                            mode="gaussian")
        predictor = Predictor(window_infer,
                              mirror_axes=[0, 1, 2])

        print("nnunet load successfully ...")
        return model, predictor

    def get_input(self, batch):
        """Unpack a dataloader batch into ``(image, properties)``."""
        image = batch["data"]
        properties = batch["properties"]
        del batch
        return image, properties

    def validation_step(self, batch):
        """Predict one case with the model ensemble and save the result.

        Models are loaded on the first call and cached for subsequent
        batches. The averaged probabilities are argmax-ed over the channel
        dimension, mapped back to the uncropped grid and written to
        ``save_dir`` as an .npy file named after the case.
        """
        image, properties = self.get_input(batch)

        # Build the (model, predictor) pairs exactly once — the original
        # reloaded the checkpoint from disk on every batch.
        if self._model_cache is None:
            self._model_cache = [build() for build in (self.define_nnunet_model,)]

        ensemble_output = None
        predictor = None
        for model, predictor in self._model_cache:
            pred = predictor.maybe_mirror_and_predict(image, model, device=device)
            ensemble_output = pred if ensemble_output is None else ensemble_output + pred

        ensemble_output = ensemble_output / len(self._model_cache)
        ensemble_output = predictor.predict_raw_probability(ensemble_output, properties)
        ensemble_output = ensemble_output.argmax(dim=0)
        ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties)

        case_name = properties['name'][0]
        predictor.save_to_npy(ensemble_output, save_dir, case_name)

        print(f"save successfully: {case_name}")
        return 0


if __name__ == "__main__":
    # Build all three dataloaders; only the test split is consumed below.
    loaders = get_train_val_test_loader_seperate(
        train_dir=train_dir, val_dir=val_dir, test_dir=test_dir
    )
    train_ds, val_ds, test_ds = loaders

    inference_trainer = MedicalTrainer(
        env_type="pytorch",
        max_epochs=0,     # inference only: no training loop is run
        batch_size=0,
        device=device,
        val_every=0,
        num_gpus=1,
        master_port=17751,
        training_script=__file__,
    )

    # Run validation_step over every case in the test set on one GPU.
    inference_trainer.validation_single_gpu(val_dataset=test_ds)