import numpy as np
from light_training.dataloading.dataset import get_loader
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
import SimpleITK as sitk 
import SimpleITK
from scipy import ndimage
import skimage.measure as measure
from utils.cropping import crop_to_nonzero
from utils.normalize import CTNormStandard, CTNormalization
from utils.resample import resample_data_or_seg_to_shape, compute_new_shape
from utils.predictor import Predictor

import os

# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Target device for inference; torch.device(...) is built from this string in
# Inference.__init__. Change the index here (or use CUDA_VISIBLE_DEVICES above)
# to move the run to a different GPU.
device = "cuda:2"


# torch.cuda.set_per_process_memory_fraction(0.275, 2)

def nii2array(path):
    """Load an ITK-readable image (.nii/.mha/...) and return it as a numpy array."""
    return sitk.GetArrayFromImage(sitk.ReadImage(path))

def merge_multi_class_to_one(input_arr, classes_index=None):
    """Collapse selected label values of a multi-class mask into one binary mask.

    Args:
        input_arr: integer label array of any shape.
        classes_index: iterable of label values to merge. ``None`` (the
            default) or an empty iterable yields an all-zero mask; the
            original code crashed with ``TypeError`` on the default.

    Returns:
        Array of the same shape/dtype as ``input_arr`` with 1 where the value
        was in ``classes_index`` and 0 elsewhere.
    """
    new_arr = np.zeros_like(input_arr)
    if classes_index is not None:
        # Vectorized membership test replaces the per-class loop.
        new_arr[np.isin(input_arr, list(classes_index))] = 1
    return new_arr

class Inference:
    """End-to-end inference pipeline for SegRap2023 task 1 (45-OAR segmentation).

    For each validation case the pipeline:
      1. loads the CT and contrast-enhanced CT volumes (`load_inputs`),
      2. intensity-normalizes, crops to the nonzero region and resamples to a
         fixed spacing (`preprocess`),
      3. runs mirrored sliding-window inference with a trained 3D UNet and
         writes per-organ masks at the original geometry (`predict`).
    """

    def __init__(self) -> None:
        # Local input/output locations used for offline validation runs.
        self.input_dir = "./data/SegRap2023_Validation_Set_20cases"
        self.output_dir = "./prediction_results/task1_normf01_ep2000_gpu2_unet466"
        # Target spacing (z, y, x) every case is resampled to before inference.
        self.out_spacing = [3.0, 0.54199219, 0.54199219]
        # Sliding-window patch size (z, y, x); overridden again in `predict`
        # to match the loaded checkpoint's training patch size.
        self.patch_size_1 = [64, 256, 256]
        self.device = torch.device(device)

        os.makedirs(self.output_dir, exist_ok=True)

    def filte_state_dict(self, sd):
        """Strip a leading ``module.`` (DataParallel/DDP) prefix from checkpoint keys.

        Accepts either a raw state dict or a wrapper dict that holds the state
        dict under the ``"module"`` key; returns a new dict loadable by an
        unwrapped model.
        """
        if "module" in sd:
            # Checkpoint stored as {"module": state_dict}.
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            # "module.conv.weight" -> "conv.weight" (len("module.") == 7).
            new_k = k[7:] if k.startswith("module") else k
            new_sd[new_k] = v
        del sd
        return new_sd

    def convert_mha_to_nii(self, mha_input_path, nii_out_path):  # nnUNet specific
        """Convert an .mha image to compressed .nii, printing its size."""
        img = SimpleITK.ReadImage(mha_input_path)
        print(img.GetSize())
        SimpleITK.WriteImage(img, nii_out_path, True)

    def convert_nii_to_mha(self, nii_input_path, mha_out_path):  # nnUNet specific
        """Convert a .nii image to compressed .mha."""
        img = SimpleITK.ReadImage(nii_input_path)
        SimpleITK.WriteImage(img, mha_out_path, True)

    def read(self, mha_path):
        """Read one image volume from disk.

        Returns:
            tuple: ``(img, properties)`` where ``img`` is a float32 array of
            shape ``(1, d, h, w)`` and ``properties`` records the SimpleITK
            spacing (x, y, z order) and the raw array shape.
        """
        itk_img = SimpleITK.ReadImage(mha_path)
        # Read the array once and reuse it (the original decoded it twice).
        arr = SimpleITK.GetArrayFromImage(itk_img)
        properties = {
            "spacing": itk_img.GetSpacing(),
            "raw_size": arr.shape,
        }
        return arr[None,].astype(np.float32), properties

    def check_gpu(self):
        """
        Check if GPU is available. Note that the Grand Challenge only has one available GPU.
        """
        print('Checking GPU availability')
        is_available = torch.cuda.is_available()
        print('Available: ' + str(is_available))
        print(f'Device count: {torch.cuda.device_count()}')
        if is_available:
            print(f'Current device: {torch.cuda.current_device()}')
            print('Device name: ' + torch.cuda.get_device_name(0))
            print('Device memory: ' +
                  str(torch.cuda.get_device_properties(0).total_memory))

    def load_inputs(self, i=0):      # use two modalities input data
        """
        Read the i-th case's input data (two modalities) from `self.input_dir` (/input/).
        Please do not modify the path for CT and contrast-CT images.

        The ``i=0`` default keeps `post_process` (which calls this without an
        index) working; explicit callers such as `run_single` pass the index.

        Returns:
            tuple: ``(uuid, data, properties)`` where ``data`` has shape
            ``(2, d, h, w)`` — channel 0 is CT, channel 1 contrast-CT.
        """
        ct_mha = sorted(os.listdir(os.path.join(self.input_dir, 'head-neck-ct/')))[i]
        ctc_mha = sorted(os.listdir(os.path.join(self.input_dir, 'head-neck-contrast-enhanced-ct/')))[i]
        uuid = os.path.splitext(ct_mha)[0]

        img, properties = self.read(os.path.join(self.input_dir, 'head-neck-ct/', ct_mha))
        img_c, _ = self.read(os.path.join(self.input_dir, 'head-neck-contrast-enhanced-ct/', ctc_mha))

        print(f"{img.shape}, {img_c.shape}, {ct_mha}, {ctc_mha}")

        data = np.concatenate([img, img_c], axis=0)
        # Release the per-modality arrays before returning the stacked copy.
        del img
        del img_c
        # data is (2, d, w, h)
        return uuid, data, properties

    def write_outputs(self, uuid):
        """
        If you used one-hot label (54 classes) for training, please convert the 54 classes prediction to 45 oars prediction using function `convert_one_hot_label_to_multi_organs`.
        Otherwise, stack your 45 predictions for oars in the first channel, the corresponding mapping between the channel index and the organ names is:
        {0: 'Brain',
        1: 'BrainStem',
        2: 'Chiasm',
        3: 'TemporalLobe_L',
        4: 'TemporalLobe_R',
        5: 'Hippocampus_L',
        6: 'Hippocampus_R',
        7: 'Eye_L',
        8: 'Eye_R',
        9: 'Lens_L',
        10: 'Lens_R',
        11: 'OpticNerve_L',
        12: 'OpticNerve_R',
        13: 'MiddleEar_L',
        14: 'MiddleEar_R',
        15: 'IAC_L',
        16: 'IAC_R',
        17: 'TympanicCavity_L',
        18: 'TympanicCavity_R',
        19: 'VestibulSemi_L',
        20: 'VestibulSemi_R',
        21: 'Cochlea_L',
        22: 'Cochlea_R',
        23: 'ETbone_L',
        24: 'ETbone_R',
        25: 'Pituitary',
        26: 'OralCavity',
        27: 'Mandible_L',
        28: 'Mandible_R',
        29: 'Submandibular_L',
        30: 'Submandibular_R',
        31: 'Parotid_L',
        32: 'Parotid_R',
        33: 'Mastoid_L',
        34: 'Mastoid_R',
        35: 'TMjoint_L',
        36: 'TMjoint_R',
        37: 'SpinalCord',
        38: 'Esophagus',
        39: 'Larynx',
        40: 'Larynx_Glottic',
        41: 'Larynx_Supraglot',
        42: 'PharynxConst',
        43: 'Thyroid',
        44: 'Trachea'}
        Please ensure the 0 channel is the prediction of Brain, the 1 channel is the prediction of BrainStem, ......, the 44 channel is the prediction of Trachea.
        and also ensure the shape of final prediction array is [45, *image_shape].
        The predictions should be saved in the `self.output_dir` (/output/). Please do not modify the path and the suffix (.mha) for saving the prediction.
        """
        # Saving is currently handled by Predictor.save_to_nii_multi_organ in
        # `predict`; this challenge-template hook is intentionally a no-op.
        pass

    def preprocess(self, data, properties):
        """Normalize, crop to the nonzero region and resample one case.

        Args:
            data: float array of shape ``(2, d, w, h)`` from `load_inputs`.
            properties: metadata dict from `read`; everything needed to undo
                the crop/resample later is recorded in it in place.

        Returns:
            tuple: ``(data, properties)`` where ``data`` is a torch tensor of
            shape ``(1, 2, d', w', h')`` resampled to ``self.out_spacing``.
        """
        normalizer = CTNormStandard(a_min=-175,
                                    a_max=250,
                                    b_min=0.0,
                                    b_max=1.0, clip=True)

        original_spacing = list(properties['spacing'])
        # SimpleITK spacing is (x, y, z) while the array axes are (z, y, x),
        # so reverse it before comparing against `self.out_spacing`.
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        # Dummy segmentation required by crop_to_nonzero's interface.
        seg = np.zeros_like(data)

        data = normalizer(data)

        # Remember the pre-crop shape so the prediction can be padded back.
        properties['shape_before_cropping'] = data.shape[1:]
        data, seg, bbox = crop_to_nonzero(data, seg)
        del seg
        properties['bbox_used_for_cropping'] = bbox

        # Remember size after cropping (before resampling) for restoration.
        properties['shape_after_cropping_before_resample'] = data.shape[1:]

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        # NOTE(review): `original_spacing` is still in (x, y, z) order here
        # while `self.out_spacing` is (z, y, x); `new_shape` is what actually
        # drives the resampling — confirm the spacing args are only used for
        # anisotropy decisions inside resample_data_or_seg_to_shape.
        data = resample_data_or_seg_to_shape(data, new_shape,
                                             original_spacing,
                                             self.out_spacing,
                                             order=3,
                                             order_z=0)
        properties['shape_after_resample'] = new_shape

        # Add a batch dimension: (2, d, w, h) -> (1, 2, d, w, h).
        data = data[None,]

        return torch.from_numpy(data), properties

    def predict(self, data, properties, uid):
        """Run mirrored sliding-window inference and save per-organ masks.

        Args:
            data: tensor of shape ``(1, 2, d, w, h)`` from `preprocess`.
            properties: metadata dict produced by `preprocess`.
            uid: case identifier used for the output filename.
        """
        torch.cuda.empty_cache()

        from models.nnunet3d import NNUNetWrapperTask1
        model = NNUNetWrapperTask1()
        # This checkpoint was trained with 64x256x256 patches.
        model2_patch_size = [64, 256, 256]
        self.patch_size_1 = model2_patch_size
        model_2_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task1_unet3d_alldata_addaug_bs2_ep2000_ds_gpu2_norm01_unet466/model/final_model_0.9091.pt"
        new_sd = self.filte_state_dict(torch.load(model_2_path, map_location="cpu"))
        model.load_state_dict(new_sd)

        del new_sd
        torch.cuda.empty_cache()

        model.eval()
        # Stitch on the CPU to bound GPU memory; windows still run on the GPU.
        window_infer = SlidingWindowInferer(roi_size=self.patch_size_1,
                                            sw_batch_size=1,
                                            overlap=0.3,
                                            progress=True,
                                            mode="gaussian",
                                            device="cpu",
                                            sw_device=self.device)

        predictor = Predictor(window_infer, mirror_axes=[0, 1, 2])
        try:
            ensemble_output = predictor.maybe_mirror_and_predict(data, model, self.device)
        except RuntimeError:
            # Most likely CUDA OOM: retry the whole prediction on the CPU.
            ensemble_output = predictor.maybe_mirror_and_predict(data, model, torch.device("cpu"))
        torch.cuda.empty_cache()
        del model
        del data

        print(f"prediction done")
        ensemble_output = predictor.predict_raw_probability(ensemble_output, properties)
        print(f"non linear....")
        # Thresholding raw logits at 0 is equivalent to sigmoid(logits) > 0.5.
        ensemble_output = ensemble_output > 0

        print(f"restore crop...")
        ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties)

        raw_spacing = properties["spacing"]
        case_name = uid
        print(f"uuid is {uid}")
        # NOTE(review): dirname() creates only the parent directory here;
        # `self.output_dir` itself was already created in __init__.
        os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)

        print(f"saving....")
        predictor.save_to_nii_multi_organ(ensemble_output,
                                          raw_spacing,
                                          save_dir=self.output_dir,
                                          case_name=case_name,
                                          postprocess=False)

    def post_process(self):
        """Diagnostic entry point: report GPU state and predict the first case."""
        self.check_gpu()
        print('Start processing')
        # Relies on load_inputs' i=0 default (the original call passed no
        # index and crashed because the parameter was required).
        uuid, data, properties = self.load_inputs()

        data, properties = self.preprocess(data, properties)
        print(properties)
        print('Start prediction')
        self.predict(data, properties, uuid)

    def process(self):
        """
        Read inputs from /input, process with your algorithm and write to /output
        """
        self.post_process()

    def run_single(self, i):
        """Run the full pipeline (load -> preprocess -> predict) for case *i*."""
        print('Start processing')
        uuid, data, properties = self.load_inputs(i)
        data, properties = self.preprocess(data, properties)
        print(properties)
        print('Start prediction')
        self.predict(data, properties, uuid)

    def run(self):
        """Run inference over all 20 validation cases."""
        for i in range(20):
            self.run_single(i)

if __name__ == "__main__":
    # Entry point: run the full validation-set inference.
    Inference().run()