import numpy as np
from light_training.dataloading.dataset import get_loader
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
import SimpleITK as sitk 
import SimpleITK
from scipy import ndimage
import skimage.measure as measure
from utils.cropping import crop_to_nonzero
from utils.normalize import CTNormStandard, CTNormalization
from utils.resample import resample_data_or_seg_to_shape, compute_new_shape
from utils.predictor import Predictor
from tqdm import tqdm 
import os

# Restrict inference to the first visible GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = "cuda:0"

# Task 2 target structures and their integer label values in the
# predicted label mask: primary tumor (GTVp) and involved lymph nodes (GTVnd).
segrap_subset_task002 = {
    "GTVp": 1,
    "GTVnd": 2}


def nii2array(path):
    """Read an ITK-compatible image from `path` and return its voxel data
    as a numpy array (z, y, x order)."""
    image = sitk.ReadImage(path)
    return sitk.GetArrayFromImage(image)

def merge_multi_class_to_one(input_arr, classes_index=None):
    """Collapse selected labels of a multi-class mask into one binary mask.

    Args:
        input_arr: integer label array of any shape.
        classes_index: iterable of label values to merge. If None, no labels
            are selected and an all-zero mask is returned (the original
            implementation raised TypeError when the argument was omitted).

    Returns:
        Array of the same shape/dtype as `input_arr`, with 1 wherever the
        input matched any requested class and 0 elsewhere.
    """
    new_arr = np.zeros_like(input_arr)
    if classes_index is None:
        return new_arr
    # Single vectorized membership test instead of one boolean pass per class.
    new_arr[np.isin(input_arr, list(classes_index))] = 1
    return new_arr

def convert_one_hot_label_to_multi_lesions(ont_hot_label_path, save_path):
    """Split a single label volume (GTVp=1, GTVnd=2) into one binary channel
    per lesion and save the stacked result as a 4D image.

    Channel order follows `segrap_subset_task002` insertion order, i.e.
    channel 0 is GTVp and channel 1 is GTVnd, as required by the challenge.

    Args:
        ont_hot_label_path: path of the predicted label image.
        save_path: output path (.mha); written with compression.
    """
    # Read the label image ONCE. The original re-read the file from disk
    # twice per lesion inside the loop (loop-invariant I/O).
    label_itk = sitk.ReadImage(ont_hot_label_path)
    label_arr = sitk.GetArrayFromImage(label_itk)
    spacing = label_itk.GetSpacing()

    channels = []
    for lesion in segrap_subset_task002:
        binary = np.zeros_like(label_arr)
        binary[label_arr == segrap_subset_task002[lesion]] = 1
        channels.append(sitk.GetImageFromArray(binary, False))

    output_itk = sitk.JoinSeries(channels)
    # The joined 4th (channel) axis gets unit spacing.
    new_spacing = (spacing[0], spacing[1], spacing[2], 1)
    output_itk.SetSpacing(new_spacing)
    print(output_itk.GetSize())
    sitk.WriteImage(output_itk, save_path, True)
    print("Conversion Finished")

class Inference:
    def __init__(self) -> None:
        
        self.input_dir = "./data/SegRap2023_Validation_Set_20cases"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gpu1_gp2_fusion"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2_more_params"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp1_more_params"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2_more_params_bs4"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2_more_params_bs4_bnnorm"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2_more_params_bs4_bnnorm_postprocess"
        # self.output_dir = "./prediction_results/task2_normf11_ep2000_gp2_more_params_bs4_bnnorm_postprocessonlyclass2"
        # self.output_dir = "./prediction_results/task2_normf11_ep1900_gp2_more_params_bs4_bnnorm"
        # self.output_dir = "./prediction_results/task2_normf11_ep1800_gp2_more_params_bs4_bnnorm"
        self.output_dir = "./prediction_results/fuse135"
        self.output_dir = "./prediction_results/fuse12345_over0.1"
        self.out_spacing = [3.0, 0.54199219, 0.54199219] 
        self.device = torch.device(device)

        os.makedirs(self.output_dir, exist_ok=True)
    def filte_state_dict(self, sd):
        if "module" in sd :
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            new_k = k[7:] if k.startswith("module") else k 
            new_sd[new_k] = v 
        del sd 
        return new_sd

    def convert_mha_to_nii(self, mha_input_path, nii_out_path):  # nnUNet specific
        img = SimpleITK.ReadImage(mha_input_path)
        print(img.GetSize())
        SimpleITK.WriteImage(img, nii_out_path, True)

    def convert_nii_to_mha(self, nii_input_path, mha_out_path):  # nnUNet specific
        img = SimpleITK.ReadImage(nii_input_path)
        SimpleITK.WriteImage(img, mha_out_path, True)

    def read(self, mha_path):
        img = SimpleITK.ReadImage(mha_path)
        spacing = img.GetSpacing()
        raw_size = SimpleITK.GetArrayFromImage(img).shape
        img = SimpleITK.GetArrayFromImage(img)[None,].astype(np.float32)
        properties = {
            "spacing": spacing,
            "raw_size": raw_size
        }
        return img, properties


    def load_inputs(self, i):      # use two modalities input data
        """
        Read input data (two modalities) from `self.input_dir` (/input/). 
        Please do not modify the path for CT and contrast-CT images.
        """
        ct_mha = sorted(os.listdir(os.path.join(self.input_dir, 'head-neck-ct/')))[i]
        ctc_mha = sorted(os.listdir(os.path.join(self.input_dir, 'head-neck-contrast-enhanced-ct/')))[i]
        uuid = os.path.splitext(ct_mha)[0]

        img, properties = self.read(os.path.join(self.input_dir, 'head-neck-ct/', ct_mha))
        img_c, _ = self.read(os.path.join(self.input_dir, 'head-neck-contrast-enhanced-ct/', ctc_mha))

        print(f"{img.shape}, {img_c.shape}, {ct_mha}, {ctc_mha}")

        data = np.concatenate([img, img_c], axis=0)
        del img
        del img_c
        # data is (2, d, w, h)
        return uuid, data, properties
    
    def _normalize(self, data: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        for c in range(data.shape[0]):
            normalizer = CTNormalization(use_mask_for_norm=False,
                                          intensityproperties=foreground_intensity_properties_per_channel[str(c)])
            data[c] = normalizer.run(data[c], None)
        return data
    
    def preprocess(self, data, properties):
        # data : (2, d, w, h)
        # return : data: (1, 2, d, w, h) 

        original_spacing = list(properties['spacing'])
        ## 由于old spacing读出来是反的，因此这里需要转置一下
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        seg = np.zeros_like(data)
        shape_before_cropping = data.shape[1:]
        ## crop
        properties['shape_before_cropping'] = shape_before_cropping
        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
        data, seg, bbox = crop_to_nonzero(data, seg)
        del seg 
        properties['bbox_used_for_cropping'] = bbox

        # crop, remember to store size before cropping!
        shape_before_resample = data.shape[1:]
        properties['shape_after_cropping_before_resample'] = shape_before_resample

        # normalization
        with open("./data_analysis_result.txt", "r") as f:
            content = f.read().strip("\n")
            print(content)
        content = eval(content)
        foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]

        data = self._normalize(data, foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel)

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        # import time 

        # s = time.time()
        # data = resample_data_or_seg_to_shape(data, new_shape, 
        #                                      original_spacing, 
        #                                      self.out_spacing,
        #                                      order=3,
        #                                      order_z=0)
        # properties['shape_after_resample'] = new_shape
        # data = data[None,]
        # data = torch.from_numpy(data)

        data = data[None,]
        data = torch.from_numpy(data).to(self.device)
        print(data.shape)
        print(new_shape)
        data = torch.nn.functional.interpolate(data, size=new_shape.tolist(), mode="trilinear")
        properties['shape_after_resample'] = new_shape

        # print(data.mean(), data.std(), data.sum())

        # e = time.time()
        # print(f"preprocessing time is {e - s}")

        return data, properties

    def define_model_1(self):
        from models.nnunet3d import NNUNetWrapper
        model = NNUNetWrapper(norm="ins")
        # model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs2_ep2000_ds_gpu1_norm-1to1/model/final_model_0.9104.pt"
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs2_ep2000_ds_gpu1_norm-1to1_gpu2/model/final_model_0.9178.pt"

        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size
    
    def define_model_2(self):
        from models.nnunet3d import NNUNetWrapper
        model = NNUNetWrapper(norm="ins")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs2_ep2000_ds_gpu1_norm-1to1/model/final_model_0.9104.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size

    def define_model_3(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="ins")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs2_ep2000_ds_gpu2_norm-1to1_more_params/model/final_model_0.9247.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size
    
    def define_model_4(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="ins")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs2_ep1000_ds_gpu1_norm-1to1_more_params/model/final_model_0.8965.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size

    def define_model_5(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="ins")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs4_ep2000_ds_gpu2_norm-1to1_more_params/model/final_model_0.9292.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size

    def define_model_6_bnnorm_ep2000(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="batch")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs4_ep2000_ds_gpu2_norm-1to1_more_params_bn/model/final_model_0.9092.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size

    def define_model_7_bnnorm_ep1900(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="batch")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs4_ep2000_ds_gpu2_norm-1to1_more_params_bn/best_model_0.9073_ep1900.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size

    def define_model_8_bnnorm_ep1800(self):
        from models.nnunet3d_more_params import NNUNetWrapper
        model = NNUNetWrapper(norm="batch")
        model_path = "/home/xingzhaohu/jiuding_code/SegRap2023/logs/task2_unet3d_alldata_addaug_bs4_ep2000_ds_gpu2_norm-1to1_more_params_bn/best_model_0.9076_ep1800.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        model.load_state_dict(new_sd)
        model.eval()
        patch_size = [64, 256, 256]

        return model, patch_size
    

    def predict(self, data, properties, uid):
        torch.cuda.empty_cache()

        models = [self.define_model_1, self.define_model_2, self.define_model_3, self.define_model_5, self.define_model_6_bnnorm_ep2000]
        num_models = len(models)

        for i in range(num_models):
            print(f"model: {i} is predicting...")
            model, patch_size = models[i]()
            window_infer = SlidingWindowInferer(roi_size=patch_size,
                                                    sw_batch_size=1,
                                                    overlap=0.1,
                                                    progress=True,
                                                    mode="gaussian")
            
            predictor = Predictor(window_infer, mirror_axes=[0,1,2])
            try:
                if i == 0:
                    ensemble_output = predictor.maybe_mirror_and_predict(data, model, self.device)
                else :
                    ensemble_output += predictor.maybe_mirror_and_predict(data, model, self.device)
            except RuntimeError:
                torch.cuda.empty_cache()
                if i == 0:
                    ensemble_output = predictor.maybe_mirror_and_predict(data, model, torch.device("cpu"))
                else :
                    ensemble_output += predictor.maybe_mirror_and_predict(data, model, torch.device("cpu"))

            ensemble_output /= num_models

            torch.cuda.empty_cache()
        del model
        del data

        print(f"prediction done")
        ensemble_output = predictor.predict_raw_probability(ensemble_output, properties)
        print(f"non linear....")
        ensemble_output = predictor.apply_nonlinear(ensemble_output, nonlinear_type="softmax")
        # ensemble_output = ensemble_output > 0

        print(f"restore crop...")
        ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties)

        raw_spacing = properties["spacing"]
        case_name = uid
        print(f"uuid is {uid}")
        os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)

        print(f"saving....")
        predictor.save_to_nii(ensemble_output,
                              raw_spacing,
                              save_dir="./",
                              case_name="SegRap2023_003",
                              postprocess=False)

        self.write_outputs(uid)
        
    def write_outputs(self, uuid):
        """
        If you used one-hot label (2 classes) for training, please convert the 2 classes prediction to 2 gtvs prediction using function `convert_one_hot_label_to_multi_lesions`.
        Otherwise, stack your 2 predictions for gtvs in the first channel, the corresponding mapping between the channel index and the gtv names is:
        {0: 'GTVp',
        1: 'GTVnd'}
        Please ensure the 0 channel is the prediction of GTVp, the 1 channel is the prediction of GTVnd.
        and also ensure the shape of final prediction array is [2, *image_shape].
        The predictions should be saved in the `self.output_dir` (/output/). Please do not modify the path and the suffix (.mha) for saving the prediction.
        """
        os.makedirs(os.path.dirname(self.output_dir), exist_ok=True)
        convert_one_hot_label_to_multi_lesions(os.path.join(
            "./", "SegRap2023_003.mha"), os.path.join(self.output_dir, uuid + ".mha"))
        print('Output written to: ' +
              os.path.join(self.output_dir, uuid + ".mha"))

    def run_single(self, i):
        print('Start processing')
        uuid, data, properties = self.load_inputs(i)
        # return 
        data, properties = self.preprocess(data, properties)
        print(properties)
        print('Start prediction')
        self.predict(data, properties, uuid)

    def run(self):
        pass 
        for i in tqdm(range(20), total=20):
            self.run_single(i)

if __name__ == "__main__":
    # Script entry point: run ensemble inference over the validation set.
    Inference().run()