import torch 
from monai.inferers import SlidingWindowInferer
from light_training.trainer import Trainer
from monai.utils import set_determinism
set_determinism(123)
import os
from light_training.prediction import Predictor
from light_training.preprocessing.preprocessors.preprocessor_aortaseg2024 import DefaultPreprocessor 
from pathlib import Path

from glob import glob
import SimpleITK
import numpy as np
import gc
import torch
from monai.transforms import ScaleIntensityRange
from monai.networks.nets import UNet
from monai.inferers import sliding_window_inference

# Runtime configuration for single-volume inference.
env = "pytorch"  # light_training env type passed to the Trainer
device = "cuda:0"  # device the model is moved to for prediction
patch_size = [192, 128, 128]  # sliding-window ROI size in voxels


# Grand-Challenge container I/O layout: read from /input, write to /output.
INPUT_PATH = Path("/input")
OUTPUT_PATH = Path("/output")
RESOURCE_PATH = Path("resources")  # local resources dir (model weights live here)

class BraTSTrainer(Trainer):
    """Inference-only wrapper around light_training's Trainer.

    Preprocesses a single CT-angiography volume found under INPUT_PATH, runs
    SegMambaV2 with Gaussian-weighted sliding-window inference, and writes the
    argmax label map to OUTPUT_PATH via write_array_as_image_file.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)

        self.patch_size = patch_size

        # Preprocessor configured to mirror training-time normalization.
        self.preprocessor = DefaultPreprocessor(base_dir=None)
        self.preprocessor.out_spacing = [1.0, 1.0, 1.0]

        # Intensity statistics produced offline by the data-analysis step.
        # NOTE(review): eval() executes arbitrary code from this file. It is a
        # trusted artifact we generate ourselves, but prefer ast.literal_eval
        # if the dump contains only plain Python literals (literal_eval cannot
        # parse numpy reprs / nan, which may be why eval is used — confirm).
        with open("./data_analysis_result.txt", "r") as f:
            content = f.read().strip("\n")
            print(content)
        content = eval(content)
        self.preprocessor.foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"]
        # BUG FIX: the original assignment ended with a stray trailing comma,
        # so all_labels was a 1-tuple wrapping the list ([1..23],) instead of
        # the list of label ids itself.
        self.preprocessor.all_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]

        self.location = INPUT_PATH / "images/ct-angiography"

    def run_test_single_image(self):
        """Locate the single input volume, preprocess it, and segment it."""
        location = self.location
        input_files = glob(str(location / "*.tiff")) + glob(str(location / "*.mha"))
        print(f"input files is {input_files}, location is {location}")
        image, properties = self.preprocessor.read_data_submit(input_files[0])
        image, _ = self.preprocessor.run_case_npy(image, None, properties)
        image = torch.from_numpy(image)[None]  # add batch dimension
        image = image.to(self.device)
        with torch.no_grad():
            self.validation_step(image, properties)

    def define_model_segmamba(self):
        """Build SegMambaV2 (1 input channel, 24 classes), load the bundled
        checkpoint, and return (model, sliding-window predictor)."""
        from model_segmamba.segmamba_v2 import SegMambaV2
        model = SegMambaV2(1, 24)

        model_path = "resources/final_model_0.9429.pt"
        new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu"))
        # NOTE(review): strict=False silently ignores missing/unexpected keys;
        # confirm the checkpoint fully matches the architecture.
        model.load_state_dict(new_sd, strict=False)
        model.eval()
        window_infer = SlidingWindowInferer(roi_size=patch_size,
                                            sw_batch_size=1,
                                            overlap=0.2,
                                            progress=True,
                                            mode="gaussian")

        predictor = Predictor(window_infer=window_infer,
                              mirror_axes=None)

        return model, predictor

    def validation_step(self, image, properties):
        """Predict one preprocessed volume and write the label map to disk.

        `image` is a batched tensor; `properties` must carry the original
        'spacing', 'direction' and 'origin' for the output header.
        """
        print(properties['spacing'])

        model, predictor = self.define_model_segmamba()

        model.to(self.device)
        # Keep the full volume on CPU to bound GPU memory; presumably the
        # predictor/inferer moves individual patches onto the model's device —
        # TODO(review): confirm, otherwise a device mismatch would occur here.
        image = image.to("cpu")

        print(f"start predict")
        model_output = predictor.maybe_mirror_and_predict(image, model)[0].cpu()

        print(f"model_output shape is {model_output.shape}")

        # Collapse per-class logits (channel dim 0) into a single label map.
        model_output = model_output.argmax(dim=0).numpy()

        write_array_as_image_file(
            location=OUTPUT_PATH / "images/aortic-branches",
            array=model_output,
            spacing=properties['spacing'],
            direction=properties["direction"],
            origin=properties["origin"],
        )

        return 0

    def filte_state_dict(self, sd):
        """Unwrap a (possibly DDP-saved) checkpoint.

        Takes sd["module"] when present and strips the leading "module."
        prefix (7 chars) from every parameter name.
        """
        if "module" in sd:
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            new_k = k[7:] if k.startswith("module") else k
            new_sd[new_k] = v
        del sd  # release the original dict before returning
        return new_sd


def run():
    """Entry point: build the trainer, log CUDA info, and segment the single
    CT-angiography volume found under INPUT_PATH.

    Returns 0 so the value can be used directly as a process exit code.
    (A large block of commented-out legacy UNet inference code was removed.)
    """
    trainer = BraTSTrainer(env_type=env,
                           max_epochs=1,
                           batch_size=1,
                           device=device,
                           logdir="",
                           val_every=1,
                           num_gpus=1,
                           master_port=17751,
                           training_script=__file__)

    # Print the CUDA diagnostic banner before starting inference.
    _show_torch_cuda_info()

    # Reduce CUDA memory fragmentation for large sliding-window inference.
    # NOTE(review): this is ideally set before the first CUDA allocation;
    # kept at the original position to preserve the existing behavior.
    os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

    trainer.run_test_single_image()
    return 0


def load_image_file_as_array(*, location):
    """Read the first .tiff/.mha image found under *location* with SimpleITK.

    Returns a tuple (array, spacing, direction, origin) where array is the
    voxel data as a NumPy array and the remaining items are the image's
    geometric metadata.
    """
    candidates = []
    for pattern in ("*.tiff", "*.mha"):
        candidates.extend(glob(str(location / pattern)))
    img = SimpleITK.ReadImage(candidates[0])
    return (
        SimpleITK.GetArrayFromImage(img),
        img.GetSpacing(),
        img.GetDirection(),
        img.GetOrigin(),
    )

def write_array_as_image_file(*, location, array, spacing, origin, direction):
    """Write *array* as a compressed .mha image carrying the given geometry.

    Creates *location* if needed, casts the array to uint8 (label map), and
    stamps spacing, direction, and origin onto the output image.
    """
    location.mkdir(parents=True, exist_ok=True)

    # You may need to change the suffix to .tiff to match the expected output
    suffix = ".mha"

    array = array.astype(np.uint8)
    image = SimpleITK.GetImageFromArray(array)
    # BUG FIX: `spacing` was accepted but never applied, so the output kept
    # SimpleITK's default (1,1,1) spacing while direction/origin were set.
    # NOTE(review): confirm `spacing` is in SimpleITK (x,y,z) order and matches
    # the geometry of `array` (which may have been resampled upstream).
    image.SetSpacing(spacing)
    image.SetDirection(direction)
    image.SetOrigin(origin)
    SimpleITK.WriteImage(
        image,
        location / f"output{suffix}",
        useCompression=True,
    )


def _show_torch_cuda_info():

    print("=+=" * 10)
    print("Collecting Torch CUDA information")
    print(f"Torch CUDA is available: {(available := torch.cuda.is_available())}")
    if available:
        print(f"\tnumber of devices: {torch.cuda.device_count()}")
        print(f"\tcurrent device: { (current_device := torch.cuda.current_device())}")
        print(f"\tproperties: {torch.cuda.get_device_properties(current_device)}")
    print("=+=" * 10)


if __name__ == "__main__":
    # Exit with run()'s return value as the process exit code (0 on success).
    raise SystemExit(run())


# if __name__ == "__main__":

#     trainer = BraTSTrainer(env_type=env,
#                             max_epochs=1,
#                             batch_size=1,
#                             device=device,
#                             logdir="",
#                             val_every=1,
#                             num_gpus=1,
#                             master_port=17751,
#                             training_script=__file__)

#     trainer.run_test()



