import os
import pickle
import subprocess
import threading

import SimpleITK
import nibabel as nib
import numpy as np
import torch
from torch.utils.tensorboard.writer import SummaryWriter

from utils.LogUtil import my_logger


def print_GPU_memory_usage(operation: str = "") -> None:
    """Log the current and peak CUDA memory allocation in GB.

    :param operation: label for the log lines, e.g. the step just completed.
    """
    # Bug fix: the old local names said "MB", but dividing by 1024 three
    # times converts bytes to GB — which is what the log messages claim.
    cuda_memory_allocated_GB = torch.cuda.memory_allocated() / 1024 / 1024 / 1024
    cuda_max_memory_allocated_GB = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
    my_logger.info("After " + operation + ", the cuda_memory_allocated is " + str(cuda_memory_allocated_GB) + " GB")
    my_logger.info(
        "After " + operation + ", the cuda_max_memory_allocated is " + str(cuda_max_memory_allocated_GB) + " GB")


def network_visualization():
    """Write the UNet3D computation graph to TensorBoard.

    NOTE: a single forward pass of this model may not fit in GPU memory.
    """
    # Imported lazily so the heavy model module is only loaded on demand.
    from models.ShangJiaoOfficial.model_mj_in3sadfr_ds import UNet3D
    net = UNet3D().cuda()
    dummy_batch = torch.randn(2, 1, 64, 176, 176).cuda()
    with SummaryWriter(log_dir=r"H:\laboratory\project\blood_vessel\Parse2022-Draft\run_logs") as writer:
        writer.add_graph(net, dummy_batch)


def save_nii_file(sample_file_path, output_image, output_filename):
    """Save a 3-D volume as a .nii.gz file next to an existing reference NIfTI.

    The affine is copied from the reference file; boolean masks are stored
    as 0/1 int8 volumes.

    :param sample_file_path: path to an existing NIfTI file whose affine is reused.
    :param output_image: 3-D numpy array; transposed (2, 1, 0) before saving
                         — presumably (z, y, x) -> (x, y, z), TODO confirm.
    :param output_filename: output base name, without the ".nii.gz" extension.
    """
    output_image = output_image.transpose((2, 1, 0))
    # Dtype check instead of sampling element [0][0][0] (which crashed on
    # empty arrays); behavior for bool inputs is unchanged.
    if output_image.dtype == bool:
        output_image = np.where(output_image, 1, 0).astype(dtype="int8")
    func = nib.load(sample_file_path)
    ni_img = nib.Nifti1Image(output_image, func.affine)
    output_path = os.path.join(os.path.dirname(sample_file_path), output_filename + ".nii.gz")
    nib.save(ni_img, output_path)
    my_logger.info(output_filename + " has been written.")
    my_logger.info("The output path is " + output_path)


def serializeObject(obj, file_path):
    """Pickle an object to "train_config.txt" inside the given directory.

    :param obj: the object to save.
    :param file_path: directory in which "train_config.txt" is created.
    :return: None
    """
    target = os.path.join(file_path, "train_config.txt")
    with open(target, "wb") as handle:
        pickle.dump(obj, handle)


def readObject(file_path):
    """Unpickle and return the object stored in "train_config.txt".

    :param file_path: directory that contains "train_config.txt".
    :return: the deserialized object.
    """
    source = os.path.join(file_path, "train_config.txt")
    with open(source, "rb") as handle:
        return pickle.load(handle)


def saveMhdFile(original_meta, data_array, path):
    """Write an array as an image file, copying geometry from a reference.

    :param original_meta: SimpleITK image supplying origin, spacing, direction.
    :param data_array: the voxel data to write.
    :param path: destination file path (format inferred from the extension).
    """
    image = SimpleITK.GetImageFromArray(data_array)
    image.SetOrigin(original_meta.GetOrigin())
    image.SetSpacing(original_meta.GetSpacing())
    image.SetDirection(original_meta.GetDirection())
    SimpleITK.WriteImage(image, path)


def index_crop_3d(original_data, index_list):
    """Crop one patch per batch element at explicit per-element indices.

    :param original_data: tensor of shape (N, C, D, H, W).
    :param index_list: index_list[i] = [[z0, z1], [x0, x1], [y0, y1]] for
                       element i; all entries must describe the same patch size
                       (the first entry determines the output shape).
    :return: float tensor of shape (N, C, z1-z0, x1-x0, y1-y0).
    """
    (z0, z1), (x0, x1), (y0, y1) = index_list[0]
    result = torch.zeros(
        size=(original_data.shape[0], original_data.shape[1], z1 - z0, x1 - x0, y1 - y0))
    for i, ((zs, ze), (xs, xe), (ys, ye)) in enumerate(index_list):
        # Bug fix: copy ALL channels — the original only filled channel 0
        # even though the result was allocated with shape[1] channels.
        result[i] = original_data[i, :, zs:ze, xs:xe, ys:ye]
    return result


def random_crop_3d(original_data, target_patch_size):
    """Randomly crop one patch per batch element from a 5-D tensor.

    :param original_data: tensor of shape (N, C, D, H, W).
    :param target_patch_size: (d, h, w) size of the patch to extract.
    :return: (patches, index_list) where patches has shape (N, C, d, h, w) and
             index_list[i] = [[z0, z1], [x0, x1], [y0, y1]] for element i.
    :raises Exception: if any patch dimension exceeds the input volume.
    """
    index_list = []
    result = torch.zeros(
        size=(original_data.shape[0], original_data.shape[1], target_patch_size[0], target_patch_size[1],
              target_patch_size[2]))

    # NOTE(review): for non-5-D input this silently returns zeros and an
    # empty index list — original behavior, kept for compatibility.
    if len(original_data.shape) == 5:
        original_shape = original_data.shape[2:]
        # Size check hoisted out of the loop: it does not depend on i.
        if original_shape[0] < target_patch_size[0] or original_shape[1] < target_patch_size[1] or \
                original_shape[2] < target_patch_size[2]:
            raise Exception("Too big patch size")
        for i in range(original_data.shape[0]):
            random_z_index = np.random.randint(0, high=original_shape[0] - target_patch_size[0] + 1)
            random_x_index = np.random.randint(0, high=original_shape[1] - target_patch_size[1] + 1)
            random_y_index = np.random.randint(0, high=original_shape[2] - target_patch_size[2] + 1)
            # Bug fix: copy ALL channels — the original only filled channel 0
            # even though the result was allocated with shape[1] channels.
            result[i] = original_data[i, :,
                        random_z_index:random_z_index + target_patch_size[0],
                        random_x_index:random_x_index + target_patch_size[1],
                        random_y_index:random_y_index + target_patch_size[2]]
            index_list.append([
                [random_z_index, random_z_index + target_patch_size[0]],
                [random_x_index, random_x_index + target_patch_size[1]],
                [random_y_index, random_y_index + target_patch_size[2]]])
    return result, index_list


def convertMhd2Nii(data_path):
    """Load an .mhd volume and write it out as .dcm files (scratch utility).

    NOTE(review): despite the name ("convert MHD to NII"), this writes .dcm
    files to a hard-coded local directory — confirm intent before reuse.

    :param data_path: sample directory; expects <data_path>/image/<name>.mhd
                      where <name> is the directory's basename.
    """
    # Earlier SimpleITK-based attempt, left by the author:
    # img_meta = SimpleITK.ReadImage(data_path)
    # img = SimpleITK.GetArrayFromImage(img_meta)
    # SimpleITK.WriteImage(img_meta, "output.nii")
    from medpy.io import load, save
    sample_name = os.path.basename(data_path)  # 128 * 448 * 448
    data_path = os.path.join(data_path, "image", sample_name + ".mhd")
    image_data, image_header = load(data_path)
    root_path = r"H:\laboratory\project\blood_vessel\MICCAI2022-Draft\utils\dicom_test"
    # NOTE(review): each iteration saves the FULL volume under a new name —
    # presumably a single slice (e.g. image_data[..., i]) was intended; confirm.
    for i in range(128):
        save_dir = os.path.join(root_path, str(i) + ".dcm")
        save(image_data, save_dir, image_header)
    print("good")


def make_one_hot(input, num_classes):
    """Convert a class-index array to a one-hot encoded tensor.

    Args:
         input: integer class indices of shape [*] (numpy array); values
                must lie in [0, num_classes).
         num_classes: An int of number of class
    Returns:
        A float tensor of shape [num_classes, *] with a single 1 per position.
    """
    # scatter_ requires the index tensor to have the same number of
    # dimensions as the destination, so the class axis must be prepended.
    # (Bug fix: the original passed the un-expanded index, which raised a
    # dimension-mismatch RuntimeError.)
    index = torch.as_tensor(np.asarray(input)).long().unsqueeze(0)
    result = torch.zeros((num_classes, *index.shape[1:]))
    result.scatter_(0, index, 1)

    return result


def start_tensorboard(logdir):
    """Run a command in a background thread (intended to launch TensorBoard).

    NOTE(review): despite the name, *logdir* is passed straight to
    subprocess.run as the command itself — presumably something like
    ["tensorboard", "--logdir", path]; confirm at the call sites.

    :param logdir: the command (list of args preferred; shell=False default).
    :return: the started Thread, so callers can join() or monitor it
             (previously discarded; returning it is backward compatible).
    """
    def _run_command(command):
        subprocess.run(command)

    worker = threading.Thread(target=_run_command, kwargs={"command": logdir})
    worker.start()
    my_logger.info(threading.current_thread().name + ' start to run tensorboard')
    return worker




if __name__ == '__main__':
    # Ad-hoc scratch entry point: deserialize a previously saved training
    # config for inspection. The path is machine-specific — adjust before
    # running on another machine.
    # data_path = r"D:\dataset\vessel\xunfei_challenge_mri\Training data\train15"
    # convertMhd2Nii(data_path)
    obj = readObject(r"D:\gs\code\xun-fei-challenge\run_logs\TraingdataPreprocessingResidualUNet3DJul23_16-31-56\train")
    print("g")
