#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/7/25
# @USER    : Shengji He
# @File    : inference.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:
from __future__ import annotations
from typing import List, Union
import inspect
import tempfile
import glob
import os
import time
import nibabel as nib
from nibabel.nifti1 import Nifti1Image
import warnings
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

from SegFlow.network import PlainConvUNet

from SegFlow.map_to_binary import class_map, class_map_5_parts, map_taskid_to_partname_ct
from SegFlow.cropping import crop_to_nonzero, bounding_box_to_slice
# from SegFlow.resampling import change_spacing
from SegFlow.predict import predict_sliding_window_return_logits, compute_gaussian
from SegFlow.postprocessing import keep_largest_blob_multilabel, remove_small_blobs_multilabel, remove_auxiliary_labels

from SegFlow.nifti_ext_header import add_label_map_to_nifti
from SegFlow.utils import _normalize, empty_cache, nostdout

__all__ = ['BodySegment', 'TotalBodySegment']

"""
utils
"""


@torch.no_grad()
def resample_3d_grid_sample(
        img_in,
        target_spacing: list[float] | None = None,
        target_size: list[int] | None = None,
        mode: str = 'bilinear',
        padding_mode: str = 'border',
        align_corners: bool = True,
        device=torch.device('cpu'),
):
    """
    Resample a 3D nifti image via ``torch.nn.functional.grid_sample``.

    One of ``target_size`` or (implicit current spacing, ``target_spacing``)
    must be provided; ``target_size`` takes precedence when both are given.

    :param img_in: nibabel image; data is read as float32 and the current
        spacing is taken from ``img_in.header.get_zooms()[::-1]`` (reversed)
    :param target_spacing: target voxel spacing; NOTE(review): callers pass
        ``resample[::-1]``, so this is expected in the same reversed order as
        the zooms — confirm against new call sites
    :param target_size: explicit output data shape (first, middle, last axis)
    :param mode: interpolation mode 'nearest' / 'bilinear' (acts trilinearly
        on 5D input)
    :param padding_mode: handling of samples falling outside the input
    :param align_corners: forwarded to affine_grid/grid_sample when the
        installed torch version accepts the keyword
    :param device: device the interpolation runs on
    :return: tuple ``(resampled numpy array, updated affine)``
    """
    # Load voxel data as float32 and prepend a channel axis -> (1, d, h, w).
    data = (img_in.get_fdata()).astype(np.float32)
    input_tensor = torch.from_numpy(data[None,]).to(device)
    # Reversed zooms so the element order matches the reversed target_spacing.
    current_spacing = img_in.header.get_zooms()[::-1]

    # Defensive only; input_tensor is already a torch.Tensor at this point.
    if not isinstance(input_tensor, torch.Tensor):
        input_tensor = torch.tensor(input_tensor)

    # Ensure 5D (B, C, D, H, W); remember whether the caller had a batch axis.
    has_batch = True
    if input_tensor.dim() == 4:
        input_tensor = input_tensor.unsqueeze(0)  # (1, C, D, H, W)
        has_batch = False

    batch_size, channels, depth, height, width = input_tensor.shape

    if target_size is None and current_spacing is None and target_spacing is None:
        raise IOError("At least one of target_size, (current_spacing, target_spacing) must be provided.")
    elif target_size is not None:
        d_out, h_out, w_out = target_size
        # scale_factors is ordered [width, height, depth] (last-to-first tensor axis).
        scale_factors = [
            w_out / width,
            h_out / height,
            d_out / depth,
        ]
    elif current_spacing is not None and target_spacing is not None:
        # Zoom ratios; index 0 pairs with the LAST tensor axis because both
        # spacing tuples are in reversed order relative to the data axes.
        scale_factors = [
            current_spacing[0] / target_spacing[0],  # scales width (last axis)
            current_spacing[1] / target_spacing[1],  # scales height (middle axis)
            current_spacing[2] / target_spacing[2],  # scales depth (first axis)
        ]

        # Output grid shape (D_out, H_out, W_out).
        d_out = int(np.round(depth * scale_factors[2]))
        h_out = int(np.round(height * scale_factors[1]))
        w_out = int(np.round(width * scale_factors[0]))
    else:
        raise IOError("At least one of target_size, (current_spacing, target_spacing) must be provided.")

    # copy very important; otherwise new_affine changes will also be in old affine
    new_affine = np.copy(img_in.affine)
    # Rescale the voxel-size columns: affine column i maps voxel axis i, and
    # data axis i is scaled by scale_factors[2 - i] (reversed ordering above).
    new_affine[:3, 0] = new_affine[:3, 0] / scale_factors[2]
    new_affine[:3, 1] = new_affine[:3, 1] / scale_factors[1]
    new_affine[:3, 2] = new_affine[:3, 2] / scale_factors[0]

    # Identity theta: F.affine_grid builds the same normalized [-1, 1]
    # sampling grid a manual linspace/meshgrid construction would produce.
    theta = torch.tensor([[
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
    ]], dtype=torch.float32).to(input_tensor.device)

    # Older torch versions do not accept align_corners; pass only if supported.
    kwargs = {}
    if 'align_corners' in inspect.signature(F.affine_grid).parameters.keys():
        kwargs = {'align_corners': align_corners}

    grid = F.affine_grid(theta, [batch_size, channels, d_out, h_out, w_out], **kwargs)
    # Interpolate onto the new grid (the same kwargs also apply to grid_sample).
    output_tensor = F.grid_sample(
        input_tensor,
        grid,
        mode=mode,
        padding_mode=padding_mode,
        **kwargs
    )
    if not has_batch:
        output_tensor = output_tensor.squeeze(0)

    # Drop singleton axes and move back to a CPU numpy array.
    new_data = output_tensor.squeeze().cpu().numpy()
    return new_data, new_affine  # resampled (D_new, H_new, W_new) data + affine


def run_case_npy(data: np.ndarray, properties: dict, transpose_forward, foreground_intensity_properties):
    """Preprocess a raw image array nnU-Net style: transpose, crop, normalize.

    Records the pre-crop shape and the crop bounding box in ``properties``
    (mutated in place) so the cropping can be reverted after prediction.
    """
    work = np.copy(data)

    # Reorder the spatial axes; the channel axis (0) stays in front.
    axis_order = [0] + [ax + 1 for ax in transpose_forward]
    work = work.transpose(axis_order)

    # Remember the uncropped spatial shape, then crop away the zero border.
    properties['shape_before_cropping'] = work.shape[1:]
    work, seg, bbox = crop_to_nonzero(work)
    properties['bbox_used_for_cropping'] = bbox
    properties['shape_after_cropping_and_before_resampling'] = work.shape[1:]

    # Normalization must happen before resampling, otherwise resampled nonzero
    # masks no longer fit the images exactly.
    work = _normalize(work, foreground_intensity_properties)
    return work


def combine_masks(mask_dir, class_type):
    """
    Combine single-class segmentation masks into one binary mask.

    mask_dir: directory containing the per-class TotalSegmentator nifti masks
    class_type: ribs | lung | lung_left | lung_right | pelvis | body

    returns: nibabel image with 1 wherever any selected class is present

    raises:
        ValueError: if class_type is unknown or a required mask file is missing
    """
    rib_classes = [f"rib_left_{idx}" for idx in range(1, 13)] + \
                  [f"rib_right_{idx}" for idx in range(1, 13)]
    class_groups = {
        "ribs": rib_classes,
        "lung": ["lung_upper_lobe_left", "lung_lower_lobe_left", "lung_upper_lobe_right",
                 "lung_middle_lobe_right", "lung_lower_lobe_right"],
        "lung_left": ["lung_upper_lobe_left", "lung_lower_lobe_left"],
        "lung_right": ["lung_upper_lobe_right", "lung_middle_lobe_right", "lung_lower_lobe_right"],
        "pelvis": ["femur_left", "femur_right", "hip_left", "hip_right"],
        "body": ["body_trunc", "body_extremities"],
    }
    # Explicit check: the original fell through with an UnboundLocalError for
    # unknown class types.
    if class_type not in class_groups:
        raise ValueError(f"Unknown class_type: {class_type}")
    masks = class_groups[class_type]

    # Verify every mask file exists; keep the last one as geometry reference.
    ref_img = None
    for mask in masks:
        mask_path = os.path.join(mask_dir, f"{mask}.nii.gz")
        if not os.path.exists(mask_path):
            # os.path.join instead of the '/' operator: mask_dir may be a str,
            # for which 'mask_dir / mask' raises TypeError.
            raise ValueError(f"Could not find {mask_path}. Did you run TotalSegmentator successfully?")
        ref_img = nib.load(mask_path)

    combined = np.zeros(ref_img.shape, dtype=np.uint8)
    # All files exist (checked above), so load them directly.
    for mask in masks:
        img = nib.load(os.path.join(mask_dir, f"{mask}.nii.gz")).get_fdata()
        combined[img > 0.5] = 1

    return nib.Nifti1Image(combined, ref_img.affine)


def check_if_shape_and_affine_identical(img_1, img_2):
    """Print warnings when two images differ in affine or shape.

    Purely diagnostic: nothing is raised or returned, differences are only
    reported on stdout.
    """
    affine_diff = np.abs(img_1.affine - img_2.affine)
    if affine_diff.max() > 1e-5:
        print("Affine in:")
        print(img_1.affine)
        print("Affine out:")
        print(img_2.affine)
        print("Diff:")
        print(affine_diff)
        print("WARNING: Output affine not equal to input affine. This should not happen.")

    if img_1.shape != img_2.shape:
        print("Shape in:")
        print(img_1.shape)
        print("Shape out:")
        print(img_2.shape)
        print("WARNING: Output shape not equal to input shape. This should not happen.")


"""
alignment
"""


def as_closest_canonical(img_in):
    """Reorient ``img_in`` to the closest canonical (RAS) orientation.

    Thin wrapper around ``nib.as_closest_canonical``.
    """
    canonical_img = nib.as_closest_canonical(img_in)
    return canonical_img


def undo_canonical(img_can, img_orig):
    """
    Invert nib.as_closest_canonical().

    img_can: the image we want to move back
    img_orig: the original image from before the canonical transform

    returns: image in the original orientation

    https://github.com/nipy/nibabel/issues/1063
    """
    original_ornt = nib.orientations.io_orientation(img_orig.affine)
    canonical_ornt = nib.orientations.axcodes2ornt("RAS")
    # Transform mapping from canonical (RAS) back to the original orientation;
    # applying original_ornt directly would be the forward (to-canonical) step.
    back_transform = nib.orientations.ornt_transform(canonical_ornt, original_ornt)
    return img_can.as_reoriented(back_transform)


"""
shared functions
"""


def build_network(file, num_segmentation_heads):
    """Instantiate the 3d_fullres PlainConvUNet and load checkpoint weights.

    :param file: path to a torch checkpoint containing 'network_weights' and
        optionally 'inference_allowed_mirroring_axes'
    :param num_segmentation_heads: number of output channels / classes
    :return: tuple (network, inference_allowed_mirroring_axes)
    """
    print('Setup Network...')
    try:
        checkpoint = torch.load(file, map_location=torch.device('cpu'))
    except Exception:
        # Newer torch versions default to weights_only=True, which rejects
        # older checkpoints with arbitrary pickled objects; retry explicitly.
        # NOTE: weights_only=False unpickles arbitrary code - load trusted files only.
        checkpoint = torch.load(file, map_location=torch.device('cpu'), weights_only=False)
    inference_allowed_mirroring_axes = checkpoint.get('inference_allowed_mirroring_axes', None)

    weight_params = checkpoint['network_weights']

    # Fixed nnU-Net 3d_fullres architecture hyperparameters for this model.
    UNet_base_num_features = 32
    unet_max_num_features = 320
    conv_kernel_sizes = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
    pool_op_kernel_sizes = [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
    n_conv_per_stage_encoder = [2, 2, 2, 2, 2, 2]
    n_conv_per_stage_decoder = [2, 2, 2, 2, 2]

    deep_supervision = False
    num_stages = len(conv_kernel_sizes)
    conv_op = nn.Conv3d

    kwargs = {
        'conv_bias': True,
        'norm_op': nn.InstanceNorm3d,
        'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
        'dropout_op': None, 'dropout_op_kwargs': None,
        'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
    }
    conv_or_blocks_per_stage = {
        'n_conv_per_stage': n_conv_per_stage_encoder,
        'n_conv_per_stage_decoder': n_conv_per_stage_decoder
    }

    network = PlainConvUNet(
        input_channels=1,
        n_stages=num_stages,
        # Feature count doubles per stage, capped at unet_max_num_features.
        features_per_stage=[min(UNet_base_num_features * 2 ** i, unet_max_num_features) for i in range(num_stages)],
        conv_op=conv_op,
        kernel_sizes=conv_kernel_sizes,
        strides=pool_op_kernel_sizes,
        num_classes=num_segmentation_heads,
        deep_supervision=deep_supervision,
        **conv_or_blocks_per_stage,
        **kwargs
    )
    network.load_state_dict(weight_params)
    print('Loaded weights for Network')
    return network, inference_allowed_mirroring_axes


# ---------------------------------------------------------
def find_candidate_datasets(dataset_id: int, model_folder):
    """Find dataset directories in ``model_folder`` matching 'DatasetXXX*'.

    :param dataset_id: numeric dataset id (e.g. 291 -> prefix 'Dataset291')
    :param model_folder: folder that contains the dataset directories
    :return: tuple of unique matching directory basenames, sorted so the
        result is deterministic
    """
    prefix = "Dataset%03.0d" % dataset_id
    candidate_paths = glob.glob(os.path.join(model_folder, f'{prefix}*'))
    # Only directories count as datasets; plain files are ignored.
    candidates = {os.path.basename(path) for path in candidate_paths if os.path.isdir(path)}
    # sorted(): tuple(set(...)) had nondeterministic order, which made
    # convert_id_to_dataset_name's choice of element [0] unstable.
    return tuple(sorted(candidates))


def convert_id_to_dataset_name(dataset_id: int, model_folder):
    """Resolve a numeric dataset id to its unique dataset folder name.

    Raises RuntimeError when zero or more than one candidate is found.
    """
    candidates = find_candidate_datasets(dataset_id, model_folder)
    if len(candidates) == 0:
        raise RuntimeError(f"Could not find a dataset with the ID {dataset_id}. Make sure the requested dataset ID "
                           f"exists and that nnU-Net knows where raw and preprocessed data are located: {model_folder}"
                           )
    if len(candidates) > 1:
        raise RuntimeError("More than one dataset name found for dataset id %d. Please correct that. (I looked in the "
                           "following folders:\n%s" % (dataset_id, model_folder))
    return candidates[0]


def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str], model_folder) -> str:
    """Return a dataset name for either a full name ('Dataset...') or an id.

    :param dataset_name_or_id: full dataset name, or a dataset id as int/str
    :param model_folder: folder searched when a numeric id has to be resolved
    :raises ValueError: if a string is neither a dataset name nor an integer id
    """
    if isinstance(dataset_name_or_id, str) and dataset_name_or_id.startswith("Dataset"):
        return dataset_name_or_id
    if isinstance(dataset_name_or_id, str):
        try:
            dataset_name_or_id = int(dataset_name_or_id)
        except ValueError:
            # Fixed typo in the user-facing message: 'tast name' -> 'task name'.
            raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to "
                             "convert it to a dataset ID (int). That failed, however. Please give an integer number "
                             "('1', '2', etc) or a correct task name. Your input: %s" % dataset_name_or_id)
    return convert_id_to_dataset_name(dataset_name_or_id, model_folder)


def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration):
    """Join trainer, plans and configuration into the results-folder identifier."""
    return '__'.join([trainer_name, plans_identifier, configuration])


def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',
                      plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',
                      fold: Union[str, int] = None, model_folder='.') -> str:
    """Build the path to a trained-model folder, optionally down to one fold."""
    dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id, model_folder)
    identifier = convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration)
    output_folder = os.path.join(model_folder, dataset_name, identifier)
    if fold is not None:
        output_folder = os.path.join(output_folder, f'fold_{fold}')
    return output_folder


"""
interface
"""


# NOTE(review): torch.no_grad() applied to a *class* wraps only the
# construction call, not the methods — gradient tracking inside predict()
# relies on the explicit no_grad usage there; confirm this is intended.
@torch.no_grad()
class BodySegment:
    """Segment body masks (body_trunc / body_extremities) from a CT nifti.

    Wraps one nnU-Net-style checkpoint: resamples the input to isotropic
    spacing, runs sliding-window inference, post-processes the multilabel
    prediction (largest blob / small-blob removal) and restores the original
    geometry before optionally saving per-class masks.
    """

    def __init__(self, model_path, device=None, verbose=False, allow_tqdm=False):
        """
        :param model_path: path to the checkpoint file (loaded lazily)
        :param device: torch device; defaults to cuda:0 when available
        :param verbose: print progress/timing information
        :param allow_tqdm: allow a progress bar during sliding-window predict
        """
        if not os.path.exists(model_path):
            raise FileNotFoundError(model_path)
        self.model_path = model_path

        if device is None:
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.device = device
        self.verbose = verbose
        self.allow_tqdm = allow_tqdm

        # -----------------------------------------------------
        # default parameters
        # -----------------------------------------------------
        self.task_name = 'body'
        self.num_segmentation_heads = 3
        self.resample = 1.5  # isotropic target spacing in mm (float -> same for all axes)
        self.nr_threads_resampling = 6  # Nr of threads for resampling

        # Save one multilabel image for all classes
        self.multilabel_image = False

        # Do not create derived masks (e.g. skin from body mask).
        self.no_derived_masks = False

        # Skip saving of segmentations for faster runtime if you are only interested in statistics.
        self.skip_saving = False

        # -----------------------------------------------------
        # wait generate
        # -----------------------------------------------------
        # Network and mirroring axes are built lazily on first predict().
        self.network, self.inference_allowed_mirroring_axes = None, None

    def predict(self, nib_image):
        """Run sliding-window inference on an (already resampled) nifti image.

        :param nib_image: input nibabel image
        :return: uint8 label nifti restored to the input image's orientation
        """
        # for network prediction
        patch_size = [128, 128, 128]
        tile_step_size = 0.5
        use_gaussian = True
        use_mirroring = False
        perform_everything_on_gpu = True

        allow_tqdm = self.allow_tqdm
        verbose = self.verbose

        transpose_forward = [0, 1, 2]
        transpose_backward = [0, 1, 2]
        # Fixed CT foreground intensity statistics used for normalization
        # (nnU-Net fingerprint values baked in at training time).
        foreground_intensity_properties = {
            "max": 44877.0,
            "mean": -43.4468879699707,
            "median": 0.0,
            "min": -42801.0,
            "percentile_00_5": -985.0,
            "percentile_99_5": 1411.0,
            "std": 355.778564453125
        }

        num_segmentation_heads = self.num_segmentation_heads
        device = self.device
        # Lazy network construction; cached on the instance for later calls.
        if self.network is None:
            self.network, self.inference_allowed_mirroring_axes = build_network(self.model_path, num_segmentation_heads)
        network, allowed_mirroring_axes = self.network, self.inference_allowed_mirroring_axes

        original_affine = nib_image.affine

        # Reorientation round-trip bookkeeping (nnU-Net NibabelIO convention).
        reoriented_image = nib_image.as_reoriented(nib.io_orientation(original_affine))
        reoriented_affine = reoriented_image.affine

        # Reversed zooms -> spacing in the (z, y, x) order used below.
        spacings_for_nnunet = [float(i) for i in reoriented_image.header.get_zooms()[::-1]]

        # (x, y, z) -> (z, y, x), plus a leading channel axis.
        data = reoriented_image.get_fdata().transpose((2, 1, 0))[None]

        data = data.astype(np.float32)
        data_properties = {
            'nibabel_stuff': {'original_affine': original_affine, 'reoriented_affine': reoriented_affine},
            'spacing': spacings_for_nnunet
        }

        # run_case_npy!!!  (transpose, crop to nonzero, normalize; mutates data_properties)
        data = run_case_npy(data, data_properties, transpose_forward, foreground_intensity_properties)

        data = torch.from_numpy(data).contiguous().float()

        # predict_logits_from_preprocessed_data

        prediction = predict_sliding_window_return_logits(data, network, num_segmentation_heads, patch_size, device,
                                                          tile_step_size, use_gaussian, use_mirroring,
                                                          perform_everything_on_gpu,
                                                          allowed_mirroring_axes, verbose, allow_tqdm)
        # convert_predicted_logits_to_segmentation_with_correct_shape

        with torch.no_grad():
            prediction = prediction.float()
            # predicted_probabilities = torch.sigmoid(prediction)
            predicted_probabilities = torch.softmax(prediction, 0)
        del prediction

        # check correct number of outputs
        assert predicted_probabilities.shape[0] == num_segmentation_heads, \
            f'unexpected number of channels in predicted_probabilities. Expected {num_segmentation_heads}, ' \
            f'got {predicted_probabilities.shape[0]}. Remeber that predicted_probabilities should have shape ' \
            f'(c, x, y(, z)).'
        empty_cache(device)
        # argmax on the device first; fall back to CPU on OOM-style failures.
        try:
            segmentation = predicted_probabilities.argmax(0)
        except RuntimeError:
            print('Prediction done, transferring to CPU if needed')
            predicted_probabilities = predicted_probabilities.cpu()
            segmentation = predicted_probabilities.argmax(0)
        segmentation = segmentation.cpu().numpy()

        empty_cache(device)
        # put segmentation in bbox (revert cropping)
        segmentation_reverted_cropping = np.zeros(data_properties['shape_before_cropping'], dtype=np.uint8)
        slicer = bounding_box_to_slice(data_properties['bbox_used_for_cropping'])
        segmentation_reverted_cropping[slicer] = segmentation
        del segmentation

        # revert transpose
        segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(transpose_backward)

        # revert transpose, to nifti: (z, y, x) back to (x, y, z)
        seg = segmentation_reverted_cropping.transpose((2, 1, 0)).astype(np.uint8)

        seg_nib = nib.Nifti1Image(seg, affine=data_properties['nibabel_stuff']['reoriented_affine'])
        seg_nib_reoriented = seg_nib.as_reoriented(
            nib.io_orientation(data_properties['nibabel_stuff']['original_affine']))
        assert np.allclose(data_properties['nibabel_stuff']['original_affine'], seg_nib_reoriented.affine), \
            'restored affine does not match original affine'
        return seg_nib_reoriented

    def call_origin(self, nib_file, save_path):
        """Original CPU pipeline variant using change_spacing for resampling.

        NOTE(review): the ``change_spacing`` import is commented out at the top
        of this file, so this method raises NameError as-is when resampling is
        enabled — ``__call__`` (grid_sample based) is the working variant.

        :param nib_file: path to a nifti file or a Nifti1Image
        :param save_path: output file (multilabel) or directory (per-class)
        :return: label nifti with the input image's geometry/header
        """
        resample = self.resample
        verbose = self.verbose
        nr_threads_resampling = self.nr_threads_resampling
        task_name = self.task_name
        skip_saving = self.skip_saving
        multilabel_image = self.multilabel_image
        no_derived_masks = self.no_derived_masks

        # A single float means isotropic resampling.
        if type(resample) is float:
            resample = [resample, resample, resample]

        if isinstance(nib_file, Nifti1Image):
            img_in_orig = nib_file
        else:
            img_in_orig = nib.load(nib_file)

        if len(img_in_orig.shape) == 2:
            raise ValueError("TotalSegmentator does not work for 2D images. Use a 3D image.")
        if len(img_in_orig.shape) > 3:
            print(f"WARNING: Input image has {len(img_in_orig.shape)} dimensions. Only using first three dimensions.")
            img_in_orig = nib.Nifti1Image(img_in_orig.get_fdata()[:, :, :, 0], img_in_orig.affine)

        img_dtype = img_in_orig.get_data_dtype()
        if img_dtype.fields is not None:
            raise TypeError(f"Invalid dtype {img_dtype}. Expected a simple dtype, not a structured one.")

        # takes ~0.9s for medium image
        img_in = nib.Nifti1Image(img_in_orig.get_fdata(), img_in_orig.affine)  # copy img_in_orig

        img_in = as_closest_canonical(img_in)

        if resample is not None:
            if verbose:
                print("Resampling...")
            st = time.time()
            img_in_shape = img_in.shape
            img_in_zooms = img_in.header.get_zooms()
            img_in_rsp = change_spacing(img_in, resample,
                                        order=3, dtype=np.int32,
                                        nr_cpus=nr_threads_resampling)  # 4 cpus instead of 1 makes it a bit slower
            # img_in_rsp = resample_3d_grid_sample(img_in, img_in_zooms[::-1], resample[::-1], device=self.device)
            if verbose:
                print(f"  from shape {img_in.shape} to shape {img_in_rsp.shape}")
                print(f"  Resampled in {time.time() - st:.2f}s")
        else:
            img_in_rsp = img_in

        st = time.time()
        if verbose:
            print("Predicting...")
        # nostdout silences inner prints unless verbose is set.
        with nostdout(verbose):
            img_pred = self.predict(img_in_rsp)
        if verbose:
            print(f"  Predicted in {time.time() - st:.2f}s")

        # Postprocessing multilabel (run here on lower resolution)
        img_pred_pp = keep_largest_blob_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                   ["body_trunc"], debug=False, quiet=not verbose)
        img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        # Drop body_extremities blobs smaller than ~50 cm^3 (threshold in voxels).
        vox_vol = np.prod(img_pred.header.get_zooms())
        size_thr_mm3 = 50000 / vox_vol
        img_pred_pp = remove_small_blobs_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                    ["body_extremities"], interval=[size_thr_mm3, 1e10],
                                                    debug=False, quiet=not verbose)
        img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        if resample is not None:
            if verbose:
                print("Resampling...")
                print(f"  back to original shape: {img_in_shape}")
            # Use force_affine otherwise output affine sometimes slightly off (which then is even increased
            # by undo_canonical)
            img_pred = change_spacing(img_pred, resample, img_in_shape,
                                      order=0, dtype=np.uint8, nr_cpus=nr_threads_resampling,
                                      force_affine=img_in.affine)

        if verbose:
            print("Undoing canonical...")
        img_pred = undo_canonical(img_pred, img_in_orig)

        # Diagnostic only: prints warnings on mismatch, does not raise.
        check_if_shape_and_affine_identical(img_in_orig, img_pred)

        img_data = img_pred.get_fdata().astype(np.uint8)

        label_map = class_map[task_name]

        # Prepare output nifti
        # Copy header to make output header exactly the same as input. But change dtype otherwise it will be
        # float or int and therefore the masks will need a lot more space.
        # (infos on header: https://nipy.org/nibabel/nifti_images.html)
        new_header = img_in_orig.header.copy()
        new_header.set_data_dtype(np.uint8)
        img_out = nib.Nifti1Image(img_data, img_pred.affine, new_header)
        img_out = add_label_map_to_nifti(img_out, label_map)

        if save_path is not None and skip_saving is False:
            if verbose:
                print("Saving segmentations...")

            # Select subset of classes if required
            selected_classes = class_map[task_name]

            st = time.time()  # timing start (currently not reported)
            if multilabel_image:
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                nib.save(img_out, save_path)
            else:
                os.makedirs(save_path, exist_ok=True)

                # One binary nifti per class label.
                for k, v in selected_classes.items():
                    binary_img = img_data == k
                    output_path = os.path.join(save_path, f"{v}.nii.gz")
                    nib.save(nib.Nifti1Image(binary_img.astype(np.uint8), img_pred.affine, new_header), output_path)

            if task_name == "body" and not multilabel_image and not no_derived_masks:
                if verbose:
                    print("Creating body.nii.gz")
                body_img = combine_masks(save_path, "body")
                nib.save(body_img, os.path.join(save_path, "body.nii.gz"))
                # if not quiet: print("Creating skin.nii.gz")
                # skin = extract_skin(img_in_orig, nib.load(file_out / "body.nii.gz"))
                # nib.save(skin, file_out / "skin.nii.gz")
        return img_out

    def __call__(self, nib_file, save_path):
        """Full pipeline using grid_sample-based resampling on self.device.

        :param nib_file: path to a nifti file or a Nifti1Image
        :param save_path: output file (multilabel) or directory (per-class);
            None (or skip_saving) disables saving
        :return: label nifti with the input image's geometry/header
        """
        resample = self.resample
        verbose = self.verbose
        nr_threads_resampling = self.nr_threads_resampling
        task_name = self.task_name
        skip_saving = self.skip_saving
        multilabel_image = self.multilabel_image
        no_derived_masks = self.no_derived_masks
        device = self.device

        # A single float means isotropic resampling.
        if type(resample) is float:
            resample = [resample, resample, resample]

        if isinstance(nib_file, Nifti1Image):
            img_in_orig = nib_file
        else:
            img_in_orig = nib.load(nib_file)

        if len(img_in_orig.shape) == 2:
            raise ValueError("TotalSegmentator does not work for 2D images. Use a 3D image.")
        if len(img_in_orig.shape) > 3:
            print(f"WARNING: Input image has {len(img_in_orig.shape)} dimensions. Only using first three dimensions.")
            img_in_orig = nib.Nifti1Image(img_in_orig.get_fdata()[:, :, :, 0], img_in_orig.affine)

        img_dtype = img_in_orig.get_data_dtype()
        if img_dtype.fields is not None:
            raise TypeError(f"Invalid dtype {img_dtype}. Expected a simple dtype, not a structured one.")

        # takes ~0.9s for medium image
        img_in = nib.Nifti1Image(img_in_orig.get_fdata(), img_in_orig.affine)  # copy img_in_orig

        img_in = as_closest_canonical(img_in)

        if resample is not None:
            if verbose:
                print("Resampling...")
            st = time.time()
            img_in_shape = img_in.shape
            # img_in_zooms = img_in.header.get_zooms()
            # img_in_rsp = change_spacing(img_in, resample,
            #                             order=3, dtype=np.int32,
            #                             nr_cpus=nr_threads_resampling)  # 4 cpus instead of 1 makes it a bit slower
            # resample[::-1]: spacing reversed to match the reversed zooms
            # convention inside resample_3d_grid_sample.
            data_rsp, affine_rsp = resample_3d_grid_sample(img_in, resample[::-1], device=device)
            img_in_rsp = nib.Nifti1Image(data_rsp, affine_rsp)
            if verbose:
                print(f"  from shape {img_in.shape} to shape {img_in_rsp.shape}")
                print(f"  Resampled in {time.time() - st:.2f}s")
        else:
            img_in_rsp = img_in

        st = time.time()
        if verbose:
            print("Predicting...")
        # nostdout silences inner prints unless verbose is set.
        with nostdout(verbose):
            img_pred = self.predict(img_in_rsp)
        if verbose:
            print(f"  Predicted in {time.time() - st:.2f}s")

        # Postprocessing multilabel (run here on lower resolution)
        img_pred_pp = keep_largest_blob_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                   ["body_trunc"], debug=False, quiet=not verbose)
        img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        # Drop body_extremities blobs smaller than ~50 cm^3 (threshold in voxels).
        vox_vol = np.prod(img_pred.header.get_zooms())
        size_thr_mm3 = 50000 / vox_vol
        img_pred_pp = remove_small_blobs_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                    ["body_extremities"], interval=[size_thr_mm3, 1e10],
                                                    debug=False, quiet=not verbose)
        img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        if resample is not None:
            if verbose:
                print("Resampling...")
                print(f"  back to original shape: {img_in_shape}")
            # Use force_affine otherwise output affine sometimes slightly off (which then is even increased
            # by undo_canonical)
            # img_pred = change_spacing(img_pred, resample, img_in_shape,
            #                           order=0, dtype=np.uint8, nr_cpus=nr_threads_resampling,
            #                           force_affine=img_in.affine)
            # nearest-neighbor so label values are preserved exactly.
            data_rsp, _ = resample_3d_grid_sample(img_pred, target_size=img_in_shape, mode='nearest', device=device)
            data_rsp = data_rsp.astype(np.uint8)
            img_pred = nib.Nifti1Image(data_rsp, img_in.affine)
        if verbose:
            print("Undoing canonical...")
        img_pred = undo_canonical(img_pred, img_in_orig)

        # Diagnostic only: prints warnings on mismatch, does not raise.
        check_if_shape_and_affine_identical(img_in_orig, img_pred)

        img_data = img_pred.get_fdata().astype(np.uint8)

        label_map = class_map[task_name]

        # Prepare output nifti
        # Copy header to make output header exactly the same as input. But change dtype otherwise it will be
        # float or int and therefore the masks will need a lot more space.
        # (infos on header: https://nipy.org/nibabel/nifti_images.html)
        new_header = img_in_orig.header.copy()
        new_header.set_data_dtype(np.uint8)
        img_out = nib.Nifti1Image(img_data, img_pred.affine, new_header)
        img_out = add_label_map_to_nifti(img_out, label_map)

        if save_path is not None and skip_saving is False:
            if verbose:
                print("Saving segmentations...")

            # Select subset of classes if required
            selected_classes = class_map[task_name]

            st = time.time()  # timing start (currently not reported)
            if multilabel_image:
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                nib.save(img_out, save_path)
            else:
                os.makedirs(save_path, exist_ok=True)

                # One binary nifti per class label.
                for k, v in selected_classes.items():
                    binary_img = img_data == k
                    output_path = os.path.join(save_path, f"{v}.nii.gz")
                    nib.save(nib.Nifti1Image(binary_img.astype(np.uint8), img_pred.affine, new_header), output_path)

            if task_name == "body" and not multilabel_image and not no_derived_masks:
                if verbose:
                    print("Creating body.nii.gz")
                body_img = combine_masks(save_path, "body")
                nib.save(body_img, os.path.join(save_path, "body.nii.gz"))
                # if not quiet: print("Creating skin.nii.gz")
                # skin = extract_skin(img_in_orig, nib.load(file_out / "body.nii.gz"))
                # nib.save(skin, file_out / "skin.nii.gz")
        return img_out

    def __del__(self):
        # Release the cached gaussian importance map and any device memory.
        compute_gaussian.cache_clear()
        empty_cache(self.device)
        print('class instance destroyed!')


# NOTE(review): applying @torch.no_grad() to a *class* is deprecated in recent
# PyTorch and may be a no-op (methods are not wrapped) — confirm gradients are
# actually disabled during inference, or decorate the inference methods
# individually.
@torch.no_grad()
class TotalBodySegment:
    """
    for ct segment, all kinds of region
    WARNING: only for organs currently!!!

    Multi-model "total" CT segmentation: runs five nnU-Net sub-models
    (task ids 291-295) and merges their outputs into one label map.
    """

    def __init__(self, model_folder, device=None, verbose=False, allow_tqdm=False):
        """
        :param model_folder: root folder containing the per-task model folders
        :param device: torch.device; defaults to cuda:0 when available, else cpu
        :param verbose: print progress information
        :param allow_tqdm: allow tqdm progress bars during sliding-window inference
        """
        if not os.path.exists(model_folder):
            raise FileNotFoundError(model_folder)
        self.model_folder = model_folder  # contain all models

        if device is None:
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.device = device
        self.verbose = verbose
        self.allow_tqdm = allow_tqdm

        # -----------------------------------------------------
        # default parameters
        # -----------------------------------------------------
        self.task_name = 'total'
        # the five sub-models making up the "total" task
        self.task_id = [291, 292, 293, 294, 295]
        self.cur_task_id = None  # task id of the currently-loaded network
        # output channels (classes incl. background) per sub-model
        self.task_id_to_num_segmentation_heads = {291: 25, 292: 27, 293: 19, 294: 24, 295: 27}
        self.resample = 1.5  # isotropic working spacing in mm
        self.nr_threads_resampling = 6  # Nr of threads for resampling

        # images larger than this many voxels are split along z before inference
        self.nr_voxels_thr = 512 * 512 * 900
        self.perform_everything_on_gpu = True

        self.trainer = "nnUNetTrainerNoMirroring"
        self.model = "3d_fullres"
        self.plans = "nnUNetPlans"
        self.model_name = 'checkpoint_final.pth'
        # Save one multilabel image for all classes
        self.multilabel_image = True

        # Do not create derived masks (e.g. skin from body mask).
        self.no_derived_masks = False

        # Skip saving of segmentations for faster runtime if you are only interested in statistics.
        self.skip_saving = False

        # -----------------------------------------------------
        # wait generate
        # -----------------------------------------------------
        # network + mirroring axes are built lazily on first predict()
        self.network, self.inference_allowed_mirroring_axes = None, None

    def predict(self, nib_image, task_id, step_size):
        """Run sliding-window nnU-Net inference for one task on a nibabel image.

        Builds (and caches) the network for ``task_id``, preprocesses the
        volume, predicts logits with a Gaussian-weighted sliding window,
        takes the channel argmax and restores the segmentation to the
        geometry of the input image.

        :param nib_image: 3D CT volume as a nibabel image (already resampled)
        :param task_id: model identifier, a key of
            ``self.task_id_to_num_segmentation_heads``
        :param step_size: sliding-window tile step size
        :return: uint8 label map as a nibabel Nifti1Image whose affine matches
            the input's
        """
        # for network prediction
        patch_size = [128, 128, 128]
        tile_step_size = step_size
        use_gaussian = True
        use_mirroring = False
        perform_everything_on_gpu = self.perform_everything_on_gpu

        allow_tqdm = self.allow_tqdm
        verbose = self.verbose

        transpose_forward = [0, 1, 2]
        transpose_backward = [0, 1, 2]
        # CT foreground intensity statistics used for normalization —
        # presumably taken from the nnU-Net training fingerprint; TODO confirm
        # they match the deployed checkpoints.
        foreground_intensity_properties = {
            "max": 3606.0,
            "mean": -370.00039267657144,
            "median": -249.0,
            "min": -3139.0,
            "percentile_00_5": -1024.0,
            "percentile_99_5": 276.0,
            "std": 436.5998675471528
        }

        num_segmentation_heads = self.task_id_to_num_segmentation_heads[task_id]
        device = self.device
        # Lazily (re)build the network only when the requested task changes;
        # the loaded weights stay cached on the instance between calls.
        if self.network is None or self.cur_task_id != task_id:
            cur_model_folder = get_output_folder(task_id, self.trainer, self.plans, self.model, 0, self.model_folder)
            model_file = os.path.join(cur_model_folder, self.model_name)
            self.network, self.inference_allowed_mirroring_axes = build_network(model_file, num_segmentation_heads)
            self.cur_task_id = task_id
        network, allowed_mirroring_axes = self.network, self.inference_allowed_mirroring_axes

        original_affine = nib_image.affine

        # NOTE(review): reorienting an image with its own io_orientation looks
        # like an identity transform — confirm this is intended (as opposed to
        # e.g. nib.as_closest_canonical).
        reoriented_image = nib_image.as_reoriented(nib.io_orientation(original_affine))
        reoriented_affine = reoriented_image.affine

        # zooms are (x, y, z); reversed to the nnU-Net (z, y, x) convention
        spacings_for_nnunet = [float(i) for i in reoriented_image.header.get_zooms()[::-1]]

        # nibabel arrays are (x, y, z); transpose to (z, y, x) and add a channel axis
        data = reoriented_image.get_fdata().transpose((2, 1, 0))[None]

        data = data.astype(np.float32)
        data_properties = {
            'nibabel_stuff': {'original_affine': original_affine, 'reoriented_affine': reoriented_affine},
            'spacing': spacings_for_nnunet
        }

        # run_case_npy: cropping + normalization; it is expected to add
        # 'shape_before_cropping' and 'bbox_used_for_cropping' to
        # data_properties (both are read below when reverting the crop)
        data = run_case_npy(data, data_properties, transpose_forward, foreground_intensity_properties)

        data = torch.from_numpy(data).contiguous().float()

        # predict_logits_from_preprocessed_data

        prediction = predict_sliding_window_return_logits(data, network, num_segmentation_heads, patch_size, device,
                                                          tile_step_size, use_gaussian, use_mirroring,
                                                          perform_everything_on_gpu,
                                                          allowed_mirroring_axes, verbose, allow_tqdm)
        # convert_predicted_logits_to_segmentation_with_correct_shape

        # Softmax is monotonic, so argmax over raw logits yields the same
        # segmentation as argmax over probabilities. A RuntimeError here is
        # typically CUDA OOM; the argmax is then retried on CPU.
        try:
            # check correct number of outputs
            assert prediction.shape[0] == num_segmentation_heads, \
                f'unexpected number of channels in predicted_probabilities. Expected {num_segmentation_heads}, ' \
                f'got {prediction.shape[0]}. Remeber that predicted_probabilities should have shape ' \
                f'(c, x, y(, z)).'
            with torch.no_grad():
                segmentation = prediction.float().argmax(0)
        except RuntimeError:
            print('Prediction done, transferring to CPU if needed')
            with torch.no_grad():
                prediction = prediction.float().cpu()
                segmentation = prediction.argmax(0)

        segmentation = segmentation.cpu().numpy()
        del prediction

        empty_cache(device)
        # put segmentation in bbox (revert cropping)
        segmentation_reverted_cropping = np.zeros(data_properties['shape_before_cropping'], dtype=np.uint8)
        slicer = bounding_box_to_slice(data_properties['bbox_used_for_cropping'])
        segmentation_reverted_cropping[slicer] = segmentation
        del segmentation

        # revert transpose
        segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(transpose_backward)

        # back to nibabel's (x, y, z) axis order
        seg = segmentation_reverted_cropping.transpose((2, 1, 0)).astype(np.uint8)

        seg_nib = nib.Nifti1Image(seg, affine=data_properties['nibabel_stuff']['reoriented_affine'])
        seg_nib_reoriented = seg_nib.as_reoriented(
            nib.io_orientation(data_properties['nibabel_stuff']['original_affine']))
        assert np.allclose(data_properties['nibabel_stuff']['original_affine'], seg_nib_reoriented.affine), \
            'restored affine does not match original affine'
        return seg_nib_reoriented

    def call_origin(self, nib_file, save_path, roi_subset: List[str] = None):
        """Legacy end-to-end pipeline using ``change_spacing`` for resampling.

        ``__call__`` is the current entry point (GPU grid-sample resampling
        plus adaptive z-splitting); this method is kept as the original
        reference implementation.

        NOTE(review): the ``change_spacing`` import is commented out in the
        file header — confirm a local definition exists in this module,
        otherwise this legacy path raises NameError.

        :param nib_file: path to a NIfTI file or an in-memory Nifti1Image
        :param save_path: output file (multilabel) or directory (per-class
            masks); saving is skipped when None or when skip_saving is set
        :param roi_subset: optional list of organ names; restricts both the
            evaluated model parts and the labels kept in the output
        :return: segmentation as a nibabel Nifti1Image (uint8 labels)
        """
        resample = self.resample
        verbose = self.verbose
        nr_threads_resampling = self.nr_threads_resampling
        task_name = self.task_name
        task_id = self.task_id
        skip_saving = self.skip_saving
        multilabel_image = self.multilabel_image
        no_derived_masks = self.no_derived_masks

        if type(resample) is float:
            resample = [resample, resample, resample]

        if isinstance(nib_file, Nifti1Image):
            img_in_orig = nib_file
        else:
            img_in_orig = nib.load(nib_file)

        multimodel = type(task_id) is list

        if task_name == "total":
            class_map_parts = class_map_5_parts
            map_taskid_to_partname = map_taskid_to_partname_ct
        else:
            raise NotImplementedError(task_name)

        if len(img_in_orig.shape) == 2:
            raise ValueError("TotalSegmentator does not work for 2D images. Use a 3D image.")
        if len(img_in_orig.shape) > 3:
            print(f"WARNING: Input image has {len(img_in_orig.shape)} dimensions. Only using first three dimensions.")
            img_in_orig = nib.Nifti1Image(img_in_orig.get_fdata()[:, :, :, 0], img_in_orig.affine)

        img_dtype = img_in_orig.get_data_dtype()
        if img_dtype.fields is not None:
            raise TypeError(f"Invalid dtype {img_dtype}. Expected a simple dtype, not a structured one.")

        # takes ~0.9s for medium image
        img_in = nib.Nifti1Image(img_in_orig.get_fdata(), img_in_orig.affine)  # copy img_in_orig

        img_in = as_closest_canonical(img_in)

        if resample is not None:
            if verbose:
                print("Resampling...")
            st = time.time()
            img_in_shape = img_in.shape
            img_in_zooms = img_in.header.get_zooms()
            img_in_rsp = change_spacing(img_in, resample,
                                        order=3, dtype=np.int32,
                                        nr_cpus=nr_threads_resampling)  # 4 cpus instead of 1 makes it a bit slower
            if verbose:
                print(f"  from shape {img_in.shape} to shape {img_in_rsp.shape}")
                print(f"  Resampled in {time.time() - st:.2f}s")
        else:
            img_in_rsp = img_in

        if task_name == "total" and resample is not None and resample[0] < 3.0:
            # overall speedup for 15mm model roughly 11% (GPU) and 100% (CPU)
            # overall speedup for  3mm model roughly  0% (GPU) and  10% (CPU)
            # (dice 0.001 worse on test set -> ok)
            # (for lung_trachea_bronchia somehow a lot lower dice)
            step_size = 0.8
        else:
            step_size = 0.5

        st = time.time()
        if multimodel:  # if running multiple models
            # only compute model parts containing the roi subset
            if roi_subset is not None:
                # invert the mapping once outside the loop (loop-invariant)
                map_partname_to_taskid = {v: k for k, v in map_taskid_to_partname.items()}
                part_names = []
                new_task_id = []
                for part_name, part_map in class_map_parts.items():
                    if any(organ in roi_subset for organ in part_map.values()):
                        new_task_id.append(map_partname_to_taskid[part_name])
                        part_names.append(part_name)
                task_id = new_task_id
                if verbose:
                    print(f"Computing parts: {part_names} based on the provided roi_subset")

            class_map_inv = {v: k for k, v in class_map[task_name].items()}
            img_shape = img_in_rsp.shape
            seg_combined = np.zeros(img_shape, dtype=np.uint8)
            # Run several tasks and combine results into one segmentation
            for idx, tid in enumerate(task_id):
                if verbose:
                    print(f"Predicting part {idx + 1} of {len(task_id)} ...")
                with nostdout(verbose):
                    img_pred = self.predict(img_in_rsp, tid, step_size=step_size)
                    seg = img_pred.get_fdata()
                    # remap part-local class ids to the global label ids
                    for jdx, class_name in class_map_parts[map_taskid_to_partname[tid]].items():
                        seg_combined[seg == jdx] = class_map_inv[class_name]
            # BUGFIX: wrap the merged array as a Nifti1Image (cf. _predict_part_);
            # downstream code (remove_auxiliary_labels, change_spacing,
            # undo_canonical) expects a nibabel image, not a bare ndarray.
            img_pred = nib.Nifti1Image(seg_combined, affine=img_in_rsp.affine)
        else:
            if verbose:
                print("Predicting...")
            with nostdout(verbose):
                img_pred = self.predict(img_in_rsp, task_id, step_size)
        if verbose:
            print(f"  Predicted in {time.time() - st:.2f}s")

        img_pred = remove_auxiliary_labels(img_pred, task_name)
        if task_name == 'body':
            # Postprocessing multilabel (run here on lower resolution)
            img_pred_pp = keep_largest_blob_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                       ["body_trunc"], debug=False, quiet=not verbose)
            img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

            vox_vol = np.prod(img_pred.header.get_zooms())
            size_thr_mm3 = 50000 / vox_vol
            img_pred_pp = remove_small_blobs_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                        ["body_extremities"], interval=[size_thr_mm3, 1e10],
                                                        debug=False, quiet=not verbose)
            img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        if resample is not None:
            if verbose:
                print("Resampling...")
                print(f"  back to original shape: {img_in_shape}")
            # Use force_affine otherwise output affine sometimes slightly off (which then is even increased
            # by undo_canonical)
            img_pred = change_spacing(img_pred, resample, img_in_shape,
                                      order=0, dtype=np.uint8, nr_cpus=nr_threads_resampling,
                                      force_affine=img_in.affine)

        if verbose:
            print("Undoing canonical...")
        img_pred = undo_canonical(img_pred, img_in_orig)

        check_if_shape_and_affine_identical(img_in_orig, img_pred)

        img_data = img_pred.get_fdata().astype(np.uint8)

        label_map = class_map[task_name]

        # Keep only voxel values corresponding to the roi_subset
        if roi_subset is not None:
            label_map = {k: v for k, v in label_map.items() if v in roi_subset}
            img_data *= np.isin(img_data, list(label_map.keys()))

        # Prepare output nifti
        # Copy header to make output header exactly the same as input. But change dtype otherwise it will be
        # float or int and therefore the masks will need a lot more space.
        # (infos on header: https://nipy.org/nibabel/nifti_images.html)
        new_header = img_in_orig.header.copy()
        new_header.set_data_dtype(np.uint8)
        img_out = nib.Nifti1Image(img_data, img_pred.affine, new_header)
        img_out = add_label_map_to_nifti(img_out, label_map)

        if save_path is not None and skip_saving is False:
            if verbose:
                print("Saving segmentations...")

            # Select subset of classes if required
            selected_classes = class_map[task_name]

            st = time.time()
            if multilabel_image:
                # ROBUSTNESS: os.makedirs('') raises for a dir-less save_path
                out_dir = os.path.dirname(save_path)
                if out_dir:
                    os.makedirs(out_dir, exist_ok=True)
                nib.save(img_out, save_path)
            else:
                os.makedirs(save_path, exist_ok=True)

                for k, v in selected_classes.items():
                    binary_img = img_data == k
                    output_path = os.path.join(save_path, f"{v}.nii.gz")
                    nib.save(nib.Nifti1Image(binary_img.astype(np.uint8), img_pred.affine, new_header), output_path)

            if task_name == "body" and not multilabel_image and not no_derived_masks:
                if verbose:
                    print("Creating body.nii.gz")
                body_img = combine_masks(save_path, "body")
                nib.save(body_img, os.path.join(save_path, "body.nii.gz"))
        return img_out

    def _predict_part_(self, img_in_rsp, step_size, roi_subset):
        """Run the network on one (sub-)volume and merge multi-model outputs.

        :param img_in_rsp: resampled input volume (nibabel image)
        :param step_size: sliding-window step size forwarded to ``self.predict``
        :param roi_subset: optional list of organ names; restricts which model
            parts are evaluated in the multi-model case
        :return: label image (nibabel Nifti1Image) in the space of ``img_in_rsp``
        """
        verbose = self.verbose
        task_name = self.task_name
        task_id = self.task_id
        multimodel = type(task_id) is list
        if task_name == "total":
            class_map_parts = class_map_5_parts
            map_taskid_to_partname = map_taskid_to_partname_ct
        else:
            raise NotImplementedError(task_name)

        if multimodel:  # if running multiple models
            # only compute model parts containing the roi subset
            if roi_subset is not None:
                # invert the mapping once outside the loop (loop-invariant;
                # previously rebuilt on every matching part)
                map_partname_to_taskid = {v: k for k, v in map_taskid_to_partname.items()}
                part_names = []
                new_task_id = []
                for part_name, part_map in class_map_parts.items():
                    if any(organ in roi_subset for organ in part_map.values()):
                        new_task_id.append(map_partname_to_taskid[part_name])
                        part_names.append(part_name)
                task_id = new_task_id
                if verbose:
                    print(f"Computing parts: {part_names} based on the provided roi_subset")

            class_map_inv = {v: k for k, v in class_map[task_name].items()}
            seg_combined = np.zeros(img_in_rsp.shape, dtype=np.uint8)
            # Run several tasks and combine results into one segmentation
            for idx, tid in enumerate(task_id):
                if verbose:
                    print(f"Predicting part {idx + 1} of {len(task_id)} ...")
                with nostdout(verbose):
                    img_pred = self.predict(img_in_rsp, tid, step_size=step_size)
                    seg = img_pred.get_fdata()
                    # remap part-local class ids to the global label ids
                    for jdx, class_name in class_map_parts[map_taskid_to_partname[tid]].items():
                        seg_combined[seg == jdx] = class_map_inv[class_name]
            img_pred = nib.Nifti1Image(seg_combined, affine=img_in_rsp.affine)
        else:
            if verbose:
                print("Predicting...")
            with nostdout(verbose):
                img_pred = self.predict(img_in_rsp, task_id, step_size)
        return img_pred

    def split_predict(self, img_in_rsp, step_size, roi_subset):
        """Deprecated: split the volume into three fixed z-parts and predict each.

        Writes the three sub-volumes to a temporary directory as NIfTI files,
        predicts each with a ``margin``-slice overlap at the seams, and
        stitches the label maps back together. Superseded by
        ``adaptive_split_predict`` (in-memory, variable number of parts).

        :param img_in_rsp: resampled input volume (nibabel image)
        :param step_size: sliding-window step size
        :param roi_subset: optional organ-name subset forwarded to _predict_part_
        :return: combined label image with the affine of ``img_in_rsp``
        """
        warnings.warn('prefer to function: adaptive_split_predict', DeprecationWarning)
        verbose = self.verbose
        task_id = self.task_id
        # NOTE(review): nr_voxels_thr and multimodel are unused in this method
        nr_voxels_thr = self.nr_voxels_thr
        multimodel = type(task_id) is list

        with tempfile.TemporaryDirectory(prefix="nnunet_tmp_") as tmp_dir:
            if verbose:
                print(f"tmp_dir: {tmp_dir}")

            if verbose:
                print("Splitting into subparts...")
            img_parts = ["s01", "s02", "s03"]
            third = img_in_rsp.shape[2] // 3
            margin = 20  # set margin with fixed values to avoid rounding problem if using percentage of third
            img_in_rsp_data = img_in_rsp.get_fdata()
            nib.save(nib.Nifti1Image(img_in_rsp_data[:, :, :third + margin], img_in_rsp.affine),
                     os.path.join(tmp_dir, "s01_0000.nii.gz"))
            nib.save(nib.Nifti1Image(img_in_rsp_data[:, :, third + 1 - margin:third * 2 + margin], img_in_rsp.affine),
                     os.path.join(tmp_dir, "s02_0000.nii.gz"))

            nib.save(nib.Nifti1Image(img_in_rsp_data[:, :, third * 2 + 1 - margin:], img_in_rsp.affine),
                     os.path.join(tmp_dir, "s03_0000.nii.gz"))

            img_pred = np.zeros(img_in_rsp.shape, dtype=np.uint8)
            for i, img_part in enumerate(img_parts):
                file = os.path.join(tmp_dir, f"{img_part}_0000.nii.gz")
                img_in_rsp_part = nib.load(file)
                img_pred_part = self._predict_part_(img_in_rsp_part, step_size, roi_subset)
                # keep only the un-margined core of each part when stitching
                if i == 0:
                    img_pred[:, :, :third] = img_pred_part.get_fdata()[:, :, :-margin]
                elif i == 1:
                    img_pred[:, :, third:third * 2] = img_pred_part.get_fdata()[:, :, margin - 1:-margin]
                else:
                    img_pred[:, :, third * 2:] = img_pred_part.get_fdata()[:, :, margin - 1:]
        return nib.Nifti1Image(img_pred, affine=img_in_rsp.affine)

    def adaptive_split_predict(self, img_in_rsp, step_size, roi_subset):
        """Split a large volume into overlapping z-chunks, predict, reassemble.

        The number of chunks grows with the voxel count (at least 3, else
        ceil(n_voxels / nr_voxels_thr)); each chunk is padded by ``margin``
        slices on both sides so the sliding window has context at the seams,
        and only the un-padded core of each prediction is written back.

        :param img_in_rsp: resampled input volume (nibabel image)
        :param step_size: sliding-window step size
        :param roi_subset: optional organ-name subset forwarded to _predict_part_
        :return: combined label image with the affine of ``img_in_rsp``
        """
        verbose = self.verbose
        nr_voxels_thr = self.nr_voxels_thr
        device = self.device

        ss = img_in_rsp.shape
        if np.prod(ss) / nr_voxels_thr < 3:
            num_img_parts = 3
        else:
            num_img_parts = int(np.ceil(np.prod(ss) / nr_voxels_thr))

        if verbose:
            print("Splitting into subparts...")
        zPatchSize = img_in_rsp.shape[2] // num_img_parts
        margin = 20  # fixed margin avoids rounding problems of a percentage-based one
        img_in_rsp_data = img_in_rsp.get_fdata()

        img_pred = np.zeros(img_in_rsp.shape, dtype=np.uint8)
        for i in range(num_img_parts):
            # chunk bounds including margin (input coordinates)
            idx_0_in = zPatchSize * i - margin
            idx_1_in = zPatchSize * (i + 1) + margin

            # core region written to the output volume
            idx_0_out = idx_0_in + margin
            idx_1_out = idx_1_in - margin

            # corresponding core region inside the predicted chunk
            idx_0_ph = margin
            idx_1_ph = - margin
            if idx_0_in < 0:
                idx_0_in = 0
                idx_0_out = 0
                idx_0_ph = 0
            # BUGFIX: the last chunk must always extend to the end of the
            # volume. zPatchSize * num_img_parts can be up to
            # num_img_parts - 1 slices short of shape[2]; previously those
            # trailing slices were silently left 0 whenever the remainder
            # was >= margin.
            if i == num_img_parts - 1 or idx_1_in > img_in_rsp.shape[2]:
                idx_1_in = None
                idx_1_out = None
                idx_1_ph = None
            idx_in = slice(idx_0_in, idx_1_in)
            idx_out = slice(idx_0_out, idx_1_out)
            idx_ph = slice(idx_0_ph, idx_1_ph)

            img_in_rsp_part = nib.Nifti1Image(img_in_rsp_data[:, :, idx_in,], img_in_rsp.affine)
            img_pred_part = self._predict_part_(img_in_rsp_part, step_size, roi_subset)
            img_pred[:, :, idx_out,] = img_pred_part.get_fdata()[:, :, idx_ph,]
            empty_cache(device)
        return nib.Nifti1Image(img_pred, affine=img_in_rsp.affine)

    def __call__(self, nib_file, save_path, roi_subset: List[str] = None):
        """Segment a CT volume end-to-end and optionally save the result.

        Pipeline: load -> canonical orientation -> GPU resampling to the
        working spacing -> (adaptive z-split) sliding-window prediction ->
        postprocessing -> resample back -> undo canonical -> save.

        :param nib_file: path to a NIfTI file or an in-memory Nifti1Image
        :param save_path: output file (multilabel) or directory (per-class
            masks); saving is skipped when None or when skip_saving is set
        :param roi_subset: optional list of organ names; restricts both the
            evaluated model parts and the labels kept in the output
        :return: segmentation as a nibabel Nifti1Image (uint8 labels)
        """
        resample = self.resample
        verbose = self.verbose
        nr_threads_resampling = self.nr_threads_resampling
        task_name = self.task_name
        task_id = self.task_id
        nr_voxels_thr = self.nr_voxels_thr
        skip_saving = self.skip_saving
        multilabel_image = self.multilabel_image
        no_derived_masks = self.no_derived_masks
        device = self.device

        # scalar spacing -> isotropic [x, y, z] spacing
        if type(resample) is float:
            resample = [resample, resample, resample]

        if isinstance(nib_file, Nifti1Image):
            img_in_orig = nib_file
        else:
            img_in_orig = nib.load(nib_file)

        multimodel = type(task_id) is list

        if len(img_in_orig.shape) == 2:
            raise ValueError("TotalSegmentator does not work for 2D images. Use a 3D image.")
        if len(img_in_orig.shape) > 3:
            print(f"WARNING: Input image has {len(img_in_orig.shape)} dimensions. Only using first three dimensions.")
            img_in_orig = nib.Nifti1Image(img_in_orig.get_fdata()[:, :, :, 0], img_in_orig.affine)

        img_dtype = img_in_orig.get_data_dtype()
        if img_dtype.fields is not None:
            raise TypeError(f"Invalid dtype {img_dtype}. Expected a simple dtype, not a structured one.")

        # takes ~0.9s for medium image
        img_in = nib.Nifti1Image(img_in_orig.get_fdata(), img_in_orig.affine)  # copy img_in_orig

        img_in = as_closest_canonical(img_in)

        if resample is not None:
            if verbose:
                print("Resampling...")
            st = time.time()
            img_in_shape = img_in.shape
            # GPU grid-sample resampling (replaces the legacy CPU change_spacing
            # path still used in call_origin).
            # resample[::-1]: target spacing reversed to (z, y, x) — presumably
            # the helper's spacing convention; TODO confirm against
            # resample_3d_grid_sample's signature.
            data_rsp, affine_rsp = resample_3d_grid_sample(img_in, resample[::-1], device=device)
            img_in_rsp = nib.Nifti1Image(data_rsp, affine_rsp)
            if verbose:
                print(f"  from shape {img_in.shape} to shape {img_in_rsp.shape}")
                print(f"  Resampled in {time.time() - st:.2f}s")
            empty_cache(device)
        else:
            img_in_rsp = img_in

        if task_name == "total" and resample is not None and resample[0] < 3.0:
            # overall speedup for 15mm model roughly 11% (GPU) and 100% (CPU)
            # overall speedup for  3mm model roughly  0% (GPU) and  10% (CPU)
            # (dice 0.001 worse on test set -> ok)
            # (for lung_trachea_bronchia somehow a lot lower dice)
            step_size = 0.8
        else:
            step_size = 0.5

        ss = img_in_rsp.shape
        # If image to big then split into 3 parts along z axis. Also make sure that z-axis is at least 200px otherwise
        # splitting along it does not really make sense.
        do_triple_split = np.prod(ss) > nr_voxels_thr and ss[2] > 200 and multimodel

        st = time.time()
        if do_triple_split:
            img_pred = self.adaptive_split_predict(img_in_rsp, step_size, roi_subset)
        else:
            # single pass; multi-model merging happens inside _predict_part_
            img_pred = self._predict_part_(img_in_rsp, step_size, roi_subset)
        if verbose:
            print(f"  Predicted in {time.time() - st:.2f}s")

        img_pred = remove_auxiliary_labels(img_pred, task_name)
        if task_name == 'body':
            # Postprocessing multilabel (run here on lower resolution)
            img_pred_pp = keep_largest_blob_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                       ["body_trunc"], debug=False, quiet=not verbose)
            img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

            vox_vol = np.prod(img_pred.header.get_zooms())
            size_thr_mm3 = 50000 / vox_vol
            img_pred_pp = remove_small_blobs_multilabel(img_pred.get_fdata().astype(np.uint8), class_map[task_name],
                                                        ["body_extremities"], interval=[size_thr_mm3, 1e10],
                                                        debug=False, quiet=not verbose)
            img_pred = nib.Nifti1Image(img_pred_pp, img_pred.affine)

        if resample is not None:
            if verbose:
                print("Resampling...")
                print(f"  back to original shape: {img_in_shape}")
            # Nearest-neighbour interpolation back to the original grid so
            # label values are preserved; img_in.affine is reused so the
            # output affine is not slightly off (which would be amplified by
            # undo_canonical below).
            data_rsp, _ = resample_3d_grid_sample(img_pred, target_size=img_in_shape, mode='nearest', device=device)
            data_rsp = data_rsp.astype(np.uint8)
            img_pred = nib.Nifti1Image(data_rsp, img_in.affine)
            empty_cache(device)
        if verbose:
            print("Undoing canonical...")
        img_pred = undo_canonical(img_pred, img_in_orig)

        check_if_shape_and_affine_identical(img_in_orig, img_pred)

        img_data = img_pred.get_fdata().astype(np.uint8)

        label_map = class_map[task_name]

        # Keep only voxel values corresponding to the roi_subset
        if roi_subset is not None:
            label_map = {k: v for k, v in label_map.items() if v in roi_subset}
            img_data *= np.isin(img_data, list(label_map.keys()))

        # Prepare output nifti
        # Copy header to make output header exactly the same as input. But change dtype otherwise it will be
        # float or int and therefore the masks will need a lot more space.
        # (infos on header: https://nipy.org/nibabel/nifti_images.html)
        new_header = img_in_orig.header.copy()
        new_header.set_data_dtype(np.uint8)
        img_out = nib.Nifti1Image(img_data, img_pred.affine, new_header)
        img_out = add_label_map_to_nifti(img_out, label_map)

        if save_path is not None and skip_saving is False:
            if verbose:
                print("Saving segmentations...")

            # Select subset of classes if required
            selected_classes = class_map[task_name]

            st = time.time()
            if multilabel_image:
                # NOTE(review): os.makedirs('') raises when save_path has no
                # directory component — confirm callers always pass a path
                # with a parent directory.
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                nib.save(img_out, save_path)
            else:
                os.makedirs(save_path, exist_ok=True)

                for k, v in selected_classes.items():
                    binary_img = img_data == k
                    output_path = os.path.join(save_path, f"{v}.nii.gz")
                    nib.save(nib.Nifti1Image(binary_img.astype(np.uint8), img_pred.affine, new_header), output_path)

            if task_name == "body" and not multilabel_image and not no_derived_masks:
                if verbose:
                    print("Creating body.nii.gz")
                body_img = combine_masks(save_path, "body")
                nib.save(body_img, os.path.join(save_path, "body.nii.gz"))
        return img_out

    def __del__(self):
        """Best-effort cleanup: clear the gaussian cache and free device memory.

        BUGFIX: ``__init__`` raises FileNotFoundError *before* ``self.device``
        is assigned, in which case this finalizer previously crashed with an
        AttributeError. ``__del__`` can also run during interpreter shutdown,
        so it must never raise.
        """
        try:
            compute_gaussian.cache_clear()
            device = getattr(self, 'device', None)
            if device is not None:
                empty_cache(device)
            print('class instance destroyed!')
        except Exception:
            # never propagate from __del__
            pass
