#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/7/28
# @USER    : Shengji He
# @File    : research.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:
import os

import numpy as np
import torch

"""
针对CT图像:
```
TotalSegmentator -i ct.nii.gz -o segmentations
```

"""


def inference(filepath, savename, task='body', device='cpu'):
    """Run the SegFlow body-segmentation nnU-Net (Dataset 299) on one CT image.

    Parameters
    ----------
    filepath : str
        Path to the input CT NIfTI file (``*.nii.gz``).
    savename : str
        Output path for the segmentation; a ``temp`` directory for
        intermediate files is created next to it.
    task : str
        Task name forwarded to the predictor (default ``'body'``).
    device : str
        Compute device, e.g. ``'cpu'`` or ``'cuda:0'``.

    Returns
    -------
    numpy.ndarray
        Segmentation label volume as ``uint8``.
    """
    from SegFlow.model import nnUNet_predict_image_body

    task_id = 299  # Dataset299_body_1559subj
    resample = 1.5  # resampling spacing in mm
    trainer = "nnUNetTrainer"
    model = "3d_fullres"

    folds = [0]  # cross-validation folds to use (None = all)
    ml = False  # Save one multilabel image for all classes
    nr_thr_resamp = 4  # Nr of threads for resampling
    nr_thr_saving = 1  # Nr of threads for saving segmentations

    quiet = False  # Print no intermediate outputs
    verbose = False  # Show more intermediate output
    no_derived_masks = False  # Do not create derived masks (e.g. skin from body mask).
    skip_saving = False  # Skip saving of segmentations for faster runtime if you are only interested in statistics.

    temp_dir = os.path.join(os.path.dirname(savename), 'temp')

    seg_img, ct_img, stats = nnUNet_predict_image_body(filepath, savename, task_id, model=model, folds=folds,
                                                       trainer=trainer, tta=False, multilabel_image=ml,
                                                       resample=resample,
                                                       task_name=task, nr_threads_resampling=nr_thr_resamp,
                                                       nr_threads_saving=nr_thr_saving, quiet=quiet, verbose=verbose,
                                                       skip_saving=skip_saving, device=device,
                                                       no_derived_masks=no_derived_masks, tmp_dir=temp_dir)
    # Return the label volume instead of discarding it (was previously unused).
    return seg_img.get_fdata().astype(np.uint8)


def main():
    """Research entry point: segment one converted CT volume with BodySegment.

    Paths are local placeholders (``***``); the DICOM-to-NIfTI conversion and
    the alternative :func:`inference` path are kept as commented references.
    """
    # from dicom_io import dcm_to_nifti_LEGACY
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Placeholders for the (commented-out) DICOM conversion step.
    exe_path = r'***'
    dicom_folder = r'***\CTDICOM'

    save_folder = r'***'
    save_name = 'CT'  # NOTE: a dead 'ac_body' assignment was removed here
    # dcm_to_nifti_LEGACY(dicom_folder, save_folder, save_name, False, exe_path)

    filepath = os.path.join(save_folder, save_name + '.nii.gz')
    save_file = os.path.join(save_folder, 'segmentations')
    # inference(filepath, save_file, device=device)
    from SegFlow.inference import BodySegment
    file = r'../weights/Dataset299_body_1559subj/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/checkpoint_final.pth'

    segmenter = BodySegment(file, verbose=True, allow_tqdm=True)

    segmenter(filepath, save_file)

    # Drop the model explicitly to release (GPU) memory in long sessions.
    del segmenter


def build_network(weight_file, num_segmentation_heads):
    """Construct the nnU-Net ``3d_fullres`` PlainConvUNet and load its weights.

    Parameters
    ----------
    weight_file : str
        Path to an nnU-Net ``checkpoint_final.pth`` file.
    num_segmentation_heads : int
        Number of output classes (segmentation channels).

    Returns
    -------
    tuple
        ``(network, inference_allowed_mirroring_axes)``; the second element is
        read from the checkpoint and may be ``None`` if absent.
    """
    import torch.nn as nn
    from SegFlow.network import PlainConvUNet

    # PyTorch >= 2.6 defaults torch.load to weights_only=True, which rejects
    # checkpoints containing arbitrary pickled objects; retry explicitly.
    # SECURITY: weights_only=False unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    try:
        checkpoint = torch.load(weight_file, map_location=torch.device('cpu'))
    except Exception:
        checkpoint = torch.load(weight_file, map_location=torch.device('cpu'), weights_only=False)
    inference_allowed_mirroring_axes = checkpoint.get('inference_allowed_mirroring_axes', None)

    weight_params = checkpoint['network_weights']

    # Fixed architecture plan for this model: 6 stages, 3x3x3 convolutions,
    # isotropic 2x pooling after the first stage.
    UNet_base_num_features = 32
    unet_max_num_features = 320
    conv_kernel_sizes = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
    pool_op_kernel_sizes = [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
    n_conv_per_stage_encoder = [2, 2, 2, 2, 2, 2]
    n_conv_per_stage_decoder = [2, 2, 2, 2, 2]

    deep_supervision = False  # inference only; no auxiliary decoder outputs
    num_stages = len(conv_kernel_sizes)
    conv_op = nn.Conv3d

    kwargs = {
        'conv_bias': True,
        'norm_op': nn.InstanceNorm3d,
        'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
        'dropout_op': None, 'dropout_op_kwargs': None,
        'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
    }
    conv_or_blocks_per_stage = {
        'n_conv_per_stage': n_conv_per_stage_encoder,
        'n_conv_per_stage_decoder': n_conv_per_stage_decoder
    }

    network = PlainConvUNet(
        input_channels=1,
        n_stages=num_stages,
        # Feature widths double per stage, capped at unet_max_num_features.
        features_per_stage=[min(UNet_base_num_features * 2 ** i, unet_max_num_features) for i in range(num_stages)],
        conv_op=conv_op,
        kernel_sizes=conv_kernel_sizes,
        strides=pool_op_kernel_sizes,
        num_classes=num_segmentation_heads,
        deep_supervision=deep_supervision,
        **conv_or_blocks_per_stage,
        **kwargs
    )
    network.load_state_dict(weight_params)
    return network, inference_allowed_mirroring_axes


# ----------------------------------
def CTNormalization(image: np.ndarray, intensityproperties: dict) -> np.ndarray:
    """Clip a CT image to its foreground percentiles and z-score normalize it.

    *intensityproperties* must provide ``mean``, ``std``, ``percentile_00_5``
    and ``percentile_99_5`` (typically taken from the nnU-Net training plan).
    Returns a new float32 array; the input is not modified.
    """
    props = intensityproperties
    clipped = np.clip(image.astype(np.float32),
                      props['percentile_00_5'],
                      props['percentile_99_5'])
    # Guard against a degenerate (near-zero) std to avoid division blow-up.
    return (clipped - props['mean']) / max(props['std'], 1e-8)


def _normalize(data: np.ndarray, intensityproperties: dict) -> np.ndarray:
    """Apply CT normalization to every channel of *data* (in place) and return it."""
    num_channels = data.shape[0]
    for channel in range(num_channels):
        data[channel] = CTNormalization(data[channel], intensityproperties)
    return data


def run_case_npy(data: np.ndarray, properties: dict, transpose_forward, foreground_intensity_properties):
    """Preprocess one case: transpose, crop to the nonzero region, normalize.

    Mutates *properties* in place with the bookkeeping needed to undo the
    crop later (shape before cropping, bounding box, shape after cropping)
    and returns the normalized data array.
    """
    from SegFlow.cropping import crop_to_nonzero

    data = np.copy(data)

    # Channel axis stays first; only the spatial axes are permuted.
    axes = [0] + [axis + 1 for axis in transpose_forward]
    data = data.transpose(axes)
    # original_spacing = [properties['spacing'][i] for i in transpose_forward]

    # Record the size before cropping so the crop can be reverted later.
    properties['shape_before_cropping'] = data.shape[1:]
    data, _seg, bbox = crop_to_nonzero(data)
    properties['bbox_used_for_cropping'] = bbox
    properties['shape_after_cropping_and_before_resampling'] = data.shape[1:]

    # Normalization MUST happen before resampling, otherwise resampled
    # nonzero masks no longer fit the images perfectly.
    return _normalize(data, foreground_intensity_properties)


def write_seg(seg: np.ndarray, output_fname: str, properties: dict) -> None:
    """Save a segmentation to NIfTI in the image's original orientation.

    *properties* must carry ``nibabel_stuff.original_affine`` and
    ``nibabel_stuff.reoriented_affine`` recorded at load time.
    """
    import nibabel as nib

    # Undo the (z, y, x) -> (x, y, z) transpose applied at load time.
    volume = seg.transpose((2, 1, 0)).astype(np.uint8)

    nifti = nib.Nifti1Image(volume, affine=properties['nibabel_stuff']['reoriented_affine'])
    original_affine = properties['nibabel_stuff']['original_affine']
    restored = nifti.as_reoriented(nib.io_orientation(original_affine))
    assert np.allclose(original_affine, restored.affine), \
        'restored affine does not match original affine'
    nib.save(restored, output_fname)


def test_preprocess():
    """Manual end-to-end nnU-Net inference for the 3-class body model.

    Steps: load + reorient the NIfTI, preprocess (transpose/crop/normalize),
    sliding-window prediction, softmax + argmax, revert cropping and
    transpose, write the result. Input/weight paths are local placeholders.
    """
    import nibabel as nib
    from SegFlow.cropping import bounding_box_to_slice
    from SegFlow.predict import predict_sliding_window_return_logits, compute_gaussian
    from SegFlow.utils import empty_cache

    file = r'***\temp\s01_0000.nii.gz'

    weight_file = r'checkpoint_final.pth'
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    num_segmentation_heads = 3  # output classes of the body model

    patch_size = [128, 128, 128]
    tile_step_size = 0.5  # overlap between sliding-window tiles
    use_gaussian = True  # Gaussian-weighted tile blending
    use_mirroring = False  # test-time mirroring disabled (was tta/disable_tta chain)
    perform_everything_on_gpu = True

    allow_tqdm = True
    verbose = True

    # Identity transpose: axes already match the network's expectation.
    transpose_forward = [0, 1, 2]
    transpose_backward = [0, 1, 2]
    # CT foreground intensity statistics from the training plan (HU).
    foreground_intensity_properties = {
        "max": 44877.0,
        "mean": -43.4468879699707,
        "median": 0.0,
        "min": -42801.0,
        "percentile_00_5": -985.0,
        "percentile_99_5": 1411.0,
        "std": 355.778564453125
    }

    # --- NibabelIOWithReorient: load and reorient to canonical axes ---
    nib_image = nib.load(file)
    original_affine = nib_image.affine

    reoriented_image = nib_image.as_reoriented(nib.io_orientation(original_affine))
    reoriented_affine = reoriented_image.affine

    # nnU-Net expects spacing in (z, y, x) order, hence the reversal.
    spacings_for_nnunet = [float(i) for i in reoriented_image.header.get_zooms()[::-1]]

    # (x, y, z) -> (z, y, x) plus a leading channel axis.
    data = reoriented_image.get_fdata().transpose((2, 1, 0))[None]

    data = data.astype(np.float32)
    data_properties = {
        'nibabel_stuff': {'original_affine': original_affine, 'reoriented_affine': reoriented_affine},
        'spacing': spacings_for_nnunet
    }

    # Preprocess: crop to nonzero + normalize (fills data_properties).
    data = run_case_npy(data, data_properties, transpose_forward, foreground_intensity_properties)

    data = torch.from_numpy(data).contiguous().float()

    ofile = '***\\s01'

    network, allowed_mirroring_axes = build_network(weight_file, num_segmentation_heads)

    # predict_logits_from_preprocessed_data
    prediction = predict_sliding_window_return_logits(data, network, num_segmentation_heads, patch_size, device,
                                                      tile_step_size, use_gaussian, use_mirroring,
                                                      perform_everything_on_gpu,
                                                      allowed_mirroring_axes, verbose, allow_tqdm)

    # convert_predicted_logits_to_segmentation_with_correct_shape
    with torch.no_grad():
        prediction = prediction.float()
        predicted_probabilities = torch.softmax(prediction, 0)
    del prediction  # free memory before the argmax copy

    # Sanity-check the channel count before collapsing to labels.
    assert predicted_probabilities.shape[0] == num_segmentation_heads, \
        f'unexpected number of channels in predicted_probabilities. Expected {num_segmentation_heads}, ' \
        f'got {predicted_probabilities.shape[0]}. Remember that predicted_probabilities should have shape ' \
        f'(c, x, y(, z)).'
    segmentation = predicted_probabilities.argmax(0)
    print('Prediction done, transferring to CPU if needed')
    segmentation = segmentation.cpu().numpy()

    # Put segmentation back into the pre-cropping shape (revert cropping).
    segmentation_reverted_cropping = np.zeros(data_properties['shape_before_cropping'], dtype=np.uint8)
    slicer = bounding_box_to_slice(data_properties['bbox_used_for_cropping'])
    segmentation_reverted_cropping[slicer] = segmentation
    del segmentation

    # Revert the forward transpose.
    segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(transpose_backward)

    write_seg(segmentation_reverted_cropping, ofile + '.nii.gz', data_properties)
    # Clear the sliding-window Gaussian LRU cache and the device cache.
    compute_gaussian.cache_clear()
    empty_cache(device)


# ----------------------------------
def main_total_body():
    """Run the TotalSegmentator-style 'total' task on one CT volume.

    Selects a model configuration (full-resolution 5-part ensemble by
    default, or the single lower-resolution 'fast'/'fastest' models) and
    returns the segmentation label volume as ``uint8``. Paths are local
    research placeholders.
    """
    from SegFlow.model import nnUNet_predict_image

    crop_addon = [3, 3, 3]  # default value

    task = "total"
    roi_subset = ['liver']  # restrict saved output to these ROIs
    fast = False  # 3 mm single-model variant (task 297)
    fastest = False  # 6 mm single-model variant (task 298)
    verbose = True
    quiet = False
    device = "cpu"
    if verbose:
        print(f"Using Device: {device}")
    if fast:
        task_id = 297
        resample = 3.0
        trainer = "nnUNetTrainer_4000epochs_NoMirroring"
        # trainer = "nnUNetTrainerNoMirroring"
        crop = None
        if not quiet:
            print("Using 'fast' option: resampling to lower resolution (3mm)")
    elif fastest:
        task_id = 298
        resample = 6.0
        trainer = "nnUNetTrainer_4000epochs_NoMirroring"
        crop = None
        if not quiet:
            print("Using 'fastest' option: resampling to lower resolution (6mm)")
    else:
        # Full resolution: the 'total' task is an ensemble of five part-models.
        task_id = [291, 292, 293, 294, 295]
        resample = 1.5
        trainer = "nnUNetTrainerNoMirroring"
        crop = None
    model = "3d_fullres"
    folds = [0]

    file_in = r'D:\temp\temp\doc\CT.nii.gz'
    file_out = r'D:\temp\temp\doc\ct_seg'
    tmp_dir = r'D:\temp\temp\doc'

    # Previously assigned None and then immediately defaulted; set directly.
    crop_path = file_out
    nora_tag = "None"

    seg_img, ct_img, stats = nnUNet_predict_image(file_in, file_out, task_id, model=model, folds=folds,
                                                  trainer=trainer, tta=False, multilabel_image=False, resample=resample,
                                                  crop=crop, crop_path=crop_path, task_name=task, nora_tag=nora_tag,
                                                  preview=False, nr_threads_resampling=1, nr_threads_saving=6,
                                                  force_split=False, crop_addon=crop_addon, roi_subset=roi_subset,
                                                  output_type="nifti", statistics=False, quiet=quiet, verbose=verbose,
                                                  test=0, skip_saving=False, device=device,
                                                  exclude_masks_at_border=True, no_derived_masks=False, v1_order=False,
                                                  stats_aggregation='mean', tmp_dir=tmp_dir)
    # Return the label volume instead of discarding it (was previously unused).
    return seg_img.get_fdata().astype(np.uint8)


def test_preprocess_tb():
    """Manual end-to-end nnU-Net inference for the 25-class organs model.

    Same pipeline as :func:`test_preprocess` but with the
    Dataset291 (organs) weights, 25 output classes and a coarser tile step.
    Input/weight paths are local research placeholders.
    """
    import nibabel as nib
    from SegFlow.cropping import bounding_box_to_slice
    from SegFlow.predict import predict_sliding_window_return_logits, compute_gaussian
    from SegFlow.utils import empty_cache

    file = r'D:\heshengji\temp\temp\doc\s01_0000.nii.gz'

    weight_file = r'../weights/Dataset291_TotalSegmentator_part1_organs_1559subj/nnUNetTrainerNoMirroring__nnUNetPlans__3d_fullres/fold_0/checkpoint_final.pth'

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    num_segmentation_heads = 25  # output classes of the organs model

    patch_size = [128, 128, 128]
    tile_step_size = 0.8  # overlap between sliding-window tiles
    use_gaussian = True  # Gaussian-weighted tile blending
    use_mirroring = False  # test-time mirroring disabled (was tta/disable_tta chain)
    perform_everything_on_gpu = True

    allow_tqdm = True
    verbose = True

    # Identity transpose: axes already match the network's expectation.
    transpose_forward = [0, 1, 2]
    transpose_backward = [0, 1, 2]
    # CT foreground intensity statistics from the training plan (HU).
    foreground_intensity_properties = {
        "max": 3606.0,
        "mean": -370.00039267657144,
        "median": -249.0,
        "min": -3139.0,
        "percentile_00_5": -1024.0,
        "percentile_99_5": 276.0,
        "std": 436.5998675471528
    }

    # --- NibabelIOWithReorient: load and reorient to canonical axes ---
    nib_image = nib.load(file)
    original_affine = nib_image.affine

    reoriented_image = nib_image.as_reoriented(nib.io_orientation(original_affine))
    reoriented_affine = reoriented_image.affine

    # nnU-Net expects spacing in (z, y, x) order, hence the reversal.
    spacings_for_nnunet = [float(i) for i in reoriented_image.header.get_zooms()[::-1]]

    # (x, y, z) -> (z, y, x) plus a leading channel axis.
    data = reoriented_image.get_fdata().transpose((2, 1, 0))[None]

    data = data.astype(np.float32)
    data_properties = {
        'nibabel_stuff': {'original_affine': original_affine, 'reoriented_affine': reoriented_affine},
        'spacing': spacings_for_nnunet
    }

    # Preprocess: crop to nonzero + normalize (fills data_properties).
    data = run_case_npy(data, data_properties, transpose_forward, foreground_intensity_properties)

    data = torch.from_numpy(data).contiguous().float()

    ofile = r'D:\heshengji\temp\temp\doc\s01'

    network, allowed_mirroring_axes = build_network(weight_file, num_segmentation_heads)

    # predict_logits_from_preprocessed_data
    prediction = predict_sliding_window_return_logits(data, network, num_segmentation_heads, patch_size, device,
                                                      tile_step_size, use_gaussian, use_mirroring,
                                                      perform_everything_on_gpu,
                                                      allowed_mirroring_axes, verbose, allow_tqdm)

    # convert_predicted_logits_to_segmentation_with_correct_shape
    with torch.no_grad():
        prediction = prediction.float()
        predicted_probabilities = torch.softmax(prediction, 0)
    del prediction  # free memory before the argmax copy

    # Sanity-check the channel count before collapsing to labels.
    assert predicted_probabilities.shape[0] == num_segmentation_heads, \
        f'unexpected number of channels in predicted_probabilities. Expected {num_segmentation_heads}, ' \
        f'got {predicted_probabilities.shape[0]}. Remember that predicted_probabilities should have shape ' \
        f'(c, x, y(, z)).'
    segmentation = predicted_probabilities.argmax(0)
    print('Prediction done, transferring to CPU if needed')
    segmentation = segmentation.cpu().numpy()

    # Put segmentation back into the pre-cropping shape (revert cropping).
    segmentation_reverted_cropping = np.zeros(data_properties['shape_before_cropping'], dtype=np.uint8)
    slicer = bounding_box_to_slice(data_properties['bbox_used_for_cropping'])
    segmentation_reverted_cropping[slicer] = segmentation
    del segmentation

    # Revert the forward transpose.
    segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(transpose_backward)

    write_seg(segmentation_reverted_cropping, ofile + '.nii.gz', data_properties)
    # Clear the sliding-window Gaussian LRU cache and the device cache.
    compute_gaussian.cache_clear()
    empty_cache(device)

def temp_split(zSample=963, num_img_parts=4, margin=20):
    """Verify that margin-padded z-axis splitting and reassembly is lossless.

    A random volume of shape ``(1, 1, zSample, 4, 4)`` is split into
    ``num_img_parts`` chunks along the z axis. Each chunk is read with a
    ``margin`` halo on both sides (clipped at the volume borders), the halo
    is stripped again, and the pieces are written back. The final assertion
    proves perfect reconstruction.

    Fixes the original version, which referenced undefined index variables
    (``idx_in``/``idx_out``/``idx_ph``) and raised ``NameError``.

    Parameters
    ----------
    zSample : int
        Extent of the z axis (default mirrors the original hard-coded 963).
    num_img_parts : int
        Number of chunks to split into; the last chunk absorbs the remainder.
    margin : int
        Halo (overlap) added on each side of a chunk before cropping back.
    """
    img_in_rsp = np.random.rand(1, 1, zSample, 4, 4)
    img_in_out = np.zeros((1, 1, zSample, 4, 4))
    zPatchSize = zSample // num_img_parts

    for i in range(num_img_parts):
        # Target region of this part; the last part extends to the end.
        z_start = i * zPatchSize
        z_end = zSample if i == num_img_parts - 1 else (i + 1) * zPatchSize
        # Source region: target plus margin, clipped at the volume borders.
        in_start = max(0, z_start - margin)
        in_end = min(zSample, z_end + margin)
        idx_in = slice(in_start, in_end)
        idx_out = slice(z_start, z_end)
        # Position of the target region inside the margin-padded patch.
        idx_ph = slice(z_start - in_start, z_end - in_start)

        print(idx_in, idx_out, idx_ph)
        patch = img_in_rsp[:, :, idx_in]
        img_in_out[:, :, idx_out] = patch[:, :, idx_ph]

    np.testing.assert_array_equal(img_in_rsp, img_in_out)


if __name__ == '__main__':
    # Ad-hoc research driver: uncomment the experiment to run.
    temp_split()
    # test_preprocess_tb()
    # main_total_body()
    # main()
    # build_network()
    # test_preprocess()
    print('done')
