#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/5/6
# @USER    : Shengji He
# @File    : inference.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:
from __future__ import annotations
import os
import glob
import gc
import numpy as np
import torch
import torch.nn.functional as F

import SimpleITK as sitk

import nibabel as nib

from dnnlib.network import SegResNet
from dnnlib.inferer import SlidingWindowInferer

# --------------------------------------------------------------------------------------------
# preprocessing
# --------------------------------------------------------------------------------------------
"""
reset orientation
"""


def compute_orientation(init_axcodes, final_axcodes):
    """
    Thin wrapper around ``nib.orientations.ornt_transform``.

    :param init_axcodes: initial orientation codes, e.g. ('R', 'A', 'S')
    :param final_axcodes: target orientation codes
    :return: (transform, start_ornt, end_ornt) orientation arrays
    """
    start_ornt = nib.orientations.axcodes2ornt(init_axcodes)
    end_ornt = nib.orientations.axcodes2ornt(final_axcodes)
    transform = nib.orientations.ornt_transform(start_ornt, end_ornt)
    return transform, start_ornt, end_ornt


def do_reorientation(data_array, init_axcodes, final_axcodes):
    """
    Reorient a 3D array by permuting/flipping its axes.

    source: https://niftynet.readthedocs.io/en/dev/_modules/niftynet/io/misc_io.html#do_reorientation

    :param data_array: 3D array to reorient
    :param init_axcodes: initial orientation codes
    :param final_axcodes: target orientation codes
    :return: the reoriented array (the input itself when no change is needed)
    """
    transform, start_ornt, end_ornt = compute_orientation(init_axcodes, final_axcodes)
    # Short-circuit when source and target orientations already agree.
    if np.array_equal(start_ornt, end_ornt):
        return data_array
    return nib.orientations.apply_orientation(data_array, transform)


def do_reorientation_sitk(image, target_direction: str | tuple[str, ...] = "RAS"):
    """
    Reorient an image with SimpleITK's DICOMOrientImageFilter.

    :param image: SimpleITK.Image to reorient
    :param target_direction: target orientation code such as "LPS" or "RAS",
        given either as a string or as a tuple of single-letter axis codes
    :return: the reoriented SimpleITK.Image
    """
    if isinstance(target_direction, tuple):
        # The filter expects a plain string like "RAS".
        target_direction = ''.join(target_direction)
    reorienter = sitk.DICOMOrientImageFilter()
    reorienter.SetDesiredCoordinateOrientation(target_direction)
    return reorienter.Execute(image)


def get_axcodes_from_sitk(image):
    """
    Derive nibabel axis codes (e.g. ('R', 'A', 'S')) from a SimpleITK image.

    Builds a 4x4 affine from the image's origin, spacing and direction
    cosines, then lets nibabel classify the dominant axis directions.

    :param image: SimpleITK.Image
    :return: tuple of axis codes as returned by ``nib.aff2axcodes``
    """
    origin = image.GetOrigin()
    spacing = image.GetSpacing()
    direction = image.GetDirection()

    affine = np.eye(4)
    # The COLUMNS of the direction matrix are the per-voxel-axis direction
    # cosines, so spacing must scale columns (direction @ diag(spacing)).
    # Scaling rows (the previous `spacing[:, None]`) mixes spacings across
    # axes for oblique volumes.
    affine[:3, :3] = np.array(direction).reshape(3, 3) * np.asarray(spacing)[None, :]
    affine[:3, 3] = origin

    # NOTE(review): SimpleITK reports geometry in LPS while nibabel affines
    # are interpreted as RAS; for strict NIfTI axcodes the first two rows
    # may need a sign flip — confirm against how the result is consumed.
    axcodes = nib.aff2axcodes(affine)
    return axcodes


"""
reset image size
"""


def resample_image(image,
                   out_spacing=[1.0, 1.0, 1.0],
                   out_size=None,
                   interpolator=sitk.sitkLinear,
                   default_value=0.0):
    """
    Resample a SimpleITK image to a new voxel spacing (and optional size).

    :param image: input SimpleITK.Image
    :param out_spacing: target voxel spacing (x, y, z)
    :param out_size: output size (x, y, z); computed from the spacing ratio
        when None so the physical extent is preserved
    :param interpolator: e.g. sitk.sitkLinear / sitk.sitkNearestNeighbor
    :param default_value: fill value for out-of-range samples
    :return: the resampled image
    """
    in_spacing = image.GetSpacing()
    in_size = image.GetSize()

    if out_size is None:
        # Preserve physical extent: new_size = old_size * old_spacing / new_spacing.
        out_size = [int(np.round(sz * sp / osp))
                    for sz, sp, osp in zip(in_size, in_spacing, out_spacing)]

    # Reference image carries the target geometry (origin/direction unchanged).
    reference = sitk.Image(out_size, image.GetPixelID())
    reference.SetOrigin(image.GetOrigin())
    reference.SetSpacing(out_spacing)
    reference.SetDirection(image.GetDirection())

    # Identity transform: pure grid change, no spatial warp.
    identity = sitk.Transform()
    identity.SetIdentity()

    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(reference)
    resampler.SetInterpolator(interpolator)
    resampler.SetTransform(identity)
    resampler.SetDefaultPixelValue(default_value)

    return resampler.Execute(image)


def resample_3d_grid_sample(
        input_tensor: torch.Tensor,
        current_spacing: list[float] = None,
        target_spacing: list[float] = None,
        target_size: list[int] = None,
        mode: str = 'bilinear',
        padding_mode: str = 'border',
        align_corners: bool = True
):
    """
    使用 F.grid_sample 实现基于 spacing 的 3D 图像重采样

    :param input_tensor: 输入张量 ((B), C, D, H, W)
    :param current_spacing: 当前体素间距 [sx, sy, sz]
    :param target_spacing: 目标体素间距 [tx, ty, tz]
    :param mode: 插值方式 'nearest' / 'bilinear'
    :param padding_mode: 边界填充方式
    :param align_corners: 是否对齐角点
    :return: 重采样后的张量
    """
    if not isinstance(input_tensor, torch.Tensor):
        input_tensor = torch.tensor(input_tensor)

    # 确保是 4D 张量 (C, D, H, W)
    has_batch = True
    if input_tensor.dim() == 4:
        input_tensor = input_tensor.unsqueeze(0)  # (1, C, D, H, W)
        has_batch = False

    batch_size, channels, depth, height, width = input_tensor.shape

    if target_size is None and current_spacing is None and target_spacing is None:
        raise IOError("At least one of target_size, (current_spacing, target_spacing) must be provided.")
    elif target_size is not None:
        d_out, h_out, w_out = target_size
    elif current_spacing is not None and target_spacing is not None:
        # 计算缩放因子
        scale_factors = [
            current_spacing[0] / target_spacing[0],  # x-axis
            current_spacing[1] / target_spacing[1],  # y-axis
            current_spacing[2] / target_spacing[2],  # z-axis
        ]

        # 创建目标空间的坐标网格 (D_out, H_out, W_out, 3)
        d_out = int(np.round(depth * scale_factors[2]))
        h_out = int(np.round(height * scale_factors[1]))
        w_out = int(np.round(width * scale_factors[0]))
    else:
        raise IOError("At least one of target_size, (current_spacing, target_spacing) must be provided.")

    # Note: equivalent to theta generated by F.affine_grid!!!
    # # 创建归一化的坐标网格 [-1, 1]
    # d_grid = torch.linspace(-1, 1, steps=d_out)
    # h_grid = torch.linspace(-1, 1, steps=h_out)
    # w_grid = torch.linspace(-1, 1, steps=w_out)
    #
    # # meshgrid 生成 (x, y, z) 坐标
    # grid_z, grid_y, grid_x = torch.meshgrid(d_grid, h_grid, w_grid, indexing='ij')
    #
    # # 合并为 (D, H, W, 3) 的网格，并扩展为 batch 维度
    # grid = torch.stack([grid_x, grid_y, grid_z], dim=-1)  # shape: (D, H, W, 3)
    # grid = grid.unsqueeze(0).expand(batch_size, -1, -1, -1, -1).to(input_tensor.device)

    theta = torch.tensor([[
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
    ]], dtype=torch.float32).to(input_tensor.device)
    grid = F.affine_grid(theta, [batch_size, channels, d_out, h_out, w_out], align_corners=align_corners)

    # 使用 grid_sample 进行插值
    output_tensor = F.grid_sample(
        input_tensor,
        grid,
        mode=mode,
        padding_mode=padding_mode,
        align_corners=align_corners
    )
    if not has_batch:
        output_tensor = output_tensor.squeeze(0)
    return output_tensor  # 返回 (C, D_new, H_new, W_new)


"""
normalize and rescale
"""


def normalize_array(arr, nonzero: bool = False, ):
    """
    Z-score normalize a tensor (zero mean, unit std).

    When ``nonzero`` is True the statistics are computed over non-zero
    elements only and the normalization is applied IN PLACE to those
    elements (zeros stay zero); otherwise the whole tensor is normalized
    and a new tensor is returned.

    :param arr: input torch tensor
    :param nonzero: restrict statistics/updates to non-zero elements
    :return: the normalized tensor
    """
    if nonzero:
        mask = arr != 0
        if not mask.any():
            # All zeros: nothing to normalize.
            return arr
        values = arr[mask]
        mean = torch.mean(values.float()).item()
        # Guard against a degenerate (constant) foreground.
        std = torch.std(values.float(), unbiased=False).item() or 1.0
        arr[mask] = (values - mean) / std
        return arr

    mean = torch.mean(arr.float()).item()
    std = torch.std(arr.float(), unbiased=False).item() or 1.0
    return (arr - mean) / std


def rescale_array(arr, minv: float | None = 0.0, maxv: float | None = 1.0, ):
    """
    Rescale the values of numpy array `arr` to be from `minv` to `maxv`.
    If either `minv` or `maxv` is None, it returns `(a - min_a) / (max_a - min_a)`.

    Args:
        arr: input array to rescale.
        minv: minimum value of target rescaled array.
        maxv: maximum value of target rescaled array.
        dtype: if not None, convert input array to dtype before computation.

    """
    mina = arr.min()
    maxa = arr.max()

    if mina == maxa:
        return arr * minv if minv is not None else arr

    norm = (arr - mina) / (maxa - mina)  # normalize the array first
    if (minv is None) or (maxv is None):
        return norm
    return (norm * (maxv - minv)) + minv  # rescale by minv and maxv, which is the normalized array by default


# --------------------------------------------------------------------------------------------
# postprocessing
# --------------------------------------------------------------------------------------------
def _free_memory():
    """统一内存回收接口"""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


def stable_block_softmax(x, dim=0, chunk_size=10, device='cpu'):
    """
    Numerically stable softmax over `dim`, computed in chunks.

    Processes `x` in `chunk_size` slices along `dim` so that only one chunk
    lives on `device` at a time — useful when a full softmax would not fit
    in (GPU) memory. Three passes: global max, global denominator, then
    probabilities. The result is assembled on the CPU.

    :param x: input tensor (any shape).
    :param dim: dimension to softmax over; negative values are supported.
    :param chunk_size: number of slices per chunk along `dim`.
    :param device: device on which the chunk arithmetic runs.
    :return: ``softmax(x, dim)`` as a CPU tensor.
    """
    # Normalize negative dims so the reduced-shape construction below works.
    dim = dim % x.dim()
    shape = list(x.shape)

    # Pass 1: global max along `dim`, for the exp(x - max) stability trick.
    global_max = None
    for chunk in torch.split(x, chunk_size, dim=dim):
        chunk_max = torch.max(chunk.to(device), dim=dim, keepdim=True).values
        global_max = chunk_max if global_max is None else torch.max(global_max, chunk_max)
        del chunk, chunk_max
        _free_memory()

    # Pass 2: global denominator sum(exp(x - max)).
    sum_exp = torch.zeros(shape[:dim] + [1] + shape[dim + 1:], dtype=x.dtype, device=device)
    for chunk in torch.split(x, chunk_size, dim=dim):
        exp_chunk = torch.exp(chunk.to(device) - global_max)
        sum_exp += torch.sum(exp_chunk, dim=dim, keepdim=True)
        del chunk, exp_chunk
        _free_memory()

    # Pass 3: probabilities, staged chunk-by-chunk to the CPU to free
    # device memory as we go.
    probs = []
    for chunk in torch.split(x, chunk_size, dim=dim):
        exp_chunk = torch.exp(chunk.to(device) - global_max)
        prob_chunk = exp_chunk / sum_exp
        probs.append(prob_chunk.cpu())
        del chunk, exp_chunk, prob_chunk
        _free_memory()

    return torch.cat(probs, dim=dim)


"""
main
"""


@torch.no_grad()
def main_dat():
    """End-to-end CT segmentation on one DICOM series.

    Pipeline: read series -> reorient to ``target_axcodes`` -> resample to
    ``pixdim`` -> clip / normalize / rescale -> sliding-window inference ->
    argmax -> resample back onto the original grid -> restore the original
    orientation -> write ``save_name`` as NIfTI.
    """
    patch_size = [96, 96, 96]  # sliding-window ROI size in voxels
    sw_batch_size = 1  # windows per forward pass
    sw_overlap = 0.25  # fractional overlap between adjacent windows
    is_highres = True  # True -> 1.5 mm model, False -> 3.0 mm model

    target_axcodes = ('R', 'A', 'S')
    # target_axcodes = ('I', 'P', 'L')

    # Checkpoint path and target voxel spacing depend on the resolution mode.
    if is_highres:
        model_path = './models/model.pt'
        pixdim = [1.5, 1.5, 1.5]
    else:
        model_path = './models/model_lowres.pt'
        pixdim = [3.0, 3.0, 3.0]

    # Fixed-size patches make cudnn autotuning worthwhile.
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # NOTE(review): positional args presumably (spatial_dims, init_filters,
    # in_channels, out_channels, dropout) with 105 output classes — confirm
    # against dnnlib.network.SegResNet.
    model = SegResNet(3, 32, 1, 105, 0.2, )

    model.to(device)
    # Older torch versions lack the unpickling helpers referenced by
    # checkpoints saved with newer versions; backfill them so torch.load
    # can deserialize such files.
    try:
        torch._tensor._rebuild_from_type_v2
    except AttributeError:
        def _set_obj_state(obj, state):
            # Restore an object's __dict__ (and optional __slots__) state.
            if isinstance(state, tuple):
                if not len(state) == 2:
                    raise RuntimeError(f"Invalid serialized state: {state}")
                dict_state = state[0]
                slots_state = state[1]
            else:
                dict_state = state
                slots_state = None

            # Starting with Python 3.11, the __dict__ attribute is lazily created
            # and is serialized as None when not needed.
            if dict_state:
                for k, v in dict_state.items():
                    setattr(obj, k, v)

            if slots_state:
                for k, v in slots_state.items():
                    setattr(obj, k, v)
            return obj

        def _rebuild_from_type_v2(func, new_type, args, state):
            # Rebuild a tensor-subclass instance during unpickling.
            ret = func(*args)
            if type(ret) is not new_type:
                ret = ret.as_subclass(new_type)
            # Tensor does define __setstate__ even though it doesn't define
            # __getstate__. So only use __setstate__ if it is NOT the one defined
            # on Tensor
            if (
                    getattr(ret.__class__, "__setstate__", torch.Tensor.__setstate__)
                    is not torch.Tensor.__setstate__
            ):
                ret.__setstate__(state)
            else:
                ret = torch._utils._set_obj_state(ret, state)
            return ret

        torch._utils._set_obj_state = _set_obj_state
        torch._tensor._rebuild_from_type_v2 = _rebuild_from_type_v2
    try:
        # NOTE(review): consider torch.load(..., weights_only=True) for
        # checkpoints from untrusted sources.
        model.load_state_dict(torch.load(model_path, map_location=device))
        print('Successful initial weights: {}'.format(model_path))
    except RuntimeError as e:
        # A partial-loading fallback for mismatched checkpoints is kept
        # below but disabled; currently the error is only reported.
        # init_weights(model, init_type, init_gain)
        # model_dict = model.state_dict()
        # checkpoint = torch.load(weight_file, map_location=device)
        #
        # checkpoint = {k: v for k, v in checkpoint.items() if
        #               (k in model_dict.keys()) and (v.shape == model_dict[k].shape)}
        # model_dict.update(checkpoint)
        # model.load_state_dict(model_dict)
        print(e)

    # cpu_thresh=33554432 (= 2**25) voxels: inputs larger than this are
    # stitched on the CPU to limit GPU memory use.
    inferer = SlidingWindowInferer(patch_size, sw_batch_size, sw_overlap, mode='gaussian', padding_mode='replicate',
                                   # device=device)
                                   cpu_thresh=33554432, progress=True)

    dicom_folder = r'D:\heshengji\temp\temp\CT.20240408_121615.10003'
    save_name = 'segment_wb.nii.gz'

    # Read the first series found in the folder.
    reader = sitk.ImageSeriesReader()
    seriesID = reader.GetGDCMSeriesIDs(dicom_folder)[0]
    dicom_names = reader.GetGDCMSeriesFileNames(dicom_folder, seriesID)
    reader.SetFileNames(dicom_names)
    image = reader.Execute()

    # data = sitk.GetArrayFromImage(image)
    # # NOTE: SimpleITK arrays are (z, y, x) while nibabel defaults to
    # # (x, y, z), so a transpose would be needed here
    # data_nib = data.transpose((2, 1, 0))  # (x, y, z)

    # Remember the input orientation so it can be restored before writing.
    cur_axcodes = get_axcodes_from_sitk(image)

    # reorientation
    # data_refine = do_reorientation(data_nib, cur_axcodes, target_axcodes)
    image_oriented = do_reorientation_sitk(image, target_axcodes)

    # resample
    # image_resampled = resample_image(image_oriented, out_spacing=pixdim, default_value=-1024)
    #
    # data = sitk.GetArrayFromImage(image_resampled)
    # # NOTE: SimpleITK arrays are (z, y, x) while nibabel defaults to
    # # (x, y, z), so a transpose would be needed here
    # data_nib = data.transpose((2, 1, 0))

    # SimpleITK arrays come out as (z, y, x); transpose to (x, y, z).
    data = sitk.GetArrayFromImage(image_oriented).astype(np.float32)
    data = data.transpose((2, 1, 0))

    spacing = image_oriented.GetSpacing()  # (x, y, z) spacing
    img_size = image_oriented.GetSize()  # (x, y, z) size, matches `data` layout

    # Add a channel dim; spacing is reversed because the resampler pairs
    # spacing index 0 with the last tensor axis.
    data_tensor = torch.from_numpy(data[np.newaxis,]).to(device)
    data_resampled_tensor = resample_3d_grid_sample(data_tensor, spacing[::-1], pixdim)

    # normalize
    data_resampled_tensor = torch.clip(data_resampled_tensor, -1024, 3071)  # WARNING: bone segment not stable!
    data_norm = normalize_array(data_resampled_tensor, True)
    # data_norm = normalize_array(data_resampled_tensor, )
    data_scaled = rescale_array(data_norm, -1.0, 1.0).unsqueeze(0)

    # predict
    _free_memory()
    model.eval()
    prediction = inferer(data_scaled, model)[0,]  # drop the batch dimension

    # postprocessing: release everything no longer needed before the
    # memory-heavy argmax/resample steps.
    del model, data, data_tensor, data_resampled_tensor, data_norm, data_scaled
    del inferer
    _free_memory()

    # prediction = torch.softmax(prediction, dim=0)
    # if prediction.device.type == 'cuda' or device.type == 'cpu':
    #     prediction = torch.softmax(prediction, dim=0)
    # else:
    #     prediction = stable_block_softmax(prediction, dim=0, chunk_size=5, device=device)
    # Hard labels only; softmax is skipped since argmax is monotone in logits.
    prediction = torch.argmax(prediction, dim=0, keepdim=True).to(dtype=torch.float32, device=device)

    # invert preprocessing: resample the label map back onto the original
    # (oriented) voxel grid; nearest-neighbour keeps labels discrete.
    prediction_resampled = resample_3d_grid_sample(prediction, target_size=img_size, mode='nearest',
                                                   padding_mode='zeros')

    prediction_resampled = prediction_resampled.detach().cpu().numpy().squeeze()

    del prediction
    _free_memory()

    # Back to SimpleITK's (z, y, x) layout; int8 suffices for 105 labels.
    prediction_resampled = prediction_resampled.transpose(2, 1, 0)
    prediction_resampled = prediction_resampled.astype(np.int8)
    image_prediction = sitk.GetImageFromArray(prediction_resampled)
    image_prediction.CopyInformation(image_oriented)

    # Restore the original orientation before writing.
    image_prediction = do_reorientation_sitk(image_prediction, cur_axcodes)

    sitk.WriteImage(image_prediction, save_name)
    pass


# Script entry point: run the segmentation once, then report completion.
if __name__ == '__main__':
    main_dat()
    print('done')