import logging
import os
import time
from typing import Annotated, Optional

import numpy as np
import vtk

import slicer
import slicer.util
from slicer import vtkMRMLScalarVolumeNode
from slicer.i18n import tr as _
from slicer.i18n import translate
from slicer.parameterNodeWrapper import (
    parameterNodeWrapper,
    WithinRange,
)
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin

# Data augmentation transforms (batchgenerators)
from batchgenerators.transforms.color_transforms import *
from batchgenerators.transforms.noise_transforms import *
from batchgenerators.transforms.resample_transforms import *

#
# ImageProcessing
#


class ImageProcessing(ScriptedLoadableModule):
    """Module descriptor: title, category, help and acknowledgement text.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        module = self.parent
        module.title = _("ImageProcessing")  # TODO: make this more human readable by adding spaces
        module.categories = [translate("qSlicerAbstractCoreModule", "Examples")]  # TODO: set real categories
        module.dependencies = []  # TODO: list the module names this module requires
        module.contributors = ["John Doe (AnyWare Corp.)"]  # TODO: replace with "Firstname Lastname (Organization)"
        # _() marks the text as translatable to other languages
        # TODO: update with a short description and a link to the online documentation
        module.helpText = _("""
This is an example of scripted loadable module bundled in an extension.
See more information in <a href="https://github.com/organization/projectname#ImageProcessing">module documentation</a>.
""")
        # TODO: replace with organization, grant and thanks
        module.acknowledgementText = _("""
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc., Andras Lasso, PerkLab,
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""")

        # Register the sample data sets once application startup has completed
        slicer.app.connect("startupCompleted()", registerSampleData)


#
# Register sample data sets in Sample Data module
#


def registerSampleData():
    """Register this module's data sets with the Sample Data module."""
    # Providing sample data makes the module easy to try out; if no sample data
    # is available, this function (and the startupCompleted connection) can be removed.
    import SampleData

    iconsPath = os.path.join(os.path.dirname(__file__), "Resources/Icons")

    # To keep the source repository small (quick to download and install),
    # data sets larger than a few MB should be stored in a GitHub release.
    samples = (
        ("ImageProcessing1", "998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95"),
        ("ImageProcessing2", "1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97"),
    )
    for sampleName, sha256 in samples:
        SampleData.SampleDataLogic.registerCustomSampleDataSource(
            # Category and sample name displayed in the Sample Data module
            category="ImageProcessing",
            sampleName=sampleName,
            # Thumbnail: roughly 260x280 px, stored in Resources/Icons. It can be
            # created by the Screen Capture module ("Capture all views", "Number of images" = "Single").
            thumbnailFileName=os.path.join(iconsPath, sampleName + ".png"),
            # Download URL and target file name
            uris="https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/" + sha256,
            fileNames=sampleName + ".nrrd",
            # Checksum to ensure file integrity. Can be computed by this command:
            #  import hashlib; print(hashlib.sha256(open(filename, "rb").read()).hexdigest())
            checksums="SHA256:" + sha256,
            # Node name used when the data set is loaded
            nodeNames=sampleName,
        )


#
# ImageProcessingParameterNode
#


@parameterNodeWrapper
class ImageProcessingParameterNode:
    """
    The parameters needed by module.

    inputVolume - The volume to threshold.
    imageThreshold - The value at which to threshold the input volume.
    invertThreshold - If true, will invert the threshold.
    thresholdedVolume - The output volume that will contain the thresholded volume.
    invertedVolume - The output volume that will contain the inverted thresholded volume.
    """

    # Volume to be thresholded (source data for processing).
    inputVolume: vtkMRMLScalarVolumeNode
    # Threshold value; the wrapper constrains it to [-100, 500], default 100.
    imageThreshold: Annotated[float, WithinRange(-100, 500)] = 100
    # When True, the threshold direction is inverted.
    invertThreshold: bool = False
    # Output volume receiving the thresholded result.
    thresholdedVolume: vtkMRMLScalarVolumeNode
    # Second output volume, used for the inverted-threshold result (see onApplyButton).
    invertedVolume: vtkMRMLScalarVolumeNode


#
# ImageProcessingWidget
#


class ImageProcessingWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
    """GUI widget for the ImageProcessing module.

    Besides the standard threshold example, it exposes batchgenerators-based
    augmentation transforms (Gaussian blur/noise, contrast, brightness, gamma,
    simulated low resolution) applied to the currently selected input volume.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent=None) -> None:
        """Called when the user opens the module the first time and the widget is initialized."""
        ScriptedLoadableModuleWidget.__init__(self, parent)
        VTKObservationMixin.__init__(self)  # needed for parameter node observation
        self.logic = None
        self._parameterNode = None
        self._parameterNodeGuiTag = None

    def setup(self) -> None:
        """Called when the user opens the module the first time and the widget is initialized."""
        ScriptedLoadableModuleWidget.setup(self)

        # Load widget from .ui file (created by Qt Designer).
        # Additional widgets can be instantiated manually and added to self.layout.
        uiWidget = slicer.util.loadUI(self.resourcePath("UI/ImageProcessing.ui"))
        self.layout.addWidget(uiWidget)
        self.ui = slicer.util.childWidgetVariables(uiWidget)

        # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
        # "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's.
        # "setMRMLScene(vtkMRMLScene*)" slot.
        uiWidget.setMRMLScene(slicer.mrmlScene)

        # Create logic class. Logic implements all computations that should be possible to run
        # in batch mode, without a graphical user interface.
        self.logic = ImageProcessingLogic()

        # Connections

        # These connections ensure that we update parameter node when scene is closed
        self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
        self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)

        # Buttons
        self.ui.applyButton.connect("clicked(bool)", self.onApplyButton)
        self.ui.histogramButton.connect("clicked(bool)", self.onHistogramButton)
        self.ui.gaussBlurPushButton.connect("clicked(bool)", self.onGaussBlurPushButton)
        self.ui.gaussNoisePushButton.connect("clicked(bool)", self.onGaussNoisePushButton)
        self.ui.contrastTransformPushButton.connect("clicked(bool)", self.onContrastTransformPushButton)
        self.ui.brightnessTransformPushButton.connect("clicked(bool)", self.onBrightnessTransformPushButton)
        # Fixed handler-name typo: was onGammaTransformPushButtonn (double "n")
        self.ui.gammaTransformPushButton.connect("clicked(bool)", self.onGammaTransformPushButton)
        self.ui.resolutionTransformPushButton.connect("clicked(bool)", self.onResolutionTransformPushButton)

        # Make sure parameter node is initialized (needed for module reload)
        self.initializeParameterNode()

        # Default "min, max" parameter ranges for the augmentation transforms
        self.ui.sigma_lineEdit.setText("1, 5")
        self.ui.noise_variance_lineEdit.setText("0, 0.05")
        self.ui.multiplier_range_lineEdit.setText("0.7, 1.5")
        self.ui.contrast_range_lineEdit.setText("0.75, 1.25")
        self.ui.gamma_range_lineEdit.setText("0.5, 2")
        self.ui.zoom_range_lineEdit.setText("0.5, 1.0")

    def cleanup(self) -> None:
        """Called when the application closes and the module widget is destroyed."""
        self.removeObservers()

    def enter(self) -> None:
        """Called each time the user opens this module."""
        # Make sure parameter node exists and observed
        self.initializeParameterNode()

    def exit(self) -> None:
        """Called each time the user opens a different module."""
        # Do not react to parameter node changes (GUI will be updated when the user enters into the module)
        if self._parameterNode:
            self._parameterNode.disconnectGui(self._parameterNodeGuiTag)
            self._parameterNodeGuiTag = None
            self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)

    def onSceneStartClose(self, caller, event) -> None:
        """Called just before the scene is closed."""
        # Parameter node will be reset, do not use it anymore
        self.setParameterNode(None)

    def onSceneEndClose(self, caller, event) -> None:
        """Called just after the scene is closed."""
        # If this module is shown while the scene is closed then recreate a new parameter node immediately
        if self.parent.isEntered:
            self.initializeParameterNode()

    def initializeParameterNode(self) -> None:
        """Ensure parameter node exists and observed."""
        # Parameter node stores all user choices in parameter values, node selections, etc.
        # so that when the scene is saved and reloaded, these settings are restored.

        self.setParameterNode(self.logic.getParameterNode())

        # Select default input nodes if nothing is selected yet to save a few clicks for the user
        if not self._parameterNode.inputVolume:
            firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
            if firstVolumeNode:
                self._parameterNode.inputVolume = firstVolumeNode

    def setParameterNode(self, inputParameterNode: Optional[ImageProcessingParameterNode]) -> None:
        """
        Set and observe parameter node.
        Observation is needed because when the parameter node is changed then the GUI must be updated immediately.
        """

        if self._parameterNode:
            self._parameterNode.disconnectGui(self._parameterNodeGuiTag)
            self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)
        self._parameterNode = inputParameterNode
        if self._parameterNode:
            # Note: in the .ui file, a Qt dynamic property called "SlicerParameterName" is set on each
            # ui element that needs connection.
            self._parameterNodeGuiTag = self._parameterNode.connectGui(self.ui)
            self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)
            self._checkCanApply()

    def _checkCanApply(self, caller=None, event=None) -> None:
        """Enable the Apply button only when both input and output volumes are selected."""
        if self._parameterNode and self._parameterNode.inputVolume and self._parameterNode.thresholdedVolume:
            self.ui.applyButton.toolTip = _("Compute output volume")
            self.ui.applyButton.enabled = True
        else:
            self.ui.applyButton.toolTip = _("Select input and output volume nodes")
            self.ui.applyButton.enabled = False

    def onApplyButton(self) -> None:
        """Run processing when user clicks "Apply" button."""
        print("onApplyButton")
        with slicer.util.tryWithErrorDisplay(_("Failed to compute results."), waitCursor=True):
            # Compute output
            self.logic.process(self.ui.inputSelector.currentNode(), self.ui.outputSelector.currentNode(),
                               self.ui.imageThresholdSliderWidget.value, self.ui.invertOutputCheckBox.checked)

            # Compute inverted output (if needed)
            if self.ui.invertedOutputSelector.currentNode():
                # If additional output volume is selected then result with inverted threshold is written there
                self.logic.process(self.ui.inputSelector.currentNode(), self.ui.invertedOutputSelector.currentNode(),
                                   self.ui.imageThresholdSliderWidget.value, not self.ui.invertOutputCheckBox.checked, showResult=False)

    def onHistogramButton(self):
        """Plot a 50-bin intensity histogram of the current input volume."""
        print("onHistogramButton")
        input_volume_node = self.ui.inputSelector.currentNode()
        # Voxel data as a NumPy array (shape: [Z, Y, X])
        image_arr = slicer.util.arrayFromVolume(input_volume_node)
        histogram = np.histogram(image_arr, bins=50)
        slicer.util.plot(histogram, xColumnIndex=1)
        lowHeight = self.slicerStretchlim(image_arr)
        print(f"lowHeight:{lowHeight}")

    def slicerStretchlim(self, volume_array):
        """Return the [1st, 99th] percentile intensities of ``volume_array``
        (contrast-stretch limits, analogous to MATLAB's ``stretchlim``)."""
        low = np.percentile(volume_array, 1)
        high = np.percentile(volume_array, 99)
        return [low, high]

    def _parseRange(self, lineEdit, label):
        """Parse a "min, max" string from ``lineEdit`` into a (float, float) tuple.

        :param lineEdit: Qt line edit holding comma-separated min and max values
        :param label: name printed alongside the raw text for debugging
        """
        text = lineEdit.text
        print(label + ": ", text)
        low, high = (float(v) for v in text.split(","))
        return low, high

    def _applyTransform(self, transform, outputName, asFloat=False):
        """Clone the input volume, run a batchgenerators transform on it, show the result.

        Shared driver for all augmentation buttons; previously each handler
        duplicated this clone/reshape/run/write-back sequence.

        :param transform: batchgenerators transform callable accepting data=[b, c, z, y, x]
        :param outputName: name for the cloned output volume node
        :param asFloat: convert voxels to float32 before transforming (needed by
            transforms that scale intensities, e.g. brightness)
        """
        input_volume_node = self.ui.inputSelector.currentNode()
        # Clone the input so the transform result goes into a new volume node
        output_volume_node = slicer.modules.volumes.logic().CloneVolume(input_volume_node, outputName)
        image_arr = slicer.util.arrayFromVolume(output_volume_node)
        # batchgenerators expects a 5D array: [batch=1, channel=1, Z, Y, X]
        image_arr = image_arr[np.newaxis, np.newaxis, :, :, :]
        if asFloat:
            image_arr = image_arr.astype(np.float32)
        tic = time.time()
        out_dict = transform(data=image_arr)
        time_cost = time.time() - tic
        print(f"{time_cost} s")
        # Strip batch and channel dimensions back to [Z, Y, X]
        d = out_dict.get('data')[0][0]
        slicer.util.updateVolumeFromArray(output_volume_node, d)
        # Show the transformed volume as the slice-view background
        slicer.util.setSliceViewerLayers(background=output_volume_node)

    def onGaussBlurPushButton(self):
        """Apply Gaussian blur with a random sigma from the configured range."""
        print("onGaussBlurPushButton")
        sigma_range = self._parseRange(self.ui.sigma_lineEdit, "sigma_range")
        # blur_sigma: sigma drawn uniformly from [min, max];
        # different_sigma_per_channel=False: same sigma for all channels;
        # p_per_sample=1.0: always applied.
        gauss_blur = GaussianBlurTransform(blur_sigma=sigma_range,
                                           different_sigma_per_channel=False, p_per_sample=1.0)
        self._applyTransform(gauss_blur, "gauss_blured")

    def onGaussNoisePushButton(self):
        """Add Gaussian noise with a random variance from the configured range."""
        print("onGaussNoisePushButton")
        # BUGFIX: previously read sigma_lineEdit (the blur field), so the
        # noise-variance field was silently ignored.
        noise_range = self._parseRange(self.ui.noise_variance_lineEdit, "noise_variance_range")
        # noise_variance: variance drawn uniformly from [min, max]; p_per_sample=1.0: always applied.
        gauss_noise = GaussianNoiseTransform(noise_variance=noise_range, p_per_sample=1.0)
        self._applyTransform(gauss_noise, "gauss_noise")

    def onContrastTransformPushButton(self):
        """Apply a random contrast scaling from the configured range."""
        print("onContrastTransformPushButton")
        contrast_range = self._parseRange(self.ui.contrast_range_lineEdit, "contrast_range")
        # contrast_range: factor alpha drawn from [min, max];
        # alpha > 1 increases contrast, alpha < 1 decreases it, alpha == 1 is identity.
        contrast = ContrastAugmentationTransform(contrast_range=contrast_range, p_per_sample=1.0)
        self._applyTransform(contrast, "contrast")

    def onBrightnessTransformPushButton(self):
        """Apply a random multiplicative brightness change from the configured range."""
        print("onBrightnessTransformPushButton")
        multi_range = self._parseRange(self.ui.multiplier_range_lineEdit, "multi_range")
        # multiplier_range: brightness factor drawn from [min, max];
        # per_channel=False: one factor for the whole volume.
        brightness = BrightnessMultiplicativeTransform(multiplier_range=multi_range, per_channel=False,
                                                       p_per_sample=1.0)
        # float32 conversion keeps the scaled intensities from being truncated
        self._applyTransform(brightness, "brightness", asFloat=True)

    def onGammaTransformPushButton(self):
        """Apply a random gamma correction from the configured range."""
        print("onGammaTransformPushButton")
        gamma_range = self._parseRange(self.ui.gamma_range_lineEdit, "gamma_range")
        # gamma_range: exponent gamma drawn from [min, max] (value ** gamma);
        # gamma < 1 brightens dark regions, gamma > 1 darkens them, gamma == 1 is identity.
        # retain_stats=True: preserve the image mean/variance after the transform.
        gamma_transform = GammaTransform(gamma_range=gamma_range, retain_stats=True, p_per_sample=1.0)
        self._applyTransform(gamma_transform, "gamma")

    def onResolutionTransformPushButton(self):
        """Simulate a low-resolution acquisition (downsample, then upsample)."""
        print("onResolutionTransformPushButton")
        zoom_range = self._parseRange(self.ui.zoom_range_lineEdit, "zoom_range")
        # zoom_range: downsampling factor drawn from [min, max];
        # order_downsample=0 (nearest) then order_upsample=3 (cubic) reintroduces
        # interpolation blur; ignore_axes=None: all axes are rescaled.
        # BUGFIX: p_per_channel was 3, which is not a valid probability; use 1.0
        # (it only matters when per_channel=True, but the value must still be in [0, 1]).
        resolution = SimulateLowResolutionTransform(zoom_range=zoom_range, per_channel=False, p_per_channel=1.0,
                                                    order_downsample=0, order_upsample=3, p_per_sample=1.0,
                                                    ignore_axes=None)
        self._applyTransform(resolution, "resolution")

#
# ImageProcessingLogic
#


class ImageProcessingLogic(ScriptedLoadableModuleLogic):
    """Computation layer of the module.

    Other Python code can import this class and use the functionality without
    requiring an instance of the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self) -> None:
        """Called when the logic class is instantiated. Can be used for initializing member variables."""
        ScriptedLoadableModuleLogic.__init__(self)

    def getParameterNode(self):
        """Return the module's parameter node wrapped in the typed wrapper class."""
        return ImageProcessingParameterNode(super().getParameterNode())

    def process(self,
                inputVolume: vtkMRMLScalarVolumeNode,
                outputVolume: vtkMRMLScalarVolumeNode,
                imageThreshold: float,
                invert: bool = False,
                showResult: bool = True) -> None:
        """
        Run the processing algorithm.
        Can be used without GUI widget.
        :param inputVolume: volume to be thresholded
        :param outputVolume: thresholding result
        :param imageThreshold: values above/below this threshold will be set to 0
        :param invert: if True then values above the threshold will be set to 0, otherwise values below are set to 0
        :param showResult: show output volume in slice viewers
        """

        if not inputVolume or not outputVolume:
            raise ValueError("Input or output volume is invalid")

        import time

        t0 = time.time()
        logging.info("Processing started")

        # Threshold via the "Threshold Scalar Volume" CLI module, synchronously.
        cliNode = slicer.cli.run(
            slicer.modules.thresholdscalarvolume,
            None,
            {
                "InputVolume": inputVolume.GetID(),
                "OutputVolume": outputVolume.GetID(),
                "ThresholdValue": imageThreshold,
                "ThresholdType": "Above" if invert else "Below",
            },
            wait_for_completion=True,
            update_display=showResult,
        )
        # The CLI module node is no longer needed; remove it so it does not clutter the scene.
        slicer.mrmlScene.RemoveNode(cliNode)

        logging.info(f"Processing completed in {time.time() - t0:.2f} seconds")


#
# ImageProcessingTest
#


class ImageProcessingTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setUp(self):
        """Reset application state; clearing the scene is enough here."""
        slicer.mrmlScene.Clear()

    def runTest(self):
        """Execute every test case for this module."""
        self.setUp()
        self.test_ImageProcessing1()

    def test_ImageProcessing1(self):
        """Exercise the logic end-to-end on downloaded sample data.

        Ideally there are several levels of tests: low-level tests exercising
        the logic with valid and invalid inputs, and higher-level tests that
        emulate user interaction. Tests also protect against regressions: if a
        developer removes a feature this module depends on, a test should break
        so they know the feature is needed.
        """

        self.delayDisplay("Starting the test")

        # --- Get/create input data ---
        import SampleData

        registerSampleData()
        inputVolume = SampleData.downloadSample("ImageProcessing1")
        self.delayDisplay("Loaded test data set")

        inputMin, inputMax = inputVolume.GetImageData().GetScalarRange()
        self.assertEqual(inputMin, 0)
        self.assertEqual(inputMax, 695)

        outputVolume = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLScalarVolumeNode")
        threshold = 100

        # --- Exercise the module logic ---
        logic = ImageProcessingLogic()

        # Non-inverted threshold: output maximum is clamped to the threshold
        logic.process(inputVolume, outputVolume, threshold, True)
        outputMin, outputMax = outputVolume.GetImageData().GetScalarRange()
        self.assertEqual(outputMin, inputMin)
        self.assertEqual(outputMax, threshold)

        # Inverted threshold: the original upper range is preserved
        logic.process(inputVolume, outputVolume, threshold, False)
        outputMin, outputMax = outputVolume.GetImageData().GetScalarRange()
        self.assertEqual(outputMin, inputMin)
        self.assertEqual(outputMax, inputMax)

        self.delayDisplay("Test passed")
