from typing import Callable, Optional, Sequence, Tuple, Union

import torch
import torch.nn.functional as F

from monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size
from monai.utils import BlendMode, PytorchPadMode, fall_back_tuple


def sliding_window_inference(
    inputs: torch.Tensor,
    roi_size: Union[Sequence[int], int],
    sw_batch_size: int,
    predictor: Callable[[torch.Tensor], torch.Tensor],
    overlap: float = 0.25,
    mode: Union[BlendMode, str] = BlendMode.CONSTANT,
    padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
    cval: float = 0.0,
    device: Optional[torch.device] = None,
) -> torch.Tensor:
    """
    Sliding window inference on `inputs` with `predictor`.

    When `roi_size` is larger than the inputs' spatial size, the input image is padded during
    inference. To maintain the same spatial sizes, the output image is cropped back to the
    original input size.

    Args:
        inputs: input image to be processed (assuming NCHW[D]).
        roi_size: the spatial window size for inferences.
            When a component of `roi_size` is None or non-positive, the corresponding dimension
            of the image size is used instead. For example, `roi_size=(32, -1)` is adapted to
            `(32, 64)` if the second spatial dimension size of the image is `64`.
        sw_batch_size: the batch size to run window slices.
        predictor: given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`
            should return a prediction with the same spatial shape and batch size, i.e. NMHW[D],
            where HW[D] represents the patch spatial size, M is the number of output channels,
            and N is `sw_batch_size`.
        overlap: amount of overlap between scans, in the range [0, 1).
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant"``: gives equal weight to all predictions.
            - ``"gaussian"``: gives less weight to predictions on edges of windows.

        padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}
            Padding mode when ``roi_size`` is larger than inputs. Defaults to ``"constant"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#pad
        cval: fill value for ``"constant"`` padding mode. Default: 0.
        device: device on which the windows are stitched together.
            By default the device (and accordingly the memory) of the input is used. If set to
            ``torch.device('cpu')``, for example, GPU memory consumption stays low and independent
            of the input and `roi_size`. The output is on this device, or on the input's device
            if not set.

    Raises:
        NotImplementedError: When ``inputs`` does not have batch size = 1.

    Note:
        - input must be channel-first and have a batch dim; both spatial 2D and 3D are supported.
        - currently only supports `inputs` with batch_size=1.
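
    Example:
        A minimal usage sketch; ``MyNet`` is a hypothetical callable mapping NCHW
        inputs to NMHW predictions, shown for illustration only::

            net = MyNet()  # hypothetical segmentation network
            image = torch.rand(1, 1, 120, 120)  # NCHW input with batch size 1
            with torch.no_grad():
                output = sliding_window_inference(
                    image, roi_size=(64, 64), sw_batch_size=4, predictor=net
                )
            # output has shape (1, M, 120, 120), on the original spatial grid
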
    """
    num_spatial_dims = len(inputs.shape) - 2
    assert 0 <= overlap < 1, "overlap must be >= 0 and < 1."

    # determine image spatial size and batch size
    image_size_ = list(inputs.shape[2:])
    batch_size = inputs.shape[0]

    if batch_size > 1:
        raise NotImplementedError("Currently only inputs with batch size = 1 are supported.")

    if device is None:
        device = inputs.device

    roi_size = fall_back_tuple(roi_size, image_size_)
    # in case that the image size is smaller than the roi size
    image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))
    pad_size = []
    # `F.pad` expects padding amounts for the last spatial dimension first,
    # hence the reverse iteration over the dimensions
    for k in range(len(inputs.shape) - 1, 1, -1):
        diff = max(roi_size[k - 2] - inputs.shape[k], 0)
        half = diff // 2
        pad_size.extend([half, diff - half])
    inputs = F.pad(inputs, pad=pad_size, mode=PytorchPadMode(padding_mode).value, value=cval)
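    # illustrative numbers (not from the source): a (1, 1, 96, 96) input with
    # roi_size=(100, 100) gives diff = 4 per spatial dimension, so
    # pad_size = [2, 2, 2, 2] and the padded spatial size is (100, 100)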

    scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)

    slices = dense_patch_slices(image_size, roi_size, scan_interval)
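    # each element of `slices` is a tuple of spatial `slice` objects (one per spatial
    # dimension) locating a single window within the (padded) image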

    # gather the window contents in batches of up to `sw_batch_size` for the predictor
    slice_batches = []
    for slice_index in range(0, len(slices), sw_batch_size):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
        input_slices = []
        for curr_index in slice_index_range:
            curr_slice = slices[curr_index]
            if len(curr_slice) == 3:
                input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1], curr_slice[2]])
            else:
                input_slices.append(inputs[0, :, curr_slice[0], curr_slice[1]])
        slice_batches.append(torch.stack(input_slices))

    # perform predictions
    output_rois = list()
    for data in slice_batches:
        seg_prob = predictor(data)  # batched patch segmentation
        output_rois.append(seg_prob.to(device))
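    # each element of `output_rois` holds the predictions for one batch of windows,
    # shaped (n, M, *roi_size) where M is the number of output channels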

    # stitching output image
    output_classes = output_rois[0].shape[1]
    output_shape = [batch_size, output_classes] + list(image_size)

    # create importance map for blending the overlapping windows
    importance_map = compute_importance_map(get_valid_patch_size(image_size, roi_size), mode=mode, device=device)
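    # with mode="gaussian" the map down-weights window borders so that overlapping regions
    # are dominated by the centre of each window; with mode="constant" all weights are 1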

    # allocate memory to store the full output and the count map for the overlapping parts
    output_image = torch.zeros(output_shape, dtype=torch.float32, device=device)
    count_map = torch.zeros(output_shape, dtype=torch.float32, device=device)

    for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))

        # store the result in the proper location of the full output, weighted by the importance map
        for curr_index in slice_index_range:
            curr_slice = slices[curr_index]
            if len(curr_slice) == 3:
                output_image[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += (
                    importance_map * output_rois[window_id][curr_index - slice_index, :]
                )
                count_map[0, :, curr_slice[0], curr_slice[1], curr_slice[2]] += importance_map
            else:
                output_image[0, :, curr_slice[0], curr_slice[1]] += (
                    importance_map * output_rois[window_id][curr_index - slice_index, :]
                )
                count_map[0, :, curr_slice[0], curr_slice[1]] += importance_map

    # account for any overlapping sections
    output_image = output_image / count_map

    # remove the padding: return the centre crop that matches the original input spatial size
    if num_spatial_dims == 3:
        return output_image[
            ...,
            pad_size[4] : image_size_[0] + pad_size[4],
            pad_size[2] : image_size_[1] + pad_size[2],
            pad_size[0] : image_size_[2] + pad_size[0],
        ]
    return output_image[
        ..., pad_size[2] : image_size_[0] + pad_size[2], pad_size[0] : image_size_[1] + pad_size[0]
    ]


def _get_scan_interval(
    image_size: Sequence[int], roi_size: Sequence[int], num_spatial_dims: int, overlap: float
) -> Tuple[int, ...]:
    """
    Compute the scan interval from the image size, roi size and overlap.
    The scan interval is `int((1 - overlap) * roi_size)` per dimension; if that
    would be 0, 1 is used instead so that the sliding window always advances.

    """
    if len(image_size) != num_spatial_dims:
        raise ValueError("the length of image_size differs from the number of spatial dimensions.")
    if len(roi_size) != num_spatial_dims:
        raise ValueError("the length of roi_size differs from the number of spatial dimensions.")
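
    # illustrative numbers (not from the source): roi_size=(64, 64) with overlap=0.25
    # gives an interval of int(64 * 0.75) = 48 in each dimension where roi < image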
    scan_interval = []
    for i in range(num_spatial_dims):
        if roi_size[i] == image_size[i]:
            # the window covers the whole dimension, so a single scan position suffices
            scan_interval.append(int(roi_size[i]))
        else:
            interval = int(roi_size[i] * (1 - overlap))
            scan_interval.append(interval if interval > 0 else 1)
    return tuple(scan_interval)