#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/7/25
# @USER    : Shengji He
# @File    : predict.py
# @Software: PyCharm
# @Version  : Python-
# @TASK:
from typing import Union, Tuple, List
from functools import lru_cache
from tqdm import tqdm

import numpy as np
from scipy.ndimage import gaussian_filter

import torch

import re

# Bug fix: comparing version strings lexicographically breaks for double-digit
# components (e.g. '1.10.0' > '1.6.0' is False as strings). Compare numeric
# (major, minor, patch) tuples instead; '+cuXXX' suffixes and dev tags are ignored.
_torch_version = tuple(
    int(m.group()) if (m := re.match(r'\d+', part)) else 0
    for part in torch.__version__.split('+')[0].split('.')[:3]
)
if _torch_version > (1, 6, 0):
    from torch.cuda.amp import autocast
else:
    print(f'Use custom autocast for lower pytorch version: {torch.__version__}')
    import contextlib

    @contextlib.contextmanager
    def autocast(enabled=True, *args, **kwargs):
        """No-op stand-in for torch.cuda.amp.autocast on torch <= 1.6.0."""
        yield

from SegFlow.padding import pad_nd_image

__all__ = ['compute_gaussian', 'predict_sliding_window_return_logits']

from SegFlow.utils import dummy_context, empty_cache


@lru_cache(maxsize=2)
def compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8,
                     value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \
        -> torch.Tensor:
    """Build a Gaussian importance map for weighting sliding-window tiles.

    A unit impulse at the tile center is blurred with a per-axis sigma of
    ``tile_size * sigma_scale``, normalized so its peak equals
    ``value_scaling_factor``, and returned as a tensor of ``dtype`` on
    ``device``. Results are memoized via lru_cache, so ``tile_size`` must be
    hashable (a tuple).
    """
    impulse = np.zeros(tile_size)
    impulse[tuple(edge // 2 for edge in tile_size)] = 1
    blurred = gaussian_filter(impulse, [edge * sigma_scale for edge in tile_size], 0, mode='constant', cval=0)

    importance = torch.from_numpy(blurred).type(dtype).to(device)
    importance = (importance / torch.max(importance) * value_scaling_factor).type(dtype)

    # Zeros would later yield NaNs when dividing by accumulated weights,
    # so lift them to the smallest non-zero value of the map.
    smallest_nonzero = torch.min(importance[importance != 0])
    importance[importance == 0] = smallest_nonzero

    return importance


def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \
        List[List[int]]:
    """Compute, per spatial dimension, the start coordinates of sliding-window tiles.

    Tiles are at most ``tile_size * tile_step_size`` apart but are spread evenly so
    the last tile ends exactly at the image border. Example: image 110, tile 64,
    step 0.5 -> starts [0, 23, 46].

    :param image_size: spatial size of the (padded) image, one entry per dimension
    :param tile_size: patch size, same number of dimensions as image_size
    :param tile_step_size: step as a fraction of tile_size, in (0, 1]
    :return: one list of start coordinates per dimension
    :raises AssertionError: if the image is smaller than the tile or the step is invalid
    """
    # Bug fix: the original asserted on the list comprehension itself, which is
    # always truthy for non-empty input, so undersized images slipped through.
    assert all(i >= j for i, j in zip(image_size, tile_size)), \
        "image size must be as large or larger than patch_size"
    assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'

    # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of
    # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46
    target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size]

    num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)]

    steps = []
    for dim in range(len(tile_size)):
        # the last start coordinate must leave room for a full tile
        max_step_value = image_size[dim] - tile_size[dim]
        if num_steps[dim] > 1:
            actual_step_size = max_step_value / (num_steps[dim] - 1)
        else:
            actual_step_size = 99999999999  # does not matter because there is only one step at 0

        steps.append([int(np.round(actual_step_size * i)) for i in range(num_steps[dim])])

    return steps


def _internal_get_sliding_window_slicers(image_size: Tuple[int, ...], patch_size, tile_step_size: float = 0.5,
                                         verbose: bool = False):
    """Enumerate index tuples (slicers) that tile the image with sliding windows.

    If ``patch_size`` has one dimension fewer than ``image_size``, 2D patches are
    slid over every position of the leading (pseudo-3D) axis; otherwise the image
    is tiled with full 3D patches. Each slicer indexes all channels (slice(None))
    plus one spatial tile.
    """
    slicers = []
    if len(patch_size) < len(image_size):
        assert len(patch_size) == len(
            image_size) - 1, 'if tile_size has less entries than image_size, ' \
                             'len(tile_size) ' \
                             'must be one shorter than len(image_size) ' \
                             '(only dimension ' \
                             'discrepancy of 1 allowed).'
        steps = compute_steps_for_sliding_window(image_size[1:], patch_size,
                                                 tile_step_size)
        if verbose:
            print(f'n_steps {image_size[0] * len(steps[0]) * len(steps[1])}, image size is'
                  f' {image_size}, tile_size {patch_size}, '
                  f'tile_step_size {tile_step_size}\nsteps:\n{steps}')
        # one 2D tile per (leading-axis position, x-start, y-start)
        slicers.extend(
            (slice(None), d,
             slice(sx, sx + patch_size[0]),
             slice(sy, sy + patch_size[1]))
            for d in range(image_size[0])
            for sx in steps[0]
            for sy in steps[1]
        )
    else:
        steps = compute_steps_for_sliding_window(image_size, patch_size,
                                                 tile_step_size)
        if verbose:
            print(
                f'n_steps {np.prod([len(i) for i in steps])}, image size is {image_size}, tile_size {patch_size}, '
                f'tile_step_size {tile_step_size}\nsteps:\n{steps}')
        # one 3D tile per (x-start, y-start, z-start)
        slicers.extend(
            (slice(None),
             slice(sx, sx + patch_size[0]),
             slice(sy, sy + patch_size[1]),
             slice(sz, sz + patch_size[2]))
            for sx in steps[0]
            for sy in steps[1]
            for sz in steps[2]
        )
    return slicers


def _internal_maybe_mirror_and_predict(x: torch.Tensor, network, use_mirroring: bool = True,
                                       allowed_mirroring_axes=None) -> torch.Tensor:
    mirror_axes = allowed_mirroring_axes if use_mirroring else None
    prediction = network(x)

    if mirror_axes is not None:
        # check for invalid numbers in mirror_axes
        # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3
        assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'

        num_predictons = 2 ** len(mirror_axes)
        if 0 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (2,))), (2,))
        if 1 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (3,))), (3,))
        if 2 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (4,))), (4,))
        if 0 in mirror_axes and 1 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (2, 3))), (2, 3))
        if 0 in mirror_axes and 2 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (2, 4))), (2, 4))
        if 1 in mirror_axes and 2 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (3, 4))), (3, 4))
        if 0 in mirror_axes and 1 in mirror_axes and 2 in mirror_axes:
            prediction += torch.flip(network(torch.flip(x, (2, 3, 4))), (2, 3, 4))
        prediction /= num_predictons
    return prediction


def predict_sliding_window_return_logits(input_image: torch.Tensor, network, num_segmentation_heads, patch_size,
                                         device, tile_step_size: float = 0.5, use_gaussian: bool = True,
                                         use_mirroring: bool = True, perform_everything_on_gpu: bool = True,
                                         allowed_mirroring_axes=None, verbose: bool = False,
                                         allow_tqdm: bool = True) -> Union[np.ndarray, torch.Tensor]:
    """Run tiled (sliding-window) inference over a whole 4D image and return logits.

    :param input_image: 4D tensor (c, x, y, z); padded to at least patch_size internally
    :param network: segmentation model, called inside torch.no_grad (and autocast on cuda)
    :param num_segmentation_heads: number of output channels to preallocate
    :param patch_size: spatial tile size; may have one dimension fewer than the image
        (2D tiles slid over a 3D volume)
    :param device: torch.device the network runs on
    :param tile_step_size: step between tiles as a fraction of patch_size, in (0, 1]
    :param use_gaussian: weight each tile's logits with a center-peaked Gaussian map
    :param use_mirroring: enable test-time mirroring over allowed_mirroring_axes
    :param perform_everything_on_gpu: keep aggregation buffers on `device`; on
        RuntimeError (e.g. OOM) the buffers automatically fall back to CPU
    :param verbose: print progress details
    :param allow_tqdm: show a tqdm progress bar over tiles
    :return: logits of shape (num_segmentation_heads, x, y, z) with padding reverted
    """
    assert isinstance(input_image, torch.Tensor)

    # Bug fix: only warn (and downgrade) when GPU aggregation was actually
    # requested; previously this printed even when the flag was already False.
    if device.type != 'cuda' and perform_everything_on_gpu:
        print('perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False')
        perform_everything_on_gpu = False

    network = network.to(device)
    network.eval()

    empty_cache(device)

    # Autocast caveats:
    # - on 'cpu' it can be very slow (no automatic bfloat16 support detection)
    # - on 'mps' it raises "not implemented" even with enabled=False
    # so autocast is only activated for cuda devices.
    with torch.no_grad():
        with autocast(enabled=True) if device.type == 'cuda' else dummy_context():
            assert len(input_image.shape) == 4, 'input_image must be a 4D np.ndarray or torch.Tensor (c, x, y, z)'

            if verbose:
                print(f'Input shape: {input_image.shape}')
                print("step_size:", tile_step_size)
                print("mirror_axes:", allowed_mirroring_axes if use_mirroring else None)

            # if input_image is smaller than tile_size we need to pad it to tile_size;
            # slicer_revert_padding undoes this at the very end
            data, slicer_revert_padding = pad_nd_image(input_image, patch_size,
                                                       'constant', {'value': 0}, True,
                                                       None)

            slicers = _internal_get_sliding_window_slicers(data.shape[1:], patch_size, tile_step_size, verbose)

            # preallocate the aggregation buffers: summed logits and per-voxel weights
            results_device = device if perform_everything_on_gpu else torch.device('cpu')
            if verbose:
                print('preallocating arrays')
            try:
                data = data.to(device)
                predicted_logits = torch.zeros((num_segmentation_heads, *data.shape[1:]),
                                               dtype=torch.half,
                                               device=results_device)
                n_predictions = torch.zeros(data.shape[1:], dtype=torch.half,
                                            device=results_device)
                if use_gaussian:
                    gaussian = compute_gaussian(tuple(patch_size), sigma_scale=1. / 8,
                                                value_scaling_factor=1000,
                                                device=results_device)
            except RuntimeError:
                # sometimes the stuff is too large for GPUs. In that case fall back to CPU
                results_device = torch.device('cpu')
                data = data.to(results_device)
                predicted_logits = torch.zeros((num_segmentation_heads, *data.shape[1:]),
                                               dtype=torch.half,
                                               device=results_device)
                n_predictions = torch.zeros(data.shape[1:], dtype=torch.half,
                                            device=results_device)
                if use_gaussian:
                    gaussian = compute_gaussian(tuple(patch_size), sigma_scale=1. / 8,
                                                value_scaling_factor=1000,
                                                device=results_device)
            finally:
                empty_cache(device)

            if verbose:
                print('running prediction')
            for sl in tqdm(slicers, disable=not allow_tqdm):
                workon = data[sl][None]
                workon = workon.to(device, non_blocking=False)

                prediction = _internal_maybe_mirror_and_predict(workon, network, use_mirroring, allowed_mirroring_axes)[
                    0].to(results_device)
                try:
                    predicted_logits[sl] += (prediction * gaussian if use_gaussian else prediction)
                    n_predictions[sl[1:]] += (gaussian if use_gaussian else 1)
                except Exception:
                    # NOTE(review): best-effort fallback, presumably for backends
                    # lacking half-precision kernels for this op — the product is
                    # computed in float32 and cast back; confirm which backend hits this
                    predicted_logits[sl] += ((prediction.float() * gaussian.float()).half() if use_gaussian else prediction)
                    n_predictions[sl[1:]] += (gaussian if use_gaussian else 1)

            # normalize overlapping-tile sums by the accumulated weights
            predicted_logits /= n_predictions
    empty_cache(device)
    return predicted_logits[tuple([slice(None), *slicer_revert_padding[1:]])]
