from io import BytesIO
from vision_3d_handler import Vision3DHandler
import torch
import numpy as np
from torch import version

from model import FullRes3D


class Image3DSegmenter(Vision3DHandler):
    """TorchServe handler for 3D image segmentation.

    Refer to ts.torch_handler.image_segmenter
    """

    def __init__(self):
        # self.model is populated by the base handler's initialize();
        # the annotation is for type checkers only.
        self.model: FullRes3D
        super().__init__()
        self.profiler_args = {
            # "activities" : [ProfilerActivity.CPU],
            "record_shapes": True,
        }

    def preprocess(self, data):
        """Convert the raw request payload into an array the network can consume.

        The request body is expected to be the bytes produced by
        ``np.savez_compressed`` and to contain exactly one array
        (extra arrays are ignored; only the first is returned).

        Args:
            data (list): TorchServe request batch; exactly one request whose
                payload sits under the 'data' or 'body' key.

        Returns:
            np.ndarray: the first array stored in the compressed archive.

        Raises:
            ValueError: if the request carries no payload, or the archive
                contains no arrays.
        """
        print(
            f'**preprocess the data for {self.context.model_name}, '
            f'type:{type(data)}, request id: {self.context.get_request_id()}')
        assert len(data) == 1
        e: dict = data[0]
        arr_bytes: bytearray = e.get('data', e.get('body'))
        if arr_bytes is None:
            # Fail loudly here instead of letting np.load crash on None.
            print(f'data: {data}, data[0]: {e}')
            raise ValueError("request contains neither 'data' nor 'body'")

        arr_compressed = np.load(BytesIO(arr_bytes))
        # The archive comes from np.savez_compressed() and should hold exactly
        # one array.  The previous loop-and-break left the result unbound
        # (NameError) for an empty archive; extract the first entry explicitly.
        # TODO make the expected shape constraint more flexible,
        # e.g. assert v.shape[1:] == (1, 64, 160, 192)
        names = arr_compressed.files
        if not names:
            raise ValueError('compressed archive contains no arrays')
        return arr_compressed[names[0]]

    def inference(self, data, *args, **kwargs):
        """Run sliding-window 3D prediction on the preprocessed input.

        Args:
            data: network-ready array produced by preprocess().

        Returns:
            tuple: (segmentation, class_probabilities) as returned by
            ``self.model.predict_3D``; forwarded unchanged to postprocess().
        """
        print(
            f'this is the torch version: {torch.__version__}, cuda:{version.cuda}'
        )
        print(f'path of torch: {torch.__file__}')
        # Deep supervision must be disabled at prediction time.
        # TODO this really belongs in model initialization, not per request.
        self.model.do_ds = False
        self.model.eval()
        # Refer to 'nnUNet/nnunet/training/network_training/nnUNetTrainer.py:518'
        ret = self.model.predict_3D(data,
                                    do_mirroring=True,  # must match training
                                    mirror_axes=(0, 1, 2),
                                    use_sliding_window=True,
                                    step_size=0.5,
                                    patch_size=np.array([64, 160, 192]),
                                    regions_class_order=None,
                                    use_gaussian=True,
                                    pad_border_mode='constant',
                                    pad_kwargs={'constant_values': 0},
                                    all_in_gpu=False,
                                    verbose=True,
                                    mixed_precision=True)
        return ret

    def postprocess(self, data):
        """Pack the prediction into a compressed npz response payload.

        Args:
            data (sequence): (segmentation, class_probabilities) pair
                produced by inference().

        Returns:
            list: a single-element list holding the np.savez_compressed
            bytes, as required by the TorchServe handler contract.
        """
        print(f'The output data shape: {len(data)}, {data[0].shape}')
        # TODO convert the segmentation to a uint8 mask before shipping

        bytes_io = BytesIO()
        np.savez_compressed(bytes_io, data=data[0], probabilities=data[1])
        return [bytes_io.getvalue()]
