
## Prediction with overlapping patches.
import os
import time
import h5py
import numpy as np
import torch
from unet3d.utils import remove_halo
from torch.utils.data import DataLoader, Dataset, TensorDataset
import collections

# bring patch_shape and stride_shape into scope
from dataset import patch_shape, stride_shape
 
# run on GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
out_channels = 1 # number of channels in the output prediction maps (original note said "three classes", but the value used here is 1)
patch_halo = (4, 16, 16) # voxels stripped from each patch border before accumulation, to suppress block artifacts


class _AbstractPredictor:
    """Common base for predictors: stores the model, the data loader and the output path."""

    def __init__(self, model, loader, output_file):
        self.model = model
        self.loader = loader
        self.output_file = output_file

    @staticmethod
    def _volume_shape(dataset):
        """Return the spatial (D, H, W) shape of the first raw volume in `dataset`."""
        # TODO: support multiple internal datasets
        raw = dataset.raws[0]
        # a 4D raw carries a leading channel axis that must be dropped
        return raw.shape if raw.ndim == 3 else raw.shape[1:]

    @staticmethod
    def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
        """Dataset name(s) for the H5 output: bare prefix for one head, numbered names otherwise."""
        if number_of_datasets == 1:
            return [prefix]
        return [f'{prefix}{i}' for i in range(number_of_datasets)]

    def predict(self):
        """Subclasses implement the actual prediction loop."""
        raise NotImplementedError


class StandardPredictor(_AbstractPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM
    use `LazyPredictor` instead.

    The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is
    not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.

    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
    """

    def __init__(self, model, loader, output_file):
        super().__init__(model, loader, output_file)

    def predict(self):
        """Run overlapping-patch prediction over the whole volume and write the averaged result to H5."""
        print(f'Running prediction on {len(self.loader)} batches...')

        # spatial dimensionality (D, H, W) of the output predictions
        volume_shape = self._volume_shape(self.loader.dataset)

        prediction_maps_shape = (out_channels,) + volume_shape

        print(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')

        print(f'Using patch_halo: {patch_halo}')
        # sanity-check that the configured halo fits inside the patch overlap
        self._validate_halo()
        # create destination H5 file
        h5_output_file = h5py.File(self.output_file, 'w')
        # allocate prediction and normalization arrays
        print('Allocating prediction and normalization arrays...')
        prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
                                                                              h5_output_file)

        # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
        # self.model.eval()
        # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
        # self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device; batch: (1, channels, D, H, W)
                batch = batch.to(device)
                print("batch is " + str(batch.shape))
                # forward pass; NOTE(review): the model call is stubbed out and the input is
                # passed through unchanged — restore `self.model(batch)` for real inference
                # predictions = self.model(batch)
                predictions = batch
                print("模型预测shape为" + str(predictions.shape))

                # convert to numpy array
                predictions = predictions.cpu().numpy()

                # for each sample in the batch; pred: (out_channels, D, H, W)
                for pred, index in zip(predictions, indices):
                    # index: e.g. [(0, 3), (20, 25), (20, 25)] — depth/height/width slices
                    # prepend the channel slice so the index addresses (C, D, H, W)
                    channel_slice = slice(0, out_channels)

                    index = (channel_slice,) + index

                    print(f'Saving predictions for slice:{index}...')

                    # remove halo in order to avoid block artifacts in the output probability maps
                    u_prediction, u_index = remove_halo(pred, index, volume_shape, patch_halo)
                    # accumulate probabilities into the output prediction array
                    prediction_maps[u_index] += u_prediction
                    # count voxel visits for normalization
                    normalization_masks[u_index] += 1

        # average the overlapping predictions and save them
        self._save_results(prediction_maps, normalization_masks, h5_output_file)
        # close the output H5 file
        h5_output_file.close()

    def _allocate_prediction_maps(self, output_shape, output_file):
        """Allocate the (C, D, H, W) accumulation array and the per-voxel visit counter.

        BUGFIX: dropped the extra `output_heads` parameter — the call site in `predict`
        passes only the shape and the open H5 file, so the old 3-parameter signature
        raised a TypeError.
        """
        # initialize the output prediction array
        prediction_maps = np.zeros(output_shape, dtype='float32')
        # normalization mask used to average out probabilities of overlapping patches;
        # uint8 assumes fewer than 256 overlapping patches per voxel — TODO confirm
        normalization_masks = np.zeros(output_shape, dtype='uint8')
        return prediction_maps, normalization_masks

    def _save_results(self, prediction_maps, normalization_masks, h5_output_file):
        """Average the accumulated predictions and write them to the open H5 file."""
        # name of the probability-maps dataset inside the H5 file
        # (BUGFIX: was assigned to `prediction_datasets` while the code below read the
        # undefined name `prediction_dataset`, raising a NameError)
        prediction_dataset = 'predictions'

        # overlapping patches were summed, so divide by the per-voxel visit count;
        # assumes the patches cover every voxel (mask > 0 everywhere) — TODO confirm
        prediction_maps = prediction_maps / normalization_masks

        print(f'Saving predictions to: {self.output_file}/{prediction_dataset}...')
        h5_output_file.create_dataset(prediction_dataset, data=prediction_maps, compression="gzip")

    @staticmethod
    def _validate_halo():
        """Assert the patch/stride overlap is large enough to strip `patch_halo` from each patch."""
        patch_overlap = np.subtract(patch_shape, stride_shape)

        # BUGFIX: the failure message referenced the undefined name `stride`
        assert np.all(
            patch_overlap - patch_halo >= 0), f"Not enough patch overlap for stride: {stride_shape} and halo: {patch_halo}"


def remove_halo(patch, index, shape, patch_halo):
    """
    Remove `patch_halo` voxels around the edges of a given patch.

    Overlapping patches are accumulated into the output volume; stripping a halo of
    voxels from every interior patch border avoids block artifacts at patch seams.

    Args:
        patch (ndarray): model output for one patch, (C, D, H, W)
        index (tuple): (channel, z, y, x) slices locating the patch inside the volume
        shape (tuple): spatial shape of the whole volume, (D, H, W)
        patch_halo (tuple): voxels to strip per spatial dim, (z, y, x)

    Returns:
        tuple: (cropped patch, matching (C, D, H, W) index into the volume)
    """
    assert len(patch_halo) == 3

    def _new_slices(slicing, max_size, pad):
        # patch edges that touch the volume border are kept — there is no neighbour
        # patch there to provide the overlapping voxels
        if slicing.start == 0:
            p_start = 0
            i_start = 0
        else:
            p_start = pad
            i_start = slicing.start + pad

        if slicing.stop == max_size:
            p_stop = None
            i_stop = max_size
        else:
            # BUGFIX: was `else 1`, which truncated the patch to a single voxel when
            # pad == 0 while the index slice kept its full extent; `None` keeps the
            # patch and index slices the same length
            p_stop = -pad if pad != 0 else None
            i_stop = slicing.stop - pad

        return slice(p_start, p_stop), slice(i_start, i_stop)

    D, H, W = shape

    # current location of the patch inside the volume
    i_c, i_z, i_y, i_x = index
    p_c = slice(0, patch.shape[0])

    # e.g. z slicing (0, 30), pad 5 -> p: (0, -5), i: (0, 25); z slicing (30, 60) -> p: (5, -5), i: (35, 55)
    p_z, i_z = _new_slices(i_z, D, patch_halo[0])
    p_y, i_y = _new_slices(i_y, H, patch_halo[1])
    p_x, i_x = _new_slices(i_x, W, patch_halo[2])

    patch_index = (p_c, p_z, p_y, p_x)
    index = (i_c, i_z, i_y, i_x)
    return patch[patch_index], index

from torch.utils.data import Dataset, DataLoader

from slice_builder import SliceBuilder
import dataset

class TestDataset(Dataset):

    def __init__(self, t1):
        # build slice indices for raw and label data sets
        sbuilder = SliceBuilder(t1, label_datasets=None, weight_dataset=None, patch_shape=(1, 5, 5), stride_shape=(1, 5, 5), skip_shape_check=True)
        # slice_builder = get_slice_builder(self.raws, self.labels, self.weight_maps, slice_builder_config)
        self.raw_slices = sbuilder.raw_slices
        self.raws = t1
        self.phase = "test"
        self.mirror_padding = None 

    def __getitem__(self, idx):
        if idx >= len(self):
            raise StopIteration

        # get the slice for a given index 'idx'
        raw_idx = self.raw_slices[idx]
        print(raw_idx)
        # get the raw data patch for a given slice
        # raw_patch_transformed = self._transform_patches(self.raws, raw_idx, self.raw_transform)
        raw_patch = self.raws[0][raw_idx] ## 因为t1 的batch size 为1 所以直接取出来就好了

        if self.phase == 'test':
            # 整个返回的raw_idx 是为了去还原分片用的！！！！
            # example: raw_idx:[(0, 32), (0, 64), (0, 64)] 这是第一片
            # discard the channel dimension in the slices: predictor requires only the spatial dimensions of the volume
            if len(raw_idx) == 4:
                raw_idx = raw_idx[1:]
            # print(raw_patch_transformed.shape)
            return raw_patch, raw_idx

    def __len__(self):
        return len(self.raw_slices)
       
def prediction_collate(batch):
    """Collate (patch, slice-index) pairs: stack patches into one tensor, keep indices as a list."""
    patches, slice_indices = zip(*batch)
    return torch.stack(patches, 0), list(slice_indices)


if __name__ == "__main__":
    import random 
    # fix all seeds so the smoke test is reproducible
    random.seed(666)
    torch.manual_seed(666)
    np.random.seed(666)
    # build a throwaway dataloader for a round-trip smoke test
    # t1 = torch.ones((1, 1, 10, 20, 20))
    t1 = torch.rand((1, 1, 2, 20, 20))
    t1 = (t1 > 0.5).float() 
    print(t1)
    my_dataset = TestDataset(t1)
    dataloader = DataLoader(my_dataset, batch_size=1, collate_fn=prediction_collate)

    # NOTE(review): the model is a placeholder string — predict() currently bypasses the
    # model and passes the input through, so this exercises patch reassembly only
    sp = StandardPredictor("model", dataloader, "out.h5")
    sp.predict()

    f = h5py.File('./out.h5','r')   # open the H5 output file
    # print(f.keys())                          # list all top-level keys
    # # a = f['data'][:]                    # read every value under the 'data' key
    # # a = f["raw"][()][100, :, :]
    # # b = f["label"][()][100, :, :]

    res = torch.tensor(f["predictions"][()])

    res = res.unsqueeze(1)
    print(f["predictions"][()])
    print(res.shape)
    print(t1.shape)
    # if patch stitching is lossless, the reassembled volume equals the input
    print(torch.equal(res, t1))

    # print(f["predictions"][()][0, 10, 10, :50])


    # for data, indice in dataloader:
    #     print(data.shape)
    #     print(indice)

    # sbuilder = SliceBuilder(t1, label_datasets=None, weight_dataset=None, patch_shape=(10, 10, 10), stride_shape=(5, 5, 5), skip_shape_check=True)
    # print(sbuilder.raw_slices)
    # dataset = TensorDataset(t1)
    # sp = StandardPredictor()

    # loader = dataset.get_test_loaders("./test_h5/")
    # for loader in dataset.get_test_loaders("./test_h5/"):
    #     for data, indices in loader:
    #         print("data_shape is :" + str(data.shape))
    #         # print(tgt.shape)
    #         print(indices)
    #         break

        # sp = StandardPredictor("model", loader, "out.h5")
        # sp.predict()
        

        # plt.imshow(src.squeeze(0).squeeze(0)[10], cmap="gray")
        # plt.show()
        # plt.imshow(tgt.squeeze(0).squeeze(0)[10], cmap="gray")
        # plt.show()



