from skorch.core import *
from skorch.utils import *
from skorch.callbacks import EpochScoring, LRScheduler, CyclicLR, Checkpoint
from skorch.loss import *
from skorch.helper import predefined_split
from torch.optim import SGD, lr_scheduler, Adam
from model import UNet_3D
from prepare_dataset import image2tensor
import SimpleITK as sitk
from scipy import ndimage
from batchgenerators.utilities.file_and_folder_operations import *
from sklearn.metrics import accuracy_score
from nnunet.preprocessing.preprocessing import resample_patient
from sklearn.model_selection import train_test_split
from resnet import resnet50
import pickle
from skorch import NeuralNet, NeuralNetClassifier
import vertebra_classify
from network_architecture.generic_UNet import Generic_UNet
from network_architecture.utilities import InitWeights_He, softmax_helper
from connection_list import Connection_List
from dice_loss import DC_and_CE_loss

# Global CT intensity statistics: presumably [[mean], [std]] of normalized
# vertebra voxels — TODO confirm against the dataset preprocessing pipeline.
ct_vertebra_stats = [[0.030088576], [0.13161582]]


class PatchStatus(Enum):
    """
    Patch and mask intersection.

    Describes how much of a vertebra mask a sampled patch covers.
    NOTE(review): "complemet"/"incomplement" look like typos for
    "complete"/"incomplete", but the member names are part of the
    interface (compared against pickled patch metadata elsewhere in
    this file), so they are intentionally left unchanged.
    """
    complemet = 0         # presumably: patch fully covers a vertebra — confirm
    incomplement = 1      # presumably: patch partially overlaps a vertebra — confirm
    containsnothing = 2   # patch contains no mask voxels (discarded by dataset builder)


def load_pickle(file, mode='rb'):
    """Deserialize and return the object stored in *file*.

    :param file: path to a pickle file
    :param mode: file open mode (binary read by default)
    :return: the unpickled object
    """
    with open(file, mode) as handle:
        return pickle.load(handle)


def segment_cnn(n_classes: int=2, softmax: bool=False, feature_scale:int=1, imsize:Sizes=(256,256, 256)):
    """
    Build the ResNet-50 based 3D segmentation network.

    :param n_classes: number of output classes
    :param softmax: if True, append an nn.Softmax over the last dimension
    :param feature_scale: scale value (not used by this builder)
    :param imsize: input volume size, mapped to (W, H, D) per the call below
    :return: nn.Sequential wrapping the backbone (plus optional softmax)
    """
    backbone = resnet50(
        sample_input_W=imsize[0],
        sample_input_H=imsize[1],
        sample_input_D=imsize[2],
        shortcut_type='B',
        no_cuda=False,
        num_classes=n_classes,
        task_type='segment',
    )
    modules = [backbone]
    if softmax:
        modules.append(nn.Softmax(dim=-1))
    return nn.Sequential(*modules)


def segment_nnunet(n_classes: int=2, softmax: bool=False, feature_scale:int=1, imsize:Sizes=(256,256, 256)):
    """
    Build a Generic_UNet (nnU-Net architecture) for 3D segmentation.

    :param n_classes: number of output classes
    :param softmax: unused here — softmax_helper is always installed as the
        inference nonlinearity below
    :param feature_scale: unused by this builder
    :param imsize: unused by this builder (the network is configured without it)
    :return: a configured Generic_UNet instance
    """
    num_input_channels = 1
    net_numpool = 6                         # number of pooling stages
    base_num_features = 30
    # None lets Generic_UNet fall back to its internal default sizes.
    net_num_pool_op_kernel_sizes = None     # [[2,2,2], [2,2,2], [2,2,2], [2,2,2], [2,2,2]]
    net_conv_kernel_sizes = None            # [[3,3,3], [3,3,3], [3,3,3], [3,3,3], [3,3,3], [3,3,3]]
    conv_op = nn.Conv3d
    dropout_op = nn.Dropout3d
    norm_op = nn.InstanceNorm3d
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}   # p=0: dropout effectively disabled
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
    # NOTE(review): the positional arguments below must match the Generic_UNet
    # constructor signature exactly — verify against
    # network_architecture.generic_UNet before reordering anything.
    network = Generic_UNet(num_input_channels, base_num_features, n_classes, net_numpool,
                           2, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
                           net_nonlin, net_nonlin_kwargs, False, False, softmax_helper, InitWeights_He(1e-2),
                           net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, False, True, True)

    network.inference_apply_nonlin = softmax_helper
    return network


class SegmentDataset(Dataset):
    """Dataset of CT vertebra patches and their segmentation masks.

    Each item is a pickled metadata file (``*.pkl``) that sits next to the
    corresponding CT volume (``<ctvolume_name>.nii``) and its mask
    (``segvolume_*``). Items are zoomed to ``target_size`` and normalized
    with the supplied (mean, std) stats.
    """

    def read_nii(self, nii_file):
        """Read a NIfTI file and return its voxel data as a numpy array."""
        return sitk.GetArrayFromImage(sitk.ReadImage(str(nii_file)))

    def __init__(self, image_path, augmentation_func=None, stats=None, target_size=None):
        """
        :param image_path: list of Path objects pointing at patch ``.pkl`` files
        :param augmentation_func: optional augmentation factory (currently disabled)
        :param stats: (mean, std) pair used to normalize image intensities
        :param target_size: desired patch size; the first axis drives the zoom factor
        """
        super().__init__()
        self.data_path = image_path[0].parents[1]
        self.images_items = sorted(image_path)
        self.target_size = target_size
        self.augmentation = None
        # if augmentation_func:
        #     self.augmentation = augmentation_func(*self.target_size)
        # BUGFIX: the original `if stats:` skipped the assignment for any
        # falsy stats value, leaving self.stats undefined and crashing later
        # in transform_tensor; assign whenever stats is actually provided.
        if stats is not None:
            self.stats = stats

    def __len__(self):
        return len(self.images_items)

    def transform_tensor(self, image, mask):
        """Convert image/mask numpy arrays to normalized float tensors.

        :return: ((image - mean) / std as float32 tensor, mask as float tensor),
            both with a leading channel axis added.
        """
        img_t = image[np.newaxis]
        # uint8 images are rescaled to [0, 1]; other dtypes are used as-is
        img_t = torch.from_numpy((img_t / (255. if img_t.dtype == np.uint8 else 1)).astype(np.float32))
        mask_t = mask[np.newaxis]
        # mask_t[mask_t > 0] = 1
        mask_t = torch.from_numpy(mask_t).float()
        return (img_t - torch.tensor(self.stats[0])) / torch.tensor(self.stats[1]), mask_t

    def __getitem__(self, idx):
        pkl_item_file = self.images_items[idx]
        item_info = load_pickle(str(pkl_item_file))

        image_name = pkl_item_file.with_name(item_info['ctvolume_name']).with_suffix(".nii")
        mask_name = pkl_item_file.with_name("segvolume_" + image_name.name.split("_")[-1])

        # 1. read original image and label
        img = self.read_nii(str(image_name))
        mask = self.read_nii(str(mask_name))

        # NOTE(review): a single scale factor from the first axis is applied
        # isotropically — assumes cubic patches; confirm upstream.
        scale = float(self.target_size[0]) / img.shape[0]
        if scale != 1.0:
            img = ndimage.zoom(img, scale, mode='nearest')
            mask = ndimage.zoom(mask, scale, mode='nearest')

        # 2. convert_label
        # mask[mask > 0] = 1.0

        # 3. augment (currently disabled; self.augmentation is always None)
        if self.augmentation:
            aug_image = self.augmentation(image=img, mask=mask)
            img = aug_image['image']
            mask = aug_image['mask']
        return self.transform_tensor(img, mask)


def create_ct_vertebra_datasets(data_path:PathList, augmentation_func=None, target_size:Sizes=None):
    """Create the train/validation SegmentDataset pair for CT vertebra patches.

    Reads intensity statistics from ``intensityproperties.pkl``, drops
    patches whose metadata marks them as containing no vertebra, and
    splits the remainder 75/25 into train/validation.
    """
    with open(os.path.join(data_path, "intensityproperties.pkl"), 'rb') as f:
        intensityproperties = pickle.load(f)

    mean_intensity = intensityproperties[0]['mean']
    std_intensity = intensityproperties[0]['sd']

    patch_dir = Path(os.path.join(data_path, "patch"))
    image_items = sorted(patch_dir.rglob('*.pkl'))

    # Discard patches that contain no vertebra at all.
    empties = {f for f in image_items
               if load_pickle(str(f))['complement_class'] == PatchStatus.containsnothing}
    image_items = list(set(image_items) - empties)

    x_train, x_test = train_test_split(image_items, test_size=0.25, shuffle=True)

    train_ds = SegmentDataset(x_train, augmentation_func=augmentation_func, stats=(mean_intensity, std_intensity), target_size=target_size)
    valida_ds = SegmentDataset(x_test, augmentation_func=augmentation_func, stats=(mean_intensity, std_intensity), target_size=target_size)
    return train_ds, valida_ds


def approximate_iou_metric(true_masks: np.ndarray, predicted_masks: np.ndarray, sigmoid=True):
    """Jaccard similarity coefficient (IoU) score.

    :param true_masks: ground-truth binary masks; reduction is over axes (2, 3)
    :param predicted_masks: predicted logits (sigmoid=True) or probabilities
    :param sigmoid: apply a sigmoid to predictions before thresholding
    :return: per-entry IoU averaged over axis 0, rounded to 4 decimals
    """
    # BUGFIX: test_thresh was previously defined only inside the sigmoid
    # branch, so calling with sigmoid=False raised NameError below.
    test_thresh = 0.5
    if sigmoid:
        predicted_masks = 1 / (1 + np.exp(-predicted_masks))
    predicted_masks = (predicted_masks > test_thresh).astype(np.uint8)

    approx_intersect = np.sum(np.minimum(predicted_masks, true_masks), axis=(2, 3))
    approx_union = np.sum(np.maximum(predicted_masks, true_masks), axis=(2, 3))
    return np.around(np.mean(approx_intersect / approx_union, axis=0), decimals=4)


def approximate_dice_metric(true_masks: np.ndarray, predicted_masks: np.ndarray, sigmoid=True):
    """Dice metric https://github.com/mattmacy/torchbiomed/blob/master/torchbiomed/loss.py

    :param true_masks: ground-truth masks; squeezed to shape (C, D, H, W)
    :param predicted_masks: raw class scores of shape (N, K, D, H, W);
        the class axis (1) is reduced with argmax
    :param sigmoid: apply an elementwise sigmoid before the argmax
        (sigmoid is monotonic, so it does not change the argmax result)
    :return: Dice score rounded to 4 decimals
    """
    if sigmoid:
        predicted_masks = 1 / (1 + np.exp(-predicted_masks))

    predicted_masks = predicted_masks.argmax(1)
    predicted_masks = predicted_masks.astype(np.uint8)

    true_masks = np.squeeze(true_masks)
    [ori_c, ori_d, ori_h, ori_w] = true_masks.shape
    [n, d, h, w] = predicted_masks.shape
    if ori_d != d or ori_h != h or ori_w != w:
        # BUGFIX: the zoom factors previously had 5 entries for a 4-D array,
        # which makes ndimage.zoom raise; one factor per axis is required.
        # Also migrated off the deprecated ndimage.interpolation namespace.
        scale = [1, ori_d * 1.0 / d, ori_h * 1.0 / h, ori_w * 1.0 / w]
        predicted_masks = ndimage.zoom(predicted_masks, scale, order=0)

    # Dice from true-positive / false-positive / false-negative voxel counts.
    tp = np.sum(predicted_masks * true_masks)
    fp = np.sum(predicted_masks) - tp
    fn = np.sum(true_masks) - tp
    eps = 1e-7

    img_score = (2 * tp + eps) / (2 * tp + fn + fp + eps)

    return np.around(img_score.mean(), decimals=4)


def iou_scoring(net, ds, y):
    """skorch scoring hook: IoU of the net's predictions on *ds* vs *y*."""
    return approximate_iou_metric(y, net.predict(ds))


def dice_scoring(net, ds, y):
    """skorch scoring hook: Dice of the net's predictions on *ds* vs *y*."""
    return approximate_dice_metric(y, net.predict(ds), sigmoid=False)


# NOTE(review): these rebindings shadow the scoring *functions* defined above
# with EpochScoring callback instances of the same name; the plain functions
# remain reachable only through the callbacks. Renaming would require
# touching every callbacks=[...] list below.
iou_scoring = EpochScoring(iou_scoring, name='iou_metric', lower_is_better=False)
dice_scoring = EpochScoring(dice_scoring, name='dice_metric', lower_is_better=False)
# Cyclic LR: triangular schedule oscillating between base_lr and max_lr.
cyclicLR = LRScheduler(policy=CyclicLR, base_lr=0.002, max_lr=0.2, step_size_up=540, step_size_down=540)
# Step LR: multiply the learning rate by gamma every 130 scheduler steps.
stepLR = LRScheduler(policy=lr_scheduler.StepLR, step_size=130, gamma=0.1)


class CELoss3D(nn.Module):
    """Binary cross-entropy loss for 3D volumes.

    If the prediction's spatial size differs from the target's, the
    prediction is trilinearly resized to match before computing BCE.
    Predictions are expected to be probabilities in [0, 1] (nn.BCELoss).
    """

    def __init__(self, eps=1e-7):
        super().__init__()
        self.eps = eps
        self.bce = nn.BCELoss(reduction='mean')

    def forward(self, y_pr, y_gt):
        pred_size = (y_pr.size(2), y_pr.size(3), y_pr.size(4))
        gt_size = (y_gt.size(2), y_gt.size(3), y_gt.size(4))
        if pred_size != gt_size:
            y_pr = F.interpolate(y_pr, size=gt_size, mode='trilinear', align_corners=True)
        return self.bce(y_pr, y_gt.float())


def train():
    """Train the nnU-Net vertebra segmentation model end to end.

    Builds the train/validation datasets, wraps the network in a skorch
    NeuralNet with SGD + step LR decay + Dice scoring + checkpointing,
    optionally warm-starts from a pretrained ResNet checkpoint, and fits.
    """
    target_size = (128, 128, 128)
    dirname = 'segment_v3_check'
    pretrained = False
    pretrained_ckpt = Path("/pretrain/resnet_50.pth")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_root = '/home/cao/disk1/cx/vertebra_segment_data'

    train_ds, valida_ds = create_ct_vertebra_datasets(data_root, None, target_size=target_size)

    module = segment_nnunet(n_classes=25, softmax=True, imsize=target_size)
    net = NeuralNet(
        module,
        criterion=DC_and_CE_loss,
        batch_size=2,
        max_epochs=300,
        optimizer=SGD,
        optimizer__momentum=0.99,
        optimizer__weight_decay=0.001,
        lr=0.003,
        iterator_train__shuffle=True,
        iterator_train__num_workers=4,
        iterator_train__drop_last=True,
        iterator_valid__shuffle=False,
        iterator_valid__num_workers=4,
        train_split=predefined_split(valida_ds),
        callbacks=[stepLR, dice_scoring, Checkpoint(f_params='best_params_v2.pt', dirname=dirname)],
        device=device,
        parallel=True
    )

    if pretrained:
        # Warm-start: copy the weights whose keys match the current module,
        # then restore the latest training state from the checkpoint dir.
        net.initialize()
        pretrain = torch.load(str(pretrained_ckpt))
        net_dict = net.module_.state_dict()
        matched = {k: v for k, v in pretrain['state_dict'].items() if k in net_dict}
        net_dict.update(matched)
        net.module_.load_state_dict(net_dict)

        net.load_params(f_params=dirname + '/best_params_v2.pt',
                        f_optimizer=dirname + '/optimizer.pt',
                        f_history=dirname + '/history.json')
    net.fit(train_ds)


def predict_ct():
    """
    Sliding-window inference over a single CT volume with a 3D U-Net.

    Phase 1 scans the volume in `step`-voxel strides until a 128^3 patch
    with a sufficiently large vertebra response (> 1000 positive voxels)
    is found; phase 2 then walks along the depth axis from that position,
    accumulating accepted patch predictions into `seg_img`, and finally
    writes the result to 'PreditSeg.nii'.

    NOTE(review): this function relies on for-loop variables (d, j, i,
    dd, centre) leaking out of the nested loops; if no patch ever passes
    the threshold, the phase-2 code raises NameError. Review before reuse.
    """
    target_size = (128, 128, 128)
    dirname = 'segment_check'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    module = UNet_3D(feature_scale=2, n_classes=1, in_channels=1, debug=False)
    net = NeuralNet(
        module,
        criterion=BCEDiceLoss,
        batch_size=1,
        max_epochs=120,
        optimizer=SGD,
        optimizer__momentum=0.99,
        optimizer__weight_decay=0.001,
        lr=0.001,
        iterator_train__shuffle=True,
        iterator_train__num_workers=8,
        iterator_valid__shuffle=False,
        iterator_valid__num_workers=8,
        callbacks=[stepLR,
                   dice_scoring,
                   Checkpoint(f_params='oldvert_seg.pt')],
        device=device,
        parallel=True
    )
    net.initialize()
    net.load_params(f_params=dirname+'/best_params.pt')

    img_file = '/home/cao/xVertSeg2019/training_phase_1_release/verse005.npy'

    predict_img = np.load(img_file)
    deep, height, width = predict_img.shape
    seg_memory = np.zeros((deep, height, width))
    seg_img = np.zeros((deep, height, width))
    step = 12
    test_thresh = 0.5
    patch_flag = False
    # --- Phase 1: find the first patch with a strong vertebra response ---
    for d in range(-120, deep - step, step):
        for j in range(0, height - step, step):
            for i in range(0, width - step, step):
                dd = d
                if dd < 0:
                    dd = 0
                # NOTE(review): subtracting seg_memory from the *entire*
                # volume on every inner iteration mutates predict_img
                # cumulatively — confirm this is intended.
                predict_img = predict_img - seg_memory
                img_patch = predict_img[dd:128 + d, j:128 + j, i:128 + i]
                a = sitk.GetImageFromArray(img_patch)
                sitk.WriteImage(a, 'a.nii')  # debug dump of the current patch
                # if img_patch.shape != (128, 128, 128):
                #     img_patch_pad = np.pad(img_patch, ((0, 128 - img_patch.shape[0]), (0, 128 - img_patch.shape[1]),
                #                                    (0, 128 - img_patch.shape[2])), 'constant', constant_values=(0, 0))
                #     img_patch_pad = img_patch_pad[np.newaxis, ..., np.newaxis]
                #     img_patch_pad = image2tensor(img_patch_pad)
                #     img, label = net.predict(img_patch_pad)
                # else:
                img_patch = img_patch[np.newaxis, ..., np.newaxis]
                img, label = net.predict(image2tensor(img_patch))
                img = img.squeeze(0).squeeze(0)
                # sigmoid + threshold to get a binary patch mask
                img = 1 / (1 + np.exp(-img))
                img = (img > test_thresh).astype(np.uint8)
                b = sitk.GetImageFromArray(img)
                sitk.WriteImage(b, 'b.nii')  # debug dump of the binary mask
                sumVert = np.sum(img > 0)
                print(sumVert)
                if sumVert > 1000:
                    # centroid of the detected vertebra voxels (z, y, x)
                    centre = np.mean(np.where(img != 0), axis=1)
                    patch_flag = True
                    break
            if patch_flag:
                break
        if patch_flag:
            break
    # Loop indices d/j/i (and centre, dd) intentionally leak out of the loops.
    patch_width = i
    patch_height = (j + abs(centre[1] - 64)).astype(int)
    patch_deep = d
    # --- Phase 2: walk down the depth axis from the found position ---
    for d in range(patch_deep, deep - step, step):
        # NOTE(review): `dd` stays at its phase-1 value while `d` advances,
        # so the depth slice never moves with the loop — looks like a bug;
        # confirm whether dd should be recomputed from d here.
        img_patch = predict_img[dd:128 + d, patch_height:128 + patch_height, patch_width:128 + patch_width]
        img_patch = img_patch[np.newaxis, ..., np.newaxis]
        img, label = net.predict(image2tensor(img_patch))
        img = img.squeeze(0).squeeze(0)
        label = label.squeeze(0)
        img = 1 / (1 + np.exp(-img))
        img = (img > test_thresh).astype(np.uint8)
        out = sitk.GetImageFromArray(img)
        sitk.WriteImage(out, str(i)+'.nii')
        label = 1 / (1 + np.exp(-label))
        label = (label > test_thresh).astype(np.uint8)
        # NOTE(review): if `label` is an array this comparison only works
        # for a single element; otherwise it raises — confirm label shape.
        if label == 1:
            # print(label)
                    # print((seg_memory[dd:128 + d, j:128 + j, i:128 + i]).shape)
                    # print((img[dd:128 + d, :, :]).shape)
                    # memory_counter = Counter(seg_memory.flatten())
                    # print('memory-before:', memory_counter)
                    # print('d:{},j:{},i:{}'.format(d, j, i))
                    # a = predict_img[dd:128 + d, j:128 + j, i:128 + i]
                    # b = img
                    # a = sitk.GetImageFromArray(a)
                    # sitk.WriteImage(a, 'a.nii')
                    # b = sitk.GetImageFromArray(b)
                    # sitk.WriteImage(b, 'b.nii')
            # NOTE(review): the right-hand side is a 1-D fancy-indexed view
            # while the left-hand side is a 3-D slab — shapes likely
            # mismatch at runtime; verify before relying on this path.
            seg_memory[dd:128 + d, j:128 + j, i:128 + i] = predict_img[np.where(img!=0)]
            seg_img[dd:128 + d, j:128 + j, i:128 + i] = img
    outImage = sitk.GetImageFromArray(seg_img)
    sitk.WriteImage(outImage, 'PreditSeg.nii')


class VertebraInfer():
    """
    End-to-end vertebra inference pipeline.

    A patch classifier decides what a 128^3 crop contains (3-way label),
    and an nnU-Net produces the per-patch segmentation. ImageInfer slides
    crops over a resampled, intensity-normalized CT volume and steers the
    next crop centre using the classifier/segmentation feedback.
    """

    def init_seg_model(self, target_size, device):
        """Build and initialize the skorch-wrapped segmentation network."""
        module = segment_nnunet(n_classes=2, softmax=True, imsize=target_size)
        net = NeuralNet(
            module,
            criterion=DC_and_CE_loss,
            batch_size=1,
            max_epochs=50,
            optimizer=SGD,
            optimizer__momentum=0.99,
            optimizer__weight_decay=0.001,
            lr=0.001,
            iterator_train__shuffle=True,
            iterator_train__num_workers=4,
            iterator_train__drop_last=True,
            iterator_valid__shuffle=False,
            iterator_valid__num_workers=4,
            device=device,
            parallel=True
        )
        net.initialize()

        return net

    def init_classify_model(self, target_size, device):
        """Build and initialize the skorch-wrapped 3-class patch classifier."""
        module = vertebra_classify.classify_cnn(n_classes=3, softmax=True, imsize=target_size)
        net = NeuralNetClassifier(
            module,
            batch_size=1,
            max_epochs=50,
            optimizer=SGD,
            lr=0.01,
            optimizer__momentum=0.9,
            iterator_train__shuffle=True,
            iterator_train__num_workers=4,
            iterator_valid__shuffle=False,
            iterator_valid__num_workers=4,
            device=device,
            parallel=True
        )
        net.initialize()
        return net

    def __init__(self, data_root, seg_dirname, classify_dirname, training_flag, thresh, target_size, device):
        """
        :param data_root: root directory holding checkpoints and stats files
        :param seg_dirname: subdirectory with the segmentation checkpoint
        :param classify_dirname: subdirectory with the classifier checkpoint
        :param training_flag: stored flag (not otherwise used in this class)
        :param thresh: stored threshold (not otherwise used in this class)
        :param target_size: patch size as (D, H, W)
        :param device: torch device for both models
        """
        super().__init__()
        self.eps = 1e-7
        # Lower clip bound in Hounsfield units used during normalization.
        self.LowerHU = 200.0
        self.thresh = thresh
        self.training = training_flag
        self.data_root = data_root
        self.seg_dirname = seg_dirname
        self.classify_dirname = classify_dirname
        self.target_size = target_size
        self.seg_model = self.init_seg_model(target_size, device)
        self.classify_model = self.init_classify_model(target_size, device)
        with open(os.path.join(self.data_root, "intensityproperties.pkl"), 'rb') as f:
            self.intensityproperties = pickle.load(f)

    def init(self):
        """Load trained weights for both models from their checkpoint dirs."""
        self.seg_model.load_params(f_params=os.path.join(self.data_root, self.seg_dirname, 'best_params_v2.pt'))
        self.classify_model.load_params(f_params=os.path.join(self.data_root, self.classify_dirname, 'best_params_v2.pt'))
        # self.mean_intensity = self.intensityproperties[0]['mean']
        # self.std_intensity = self.intensityproperties[0]['sd']

    def norm_input_data(self, image:NPImage, stats):
        """Normalize an image with (mean, std) stats and add a channel axis."""
        img_t = image[np.newaxis]
        # uint8 images are rescaled to [0, 1]; other dtypes used as-is
        img_t = torch.from_numpy((img_t / (255. if img_t.dtype == np.uint8 else 1)).astype(np.float32))
        return (img_t - torch.tensor(stats[0])) / torch.tensor(stats[1])

    def patch_infer(self, input_patch):
        """Run classifier + segmenter on one patch.

        :param input_patch: 3-D float array (D, H, W)
        :return: (binary mask scaled to {0, 255} as uint8, classifier output)
        """
        # img_t = self.norm_input_data(input_patch, (self.mean_intensity, self.std_intensity)).unsqueeze_(0)
        predicted_probs = self.classify_model.predict(input_patch[np.newaxis, np.newaxis, ...])
        predicted_mask = self.seg_model.predict(input_patch[np.newaxis, np.newaxis, ...])
        predicted_mask = predicted_mask.argmax(1)
        predicted_mask = predicted_mask.astype(np.uint8)
        predicted_mask = (predicted_mask * 255).astype(np.uint8)
        predicted_mask = np.squeeze(predicted_mask)
        return predicted_mask, predicted_probs

    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
        """Resample data/seg to target spacing, then clip and z-score the image.

        Follows the nnU-Net CT preprocessing scheme, but clips the lower
        bound at self.LowerHU instead of the 0.5th percentile.
        """
        # 1. resample original image
        original_spacing = np.array(properties['original_spacing'])[[0,1,2]]

        before = {'original image spacing': original_spacing, 'original image shape': data.shape}
        data, seg = resample_patient(data, seg, np.array(original_spacing), target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0)

        after = {'spacing': target_spacing, 'data.shape (data is resampled)': data.shape}
        print("before:", before, "\nafter: ", after, "\n")

        if seg is not None:  # guard against stray negative labels seen in some datasets
            seg[seg < -1] = 0

        properties['size_after_resampling'] = data[0].shape
        properties['spacing_after_resampling'] = target_spacing

        # 2. clipping + normalization of the original image
        print("normalization...")

        for c in range(len(data)):
            # clip to lb and ub from train data foreground and use foreground mn and sd from training data
            assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
            mean_intensity = self.intensityproperties[c]['mean']
            std_intensity = self.intensityproperties[c]['sd']
            lower_bound = self.intensityproperties[c]['percentile_00_5']
            upper_bound = self.intensityproperties[c]['percentile_99_5']
            # NOTE(review): clips at LowerHU, not lower_bound (which is
            # computed but unused) — confirm this override is intended.
            data[c] = np.clip(data[c], self.LowerHU, upper_bound)
            data[c] = (data[c] - mean_intensity) / std_intensity

        print("normalization done")
        return data, seg, properties

    def load_image(self, img_file):
        """Read a NIfTI file; return (data, empty seg, spacing properties).

        NOTE(review): `all_data[:-1]` drops the last slice along the first
        axis — confirm whether that is deliberate (e.g. stripping a label
        channel) or a leftover.
        """
        properties = {}
        originalImage = sitk.ReadImage(str(img_file))
        all_data = sitk.GetArrayFromImage(originalImage)
        data = all_data[:-1].astype(np.float32)
        seg = np.zeros(data.shape).astype(np.float32)  # all_data[-1:]
        properties['original_spacing'] = originalImage.GetSpacing()
        return data[np.newaxis, ...], seg[np.newaxis, ...], properties

    def loadAndResampleData(self, img_file):
        """Load an image and resample/normalize it to 1mm isotropic spacing."""
        transpose_forward = [0, 1, 2]
        target_spacing = np.array([1.0, 1.0, 1.0]).astype(np.float64)
        force_separate_z = None

        data, seg, properties = self.load_image(img_file)
        data = data.transpose((0, *[i + 1 for i in transpose_forward]))
        seg = seg.transpose((0, *[i + 1 for i in transpose_forward]))

        data, seg, properties = self.resample_and_normalize(data, target_spacing, properties, seg, force_separate_z)
        return data, seg, properties

    def centerConvertzyxzyx(self, _centerzyx):
        """Convert a crop centre (z, y, x) into a clipped bounding box
        [z0, y0, x0, z1, y1, x1] using the configured crop radii."""
        _cropZYXZYX = np.array([_centerzyx[0] - self.z_crop_radius, _centerzyx[1] - self.y_crop_radius,
                                _centerzyx[2] - self.x_crop_radius, _centerzyx[0] + self.z_crop_radius - 1,
                                _centerzyx[1] + self.y_crop_radius - 1, _centerzyx[2] + self.x_crop_radius - 1])
        _cropZYXZYX[0::3] = np.clip(_cropZYXZYX[0::3], 0, self.d - 1)
        _cropZYXZYX[1::3] = np.clip(_cropZYXZYX[1::3], 0, self.h - 1)
        _cropZYXZYX[2::3] = np.clip(_cropZYXZYX[2::3], 0, self.w - 1)
        return _cropZYXZYX

    def ImageInfer(self, img_file, save_seg_dir):
        """Slide classifier-guided crops over the volume and segment it.

        The scan advances the crop centre along z; when the classifier
        reports a (partial or complete) vertebra, the centre is re-anchored
        at the mask centroid instead of advancing.

        NOTE(review): several parts look unfinished — `label_coord` is a
        hard-coded placeholder, `Counter`/`random` must come from the
        wildcard imports at the top of the file, and the function returns
        the normalized input `data` rather than an assembled segmentation.
        """
        self.init()
        save_seg_dir = self.data_root + save_seg_dir
        data, seg, properties = self.loadAndResampleData(img_file)
        maybe_mkdir_p(save_seg_dir)

        self.x_crop_radius = self.target_size[2] // 2
        self.y_crop_radius = self.target_size[1] // 2
        self.z_crop_radius = self.target_size[0] // 2
        x_step = int(self.x_crop_radius)
        y_step = int(self.y_crop_radius)
        z_step = int(self.z_crop_radius // 2)

        case_all_data_donly = data[0]
        case_all_data_segonly = seg[0]

        thresh_list = [0.98, 0.96, 0.94, 0.92, 0.90]
        global_thresh = max(thresh_list)

        # Memory volume for already-segmented vertebra instances.
        instace_memory_volume = np.zeros(case_all_data_segonly.shape, dtype=case_all_data_segonly.dtype)
        self.d, self.h, self.w = case_all_data_donly.shape
        start_z = 0 - self.z_crop_radius  # + z_crop_radius//2
        start_y = 0 - self.y_crop_radius + self.y_crop_radius // 2
        start_x = 0 - self.x_crop_radius + self.x_crop_radius // 2
        end_z = self.d
        change_z = True
        while (start_z < end_z):
            if change_z:
                start_z += z_step
            tmp_zyx = (start_z, start_y, start_x)
            print('center zyx:', tmp_zyx)
            # compute the crop box coordinates
            _cropZYXZYX = self.centerConvertzyxzyx(tmp_zyx)
            # extract the patch
            patchimage = case_all_data_donly[_cropZYXZYX[0]: _cropZYXZYX[3] + 1, _cropZYXZYX[1]:_cropZYXZYX[4] + 1,
                       _cropZYXZYX[2]:_cropZYXZYX[5] + 1]
            # pad up to target_size when the crop was clipped at a border
            if patchimage.shape != self.target_size:
                pad_size = [[self.target_size[0] - patchimage.shape[0], 0],
                            [self.target_size[1] - patchimage.shape[1], 0],
                            [self.target_size[2] - patchimage.shape[2], 0]]
                patchimage = np.pad(patchimage, pad_size, "constant", **{'constant_values': 0})
            # patch infer
            predicted_mask, predicted_probs = self.patch_infer(patchimage.astype(np.float32))
            # 2 presumably means PatchStatus.containsnothing — TODO confirm
            if predicted_probs != 2:
                if predicted_probs == 0:
                    print(Counter(predicted_mask.flatten()))
                    data_itk = sitk.GetImageFromArray(predicted_mask)
                    sitk.WriteImage(data_itk, 'label=0.nii')
                    # ======================================= #
                    # 0) locate the region of the complete bone
                    predicted_image = sitk.GetImageFromArray(predicted_mask)
                    mean_centre = np.mean(np.where(predicted_mask == 255), axis=1)
                    print('mean_centre:', mean_centre)
                    centre = mean_centre.copy()
                    # jitter the seed until it lands on a foreground voxel
                    while predicted_mask[int(centre[0]), int(centre[1]), int(centre[2])] != 255:
                        centre[0] = random.randint(int(mean_centre[0]), int(mean_centre[0])+32)
                        centre[1] = random.randint(int(mean_centre[1]), int(mean_centre[1])+32)
                        centre[2] = random.randint(int(mean_centre[2]), int(mean_centre[2])+32)
                        centre[centre > 127] = 127
                        print('centre:', centre)
                    seed_point = list([(int(centre[0]), int(centre[1]), int(centre[2]))])
                    # Confidence Connected Method
                    seg_explicit_threshold = sitk.ConfidenceConnected(predicted_image, seedList=seed_point,
                                                                      numberOfIterations=5, multiplier=4.5,
                                                                      initialNeighborhoodRadius=1, replaceValue=1)
                    # Connected Threshold Method
                    # seg_explicit_threshold = sitk.ConnectedThreshold(predicted_image, seedList=seed_point,
                    #                                                   lower=100, upper=170)
                    # patchimage_itk = sitk.GetImageFromArray(patchimage)
                    # result = sitk.LabelOverlay(patchimage_itk, seg_explicit_threshold)
                    sitk.WriteImage(seg_explicit_threshold, 'result.nii')
                    # NOTE(review): hard-coded placeholder coordinates
                    label_coord = [1, 2, 3]
                    # 1) store into the instance memory
                    instace_memory_volume[label_coord] = case_all_data_segonly[label_coord]
                    # 2) clear the label region associated with the seg mask
                    case_all_data_segonly[label_coord] = 0
                    # ======================================= #
                    voxel_where = np.where(predicted_mask > 0)
                    mean_where = np.mean(voxel_where, axis=1)
                if predicted_probs == 1:
                    print(Counter(predicted_mask.flatten()))
                    data_itk = sitk.GetImageFromArray(predicted_mask)
                    sitk.WriteImage(data_itk, 'label=1.nii')
                    voxel_where = np.where(predicted_mask > 0)
                    mean_where = np.mean(voxel_where, axis=1)
                change_z = False
                # NOTE(review): np.int was removed in NumPy >= 1.24; this line
                # needs `int` (or np.int64) on modern NumPy.
                m_start_z, m_start_y, m_start_x = (mean_where.astype(np.int)).tolist()
                if start_z == m_start_z and start_y == m_start_y and start_x == m_start_x:
                    # centroid did not move: nudge forward along z
                    start_z += z_step // 2
                else:
                    # moving backwards in z: relax the global threshold one notch
                    if m_start_z < start_z:
                        for t in thresh_list:
                            if t < global_thresh:
                                global_thresh = t
                                break

                    start_z = m_start_z
                    start_y = m_start_y
                    start_x = m_start_x
            else:
                # empty patch: advance in-plane, wrapping and stepping z when
                # the scan runs off the y/x extent
                change_z = False
                start_y = (start_y + y_step)
                start_x = (start_x + x_step)
                if start_y >= self.h - self.y_crop_radius or start_x >= self.w - self.x_crop_radius:
                    change_z = True
                    start_y = start_y % (self.h - self.y_crop_radius)
                    start_x = start_x % (self.w - self.x_crop_radius)

        result = data
        return result


def predict():
    """Run the full VertebraInfer pipeline on a single CT volume."""
    target_size = (128, 128, 128)
    thresh = 0.5
    data_root = '/home/cao/disk1/cx/vertebra_segment_data/'
    classify_dirname = 'classify_v2_check'
    seg_dirname = 'segment_v2_check'
    training_flag = False
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    img_file = "/home/cao/disk1/cx/VerSe_2019/xVertSeg2019/xVertSeg2019/training_phase_2_release/verse075.nii"
    save_seg_dir = 'result'

    # 1. build the combined classification + segmentation inference model
    infer_model = VertebraInfer(data_root, seg_dirname, classify_dirname, training_flag, thresh, target_size, device)
    # 2. run sliding-crop inference over the original image
    infer_result = infer_model.ImageInfer(img_file, save_seg_dir)

    # 3. reload the original image (kept for downstream post-processing)
    input_img = sitk.GetArrayFromImage(sitk.ReadImage(img_file))


def vertebra_segment():
    """Top-level driver: announce and run training (inference disabled)."""
    print("vertebra_segment")
    train()
    # predict()


# Script entry point: launches the training/inference driver.
if __name__ == '__main__':
    vertebra_segment()