# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nnunet.experiment_planning.alternative_experiment_planning.experiment_planner_baseline_3DUNet_v21_16GB import \
ExperimentPlanner3D_v21_16GB
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.paths import *


class ExperimentPlanner3D_v21_noResampling(ExperimentPlanner3D_v21):
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
super(ExperimentPlanner3D_v21_noResampling, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
self.data_identifier = "nnUNetData_noRes_plans_v2.1"
self.plans_fname = join(self.preprocessed_output_folder,
"nnUNetPlansv2.1_noRes_plans_3D.pkl")
        self.preprocessor_name = "PreprocessorFor3D_NoResampling"

    def plan_experiment(self):
"""
DIFFERENCE TO ExperimentPlanner3D_v21: no 3d lowres
:return:
"""
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
        print("Are we using the nonzero mask for normalization?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]
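        # Put the axis with the largest target spacing (the coarsest, typically anisotropic axis)
        # first; transpose_backward stores the permutation that undoes this reordering.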
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = list()
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
print("generating configuration for 3d_fullres")
self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1))
# thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)
# if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
# architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:
architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)
if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
architecture_input_voxels_here < self.how_much_of_a_patient_must_the_network_see_at_stage0:
more = False
else:
more = True
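        # In ExperimentPlanner3D_v21, 'more' being True triggers planning of a 3d_lowres stage.
        # This planner deliberately skips 3d_lowres (see docstring), so the flag is kept only
        # for parity with the parent class and the branch below is a no-op.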
if more:
pass
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
print(self.plans_per_stage)
print("transpose forward", self.transpose_forward)
print("transpose backward", self.transpose_backward)
normalization_schemes = self.determine_normalization_scheme()
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# removed training data based postprocessing. This is deprecated
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
'conv_per_stage': self.conv_per_stage,
}
self.plans = plans
        self.save_my_plans()


class ExperimentPlanner3D_v21_noResampling_16GB(ExperimentPlanner3D_v21_16GB):
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
super(ExperimentPlanner3D_v21_noResampling_16GB, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
self.data_identifier = "nnUNetData_noRes_plans_16GB_v2.1"
self.plans_fname = join(self.preprocessed_output_folder,
"nnUNetPlansv2.1_noRes_16GB_plans_3D.pkl")
        self.preprocessor_name = "PreprocessorFor3D_NoResampling"

    def plan_experiment(self):
"""
        DIFFERENCE TO ExperimentPlanner3D_v21_16GB: no 3d lowres
:return:
"""
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
        print("Are we using the nonzero mask for normalization?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = list()
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
print("generating configuration for 3d_fullres")
self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1))
# thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)
# if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
# architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:
architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)
if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
architecture_input_voxels_here < self.how_much_of_a_patient_must_the_network_see_at_stage0:
more = False
else:
more = True
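        # As in ExperimentPlanner3D_v21_noResampling above, no 3d_lowres stage is planned,
        # so this branch is intentionally a no-op.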
if more:
pass
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
print(self.plans_per_stage)
print("transpose forward", self.transpose_forward)
print("transpose backward", self.transpose_backward)
normalization_schemes = self.determine_normalization_scheme()
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# removed training data based postprocessing. This is deprecated
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
'conv_per_stage': self.conv_per_stage,
}
self.plans = plans
self.save_my_plans()
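

if __name__ == "__main__":
    # Minimal usage sketch, not part of the upstream nnUNet file. The task name below is
    # hypothetical; nnUNet_cropped_data and preprocessing_output_dir come from nnunet.paths
    # and require the usual nnUNet environment variables to be set. It also assumes that
    # cropping and the DatasetAnalyzer have already been run, so dataset_properties.pkl
    # exists in the cropped-data folder. In the normal pipeline this planner is selected
    # via nnUNet's plan_and_preprocess entry point rather than being called directly.
    task = "Task001_Example"
    cropped_out_dir = join(nnUNet_cropped_data, task)
    preprocessed_out_dir = join(preprocessing_output_dir, task)

    planner = ExperimentPlanner3D_v21_noResampling(cropped_out_dir, preprocessed_out_dir)
    planner.plan_experiment()  # writes nnUNetPlansv2.1_noRes_plans_3D.pkl
    # planner.run_preprocessing(...) would follow to actually write the preprocessed data.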