{
"displayable_configs": {
"highres": true,
"sw_overlap": 0.25,
"sw_batch_size": 1
},
"imports": [
"$import glob",
"$import os"
],
"bundle_root": ".",
"output_dir": "$@bundle_root + '/eval'",
"dataset_dir": "sampledata",
"datalist": "$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))",
"device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"pixdim": "$[1.5, 1.5, 1.5] if @displayable_configs#highres else [3.0, 3.0, 3.0]",
"modelname": "$'model.pt' if @displayable_configs#highres else 'model_lowres.pt'",
"network_def": {
"_target_": "SegResNet",
"spatial_dims": 3,
"in_channels": 1,
"out_channels": 105,
"init_filters": 32,
"blocks_down": [
1,
2,
2,
4
],
"blocks_up": [
1,
1,
1
],
"dropout_prob": 0.2
},
"network": "$@network_def.to(@device)",
"preprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "LoadImaged",
"keys": "image"
},
{
"_target_": "EnsureTyped",
"keys": "image"
},
{
"_target_": "EnsureChannelFirstd",
"keys": "image"
},
{
"_target_": "Orientationd",
"keys": "image",
"axcodes": "RAS"
},
{
"_target_": "Spacingd",
"keys": "image",
"pixdim": "@pixdim",
"mode": "bilinear"
},
{
"_target_": "NormalizeIntensityd",
"keys": "image",
"nonzero": true
},
{
"_target_": "ScaleIntensityd",
"keys": "image",
"minv": -1.0,
"maxv": 1.0
}
]
},
"dataset": {
"_target_": "Dataset",
"data": "$[{'image': i} for i in @datalist]",
"transform": "@preprocessing"
},
"dataloader": {
"_target_": "DataLoader",
"dataset": "@dataset",
"batch_size": 1,
"shuffle": false,
"num_workers": 1
},
"inferer": {
"_target_": "SlidingWindowInferer",
"roi_size": [
96,
96,
96
],
"sw_batch_size": "@displayable_configs#sw_batch_size",
"overlap": "@displayable_configs#sw_overlap",
"padding_mode": "replicate",
"mode": "gaussian",
"device": "@device"
},
"postprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "Activationsd",
"keys": "pred",
"softmax": true
},
{
"_target_": "AsDiscreted",
"keys": "pred",
"argmax": true
},
{
"_target_": "Invertd",
"keys": "pred",
"transform": "@preprocessing",
"orig_keys": "image",
"meta_key_postfix": "meta_dict",
"nearest_interp": true,
"to_tensor": true
},
{
"_target_": "SaveImaged",
"keys": "pred",
"meta_keys": "pred_meta_dict",
"output_dir": "@output_dir"
}
]
},
"handlers": [
{
"_target_": "CheckpointLoader",
"load_path": "$@bundle_root + '/models/' + @modelname",
"load_dict": {
"model": "@network"
}
},
{
"_target_": "StatsHandler",
"iteration_log": false
}
],
"evaluator": {
"_target_": "SupervisedEvaluator",
"device": "@device",
"val_data_loader": "@dataloader",
"network": "@network",
"inferer": "@inferer",
"postprocessing": "@postprocessing",
"val_handlers": "@handlers",
"amp": true
},
"initialize": [
"$setattr(torch.backends.cudnn, 'benchmark', True)"
],
"run": [
"$@evaluator.run()"
]
}