{
"imports": [
"$import glob",
"$import os"
],
"bundle_root": "/workspace/MONAI_Bundle/swin_unetr_btcv_segmentation/",
"output_dir": "$@bundle_root + '/eval'",
"dataset_dir": "/dataset/dataset0",
"datalist": "$list(sorted(glob.glob(@dataset_dir + '/imagesTs/*.nii.gz')))",
"device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"network_def": {
"_target_": "SwinUNETR",
"spatial_dims": 3,
"img_size": 96,
"in_channels": 1,
"out_channels": 14,
"feature_size": 48,
"use_checkpoint": true
},
"network": "$@network_def.to(@device)",
"preprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "LoadImaged",
"keys": "image",
"reader": "ITKReader"
},
{
"_target_": "EnsureChannelFirstd",
"keys": "image"
},
{
"_target_": "Orientationd",
"keys": "image",
"axcodes": "RAS"
},
{
"_target_": "Spacingd",
"keys": "image",
"pixdim": [
1.5,
1.5,
2.0
],
"mode": "bilinear"
},
{
"_target_": "ScaleIntensityRanged",
"keys": "image",
"a_min": -175,
"a_max": 250,
"b_min": 0.0,
"b_max": 1.0,
"clip": true
},
{
"_target_": "EnsureTyped",
"keys": "image"
}
]
},
"dataset": {
"_target_": "Dataset",
"data": "$[{'image': i} for i in @datalist]",
"transform": "@preprocessing"
},
"dataloader": {
"_target_": "DataLoader",
"dataset": "@dataset",
"batch_size": 1,
"shuffle": false,
"num_workers": 4
},
"inferer": {
"_target_": "SlidingWindowInferer",
"roi_size": [
96,
96,
96
],
"sw_batch_size": 4,
"overlap": 0.5
},
"postprocessing": {
"_target_": "Compose",
"transforms": [
{
"_target_": "Activationsd",
"keys": "pred",
"softmax": true
},
{
"_target_": "Invertd",
"keys": "pred",
"transform": "@preprocessing",
"orig_keys": "image",
"meta_key_postfix": "meta_dict",
"nearest_interp": false,
"to_tensor": true
},
{
"_target_": "AsDiscreted",
"keys": "pred",
"argmax": true
},
{
"_target_": "SaveImaged",
"keys": "pred",
"meta_keys": "pred_meta_dict",
"output_dir": "@output_dir"
}
]
},
"handlers": [
{
"_target_": "CheckpointLoader",
"load_path": "$@bundle_root + '/models/model.pt'",
"load_dict": {
"model": "@network"
}
},
{
"_target_": "StatsHandler",
"iteration_log": false
}
],
"evaluator": {
"_target_": "SupervisedEvaluator",
"device": "@device",
"val_data_loader": "@dataloader",
"network": "@network",
"inferer": "@inferer",
"postprocessing": "@postprocessing",
"val_handlers": "@handlers",
"amp": true
},
"evaluating": [
"$setattr(torch.backends.cudnn, 'benchmark', True)",
"$@evaluator.run()"
]
}