{
    "imports": [
        "$import glob",
        "$import os"
    ],
    "bundle_root": "$os.getcwd()",
    "output_dir": ".",
    "dataset_dir": "CoNSeP/Test/Images",
    "num_cpus": 6,
    "batch_size": 1,
    "sw_batch_size": 16,
    "hovernet_mode": "fast",
    "patch_size": 256,
    "out_size": 164,
    "device": "$torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
    "network_def": {
        "_target_": "HoVerNet",
        "mode": "@hovernet_mode",
        "adapt_standard_resnet": true,
        "in_channels": 3,
        "out_classes": 5
    },
    "network": "$@network_def.to(@device)",
    "preprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "LoadImaged",
                "keys": "image",
                "reader": "$monai.data.PILReader",
                "converter": "$lambda x: x.convert('RGB')"
            },
            {
                "_target_": "EnsureChannelFirstd",
                "keys": "image"
            },
            {
                "_target_": "CastToTyped",
                "keys": "image",
                "dtype": "float32"
            },
            {
                "_target_": "ScaleIntensityRanged",
                "keys": "image",
                "a_min": 0.0,
                "a_max": 255.0,
                "b_min": 0.0,
                "b_max": 1.0,
                "clip": true
            }
        ]
    },
    "data_list": "$[{'image': image} for image in glob.glob(os.path.join(@dataset_dir, '*.png'))]",
    "dataset": {
        "_target_": "Dataset",
        "data": "@data_list",
        "transform": "@preprocessing"
    },
    "dataloader": {
        "_target_": "DataLoader",
        "dataset": "@dataset",
        "batch_size": "@batch_size",
        "shuffle": false,
        "num_workers": "@num_cpus",
        "pin_memory": true
    },
    "inferer": {
        "_target_": "SlidingWindowHoVerNetInferer",
        "roi_size": "@patch_size",
        "sw_batch_size": "@sw_batch_size",
        "overlap": "$1.0 - float(@out_size) / float(@patch_size)",
        "padding_mode": "constant",
        "cval": 0,
        "progress": true,
        "extra_input_padding": "$((@patch_size - @out_size) // 2,) * 4"
    },
    "postprocessing": {
        "_target_": "Compose",
        "transforms": [
            {
                "_target_": "FlattenSubKeysd",
                "keys": "pred",
                "sub_keys": [
                    "horizontal_vertical",
                    "nucleus_prediction",
                    "type_prediction"
                ],
                "delete_keys": true
            },
            {
                "_target_": "HoVerNetInstanceMapPostProcessingd",
                "sobel_kernel_size": 21,
                "marker_threshold": 0.4,
                "marker_radius": 2
            },
            {
                "_target_": "HoVerNetNuclearTypePostProcessingd"
            },
            {
                "_target_": "FromMetaTensord",
                "keys": [
                    "image"
                ]
            }
        ]
    },
    "handlers": [
        {
            "_target_": "CheckpointLoader",
            "load_path": "$os.path.join(@bundle_root, 'models', 'model.pt')",
            "map_location": "@device",
            "load_dict": {
                "model": "@network"
            }
        }
    ],
    "evaluator": {
        "_target_": "SupervisedEvaluator",
        "device": "@device",
        "val_data_loader": "@dataloader",
        "val_handlers": "@handlers",
        "network": "@network",
        "postprocessing": "@postprocessing",
        "inferer": "@inferer",
        "amp": true
    },
    "evaluating": [
        "$setattr(torch.backends.cudnn, 'benchmark', True)",
        "$@evaluator.run()"
    ]
}