import numpy as np
import torch
import torch.nn as nn
from torch.nn import LazyConv3d, MaxPool3d, BatchNorm3d

from torch.nn.modules import Module
from torch.nn.modules import ReLU
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.instancenorm import InstanceNorm3d
from custom_modules import LazyConvDropoutNormNonlinCat, ModularConvLayers, LazyConvBottleneckLayer


class modular_hdunet_encoder(Module):
    """HDUnet encoder with modular parameters
    """

    def __init__(self, base_num_filter, num_blocks_per_stage, num_stages, pool_kernel_sizes, conv_kernel_sizes,
                 padding='same', conv_type: Module = LazyConvDropoutNormNonlinCat, norm_type: Module = InstanceNorm3d,
                 dropout_type: Module = Dropout, dropout_rate=0, expansion_rate=1, pooling_type: Module = MaxPool3d,
                 pooling_kernel_size=(2, 2, 2), nonlin: Module = ReLU):
        """Object creation

        :param base_num_filter: base number of filters (output channels).
        :param num_blocks_per_stage: number of convolutional blocks per stage (can be different for each stage).
        :param num_stages: number of stages.
        :param pool_kernel_sizes: the last conv layer of each stage is strided; this parameter sets its kernel size
        and stride (can be different for each stage).
        Note that the modular decoder retrieves this parameter and uses it as the upsampling scale factor.
        :param conv_kernel_sizes: kernel size (can be different for each stage).
        :param padding: padding used, default is 'same'.
        :param conv_type: type of convolution used, default is a lazy convolution using:
            - dropout;
            - normalization;
            - nonlinear activation function;
            - concatenation.
        Must be a torch Module (should be a custom Module).
        :param norm_type: normalization type that is used, default is 3D instance normalization. Must be a torch Module.
        :param dropout_type: dropout type that is used, default is Dropout. Must be a torch Module.
        :param dropout_rate: dropout rate used by dropout, default is 0.
        :param expansion_rate: expansion rate used to modify the number of filters, default is 1.
        :param pooling_type: type of pooling used, default is 3D max pooling. Must be a torch Module.
        :param pooling_kernel_size: kernel size of the pooling layer, default is (2, 2, 2).
        :param nonlin: the nonlinear activation function to use, default is ReLU. Must be a torch Module.
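
        Example (sketch; the values below are illustrative and assume ``custom_modules``
        is importable alongside this file)::

            encoder = modular_hdunet_encoder(
                base_num_filter=16, num_blocks_per_stage=2, num_stages=3,
                pool_kernel_sizes=[(2, 2, 2)] * 3,
                conv_kernel_sizes=[(3, 3, 3)] * 3)
            skips = encoder(torch.zeros(1, 1, 64, 64, 64))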
        """
        super(modular_hdunet_encoder, self).__init__()
        self.base_num_filter = base_num_filter
        self.num_blocks_per_stage = num_blocks_per_stage
        self.num_stages = num_stages
        self.pool_kernel_sizes = pool_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes
        self.padding = padding
        self.conv_type = conv_type
        self.norm_type = norm_type
        self.dropout_type = dropout_type
        self.dropout_rate = dropout_rate
        self.nonlin = nonlin
        self.expansion_rate = expansion_rate
        self.pooling_type = pooling_type
        self.pooling_kernel_size = pooling_kernel_size

        self.stages = []
        self.pooling_stages = []
        self.end_stages = []
        self.stage_output_features = []
        self.stage_pool_kernel_size = []
        self.stage_conv_kernel_size = []

        assert len(pool_kernel_sizes) == len(conv_kernel_sizes) == num_stages, \
            "pool_kernel_sizes and conv_kernel_sizes must have one entry per stage"

        if not isinstance(num_blocks_per_stage, (list, tuple)):
            num_blocks_per_stage = [num_blocks_per_stage] * num_stages
        else:
            assert len(num_blocks_per_stage) == num_stages

        self.num_blocks_per_stage = num_blocks_per_stage

        current_out_channels = 0
        # This is where we manage the number of steps
        for stage in range(num_stages):
            # int() because PyTorch layers expect an int channel count (np.round returns a numpy scalar)
            current_out_channels = int(np.round((expansion_rate ** stage) * self.base_num_filter))
            current_num_blocks_per_stage = num_blocks_per_stage[stage]
            current_pool_kernel_size = pool_kernel_sizes[stage]
            current_kernel_size = conv_kernel_sizes[stage]

            current_stage = ModularConvLayers(output_channels=current_out_channels,
                                              num_conv_layers=current_num_blocks_per_stage,
                                              kernel_size=current_kernel_size, padding=padding, conv_type=conv_type,
                                              norm_type=norm_type, dropout_type=dropout_type, dropout_rate=dropout_rate,
                                              nonlin=self.nonlin)

            self.pooling_stages.append(pooling_type(kernel_size=current_pool_kernel_size))

            # BatchNorm3d added statically here (to be similar to the original model)
            current_end_stage = nn.Sequential(
                LazyConv3d(out_channels=current_out_channels, kernel_size=current_pool_kernel_size,
                           stride=current_pool_kernel_size, padding=0), nonlin(), BatchNorm3d(current_out_channels)
            )

            self.stages.append(current_stage)
            self.end_stages.append(current_end_stage)
            self.stage_output_features.append(current_out_channels)
            self.stage_pool_kernel_size.append(current_pool_kernel_size)
            self.stage_conv_kernel_size.append(current_kernel_size)

        self.stages = nn.ModuleList(self.stages)
        self.pooling_stages = nn.ModuleList(self.pooling_stages)
        self.end_stages = nn.ModuleList(self.end_stages)
        self.output_features = current_out_channels
        # self.features_reduction = nn.Conv1d(current_out_channels, current_out_channels // 2, 3, stride=2)

    def forward(self, x):
        """Forward inputs through the layer

        :param x: the input to forward.
        :return: a list containing the output of each stage of the down-sampling path (taken before concatenation),
         which will be used in the decoder later on. The last element is the final encoder output (after
         concatenation) and will be used in the bottleneck. Therefore, with n stages the list contains n + 1 values.
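
        Example: with ``num_stages=3``, ``len(skips) == 4``; ``skips[:3]`` are the
        per-stage skip connections and ``skips[3]`` feeds the bottleneck.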
        """
        skips = []

        for i, stage in enumerate(self.stages):
            x = stage(x)
            pooled = self.pooling_stages[i](x)    # downsampling path 1: pooling
            strided = self.end_stages[i](x)       # downsampling path 2: strided convolution
            skips.append(x)                       # skip taken before concatenation
            x = torch.cat([strided, pooled], dim=1)
        skips.append(x)
        # skips[-1]=self.features_reduction(skips[-1])
        return skips


class modular_hdunet_bottleneck(Module):
    """HDUnet bottleneck with modular parameters
    """

    def __init__(self, base_num_filter, num_stages, conv_kernel_sizes, padding='same', num_steps_bottleneck=4,
                 conv_type: Module = LazyConvBottleneckLayer, norm_type: Module = InstanceNorm3d,
                 dropout_type: Module = Dropout, dropout_rate=0, expansion_rate=1, nonlin: Module = ReLU):
        """Object creation

        :param base_num_filter: base number of filters (output channels).
        :param num_stages: number of stages of the encoder.
        :param conv_kernel_sizes: kernel size (can be different for each step of the bottleneck).
        :param padding: padding used, default is 'same'.
        :param num_steps_bottleneck: number of steps in the bottleneck, default is 4.
        :param conv_type: type of convolution used, default is a lazy convolution using:
            - dropout;
            - normalization;
            - nonlinear activation function.
        Must be a torch Module (should be a custom Module).
        :param norm_type: normalization type that is used, default is 3D instance normalization. Must be a torch Module.
        :param dropout_type: dropout type that is used, default is Dropout. Must be a torch Module.
        :param dropout_rate: dropout rate used by dropout, default is 0.
        :param expansion_rate: expansion rate used to modify the number of filters, default is 1.
        :param nonlin: the nonlinear activation function to use, default is ReLU. Must be a torch Module.
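
        Example (sketch; the values below are illustrative and assume ``custom_modules``
        is importable)::

            bottleneck = modular_hdunet_bottleneck(
                base_num_filter=16, num_stages=3,
                conv_kernel_sizes=[(3, 3, 3)] * 4)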
        """
        super(modular_hdunet_bottleneck, self).__init__()
        self.base_num_filter = base_num_filter
        self.conv_kernel_sizes = conv_kernel_sizes
        self.padding = padding
        self.num_steps_bottleneck = num_steps_bottleneck
        self.conv_type = conv_type
        self.norm_type = norm_type
        self.dropout_type = dropout_type
        self.dropout_rate = dropout_rate
        self.expansion_rate = expansion_rate
        self.nonlin = nonlin

        encoder_output_features = int(np.round((expansion_rate ** num_stages) * base_num_filter))

        self.stages = []
        self.step_conv_kernel_size = []

        assert len(conv_kernel_sizes) == num_steps_bottleneck, \
            "conv_kernel_sizes must have one entry per bottleneck step"

        # This is where we manage the number of steps
        for step in range(num_steps_bottleneck):
            current_kernel_size = conv_kernel_sizes[step]
            self.stages.append(
                conv_type(output_channels=encoder_output_features, kernel_size=current_kernel_size, padding=padding,
                          norm_type=norm_type, dropout_type=dropout_type,
                          dropout_rate=dropout_rate, nonlin=self.nonlin)
            )

        self.stages = nn.ModuleList(self.stages)
        

    def forward(self, x):
        """Forward inputs through the layer

        :param x: the input to forward. At each step the input is concatenated with
        its result in order to produce the input of the next bottleneck layer.
        :return: the input forwarded through the layer.
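
        Worked example: with ``num_steps_bottleneck=4`` and an input of ``c``
        channels, each step appends ``encoder_output_features`` channels along
        dim 1, so the output has ``c + 4 * encoder_output_features`` channels.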
        """
        for stage in self.stages:
            out = stage(x)
            # Dense connectivity: concatenate each step's output with its input
            x = torch.cat([out, x], dim=1)
        return x


class modular_hdunet_decoder(Module):
    """HDUnet decoder with modular parameters
    """

    def __init__(self, previous, base_num_filter, num_blocks_per_stage=None, padding='same',
                 conv_type: Module = LazyConvDropoutNormNonlinCat, norm_type: Module = InstanceNorm3d,
                 dropout_type: Module = Dropout, dropout_rate=0, expansion_rate=1, nonlin: Module = ReLU):
        """Object creation

        :param previous: the encoder previously used in the model. It is used to retrieve information that does not
        change, such as the number of stages or the per-stage kernel sizes.
        :param base_num_filter: base number of filters (output channels).
        :param num_blocks_per_stage: number of convolutional blocks per stage (can be different for each stage).
        If set to None, it will be the same as the encoder's (reversed).
        :param padding: padding used, default is 'same'.
        :param conv_type: type of convolution used, default is a lazy convolution using:
            - dropout;
            - normalization;
            - nonlinear activation function;
            - concatenation.
        Must be a torch Module (should be a custom Module).
        :param norm_type: normalization type that is used, default is 3D instance normalization. Must be a torch Module.
        :param dropout_type: dropout type that is used, default is Dropout. Must be a torch Module.
        :param dropout_rate: dropout rate used by dropout, default is 0.
        :param expansion_rate: expansion rate used to modify the number of filters, default is 1.
        :param nonlin: the nonlinear activation function to use, default is ReLU. Must be a torch Module.
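
        Example (sketch; ``encoder`` and ``reversed_skips`` are hypothetical names
        for an already-built encoder and the reversed skip list)::

            decoder = modular_hdunet_decoder(previous=encoder, base_num_filter=16)
            decoder.set_skips(reversed_skips)  # see modular_hdunet.forward
            out = decoder(reversed_skips[0])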
        """
        super(modular_hdunet_decoder, self).__init__()
        self.base_num_filter = base_num_filter
        self.num_blocks_per_stage = num_blocks_per_stage
        self.padding = padding
        self.conv_type = conv_type
        self.norm_type = norm_type
        self.dropout_type = dropout_type
        self.dropout_rate = dropout_rate
        self.expansion_rate = expansion_rate
        self.nonlin = nonlin

        # The skips are provided via set_skips because we use lazy layers and torchsummary does not
        # allow a list as a parameter of the forward function.
        self.skips = []

        # We retrieve the 'architectural' information that was provided to the encoder
        # in order to keep the decoder consistent with it
        previous_stages = previous.stages
        previous_stage_output_features = previous.stage_output_features
        previous_stage_pool_kernel_size = previous.stage_pool_kernel_size
        previous_stage_conv_kernel_size = previous.stage_conv_kernel_size

        # The decoder has the same number of stages as the encoder, since the bottleneck is handled separately
        self.num_stages = len(previous_stages)

        # If num_blocks_per_stage is None, use the encoder's configuration (reversed)
        if num_blocks_per_stage is None:
            self.num_blocks_per_stage = previous.num_blocks_per_stage[::-1]

        if not isinstance(self.num_blocks_per_stage, (list, tuple)):
            self.num_blocks_per_stage = [self.num_blocks_per_stage] * self.num_stages
        else:
            assert len(self.num_blocks_per_stage) == self.num_stages

        # There should be the same number of stages since we are doing the bottleneck and the encoder parts separately
        assert len(self.num_blocks_per_stage) == len(previous.num_blocks_per_stage)

        self.stage_output_features = previous_stage_output_features
        self.stage_pool_kernel_size = previous_stage_pool_kernel_size[::-1]
        self.stage_conv_kernel_size = previous_stage_conv_kernel_size[::-1]

        self.stages = []

        # This is where we manage the number of steps.
        # The exponent mirrors the encoder: the first decoder stage matches the bottleneck width
        # (expansion_rate ** num_stages * base_num_filter) and shrinks by one power of expansion_rate per stage.
        for stage in range(self.num_stages):
            current_out_channels = int(np.round(
                (expansion_rate ** (self.num_stages - stage)) * self.base_num_filter))
            current_num_blocks_per_stage = self.num_blocks_per_stage[stage]
            current_pool_kernel_size = self.stage_pool_kernel_size[stage]
            current_kernel_size = self.stage_conv_kernel_size[stage]
            self.stages.append(
                ModularConvLayers(output_channels=current_out_channels, kernel_size=current_kernel_size,
                                  padding=padding, pool_size=current_pool_kernel_size, conv_type=conv_type,
                                  norm_type=norm_type, dropout_type=dropout_type, dropout_rate=dropout_rate,
                                  num_conv_layers=current_num_blocks_per_stage, nonlin=self.nonlin, upsampling=True))

        self.stages = nn.ModuleList(self.stages)

    def forward(self, x):
        """Forward inputs through the layer

        :param x: the input to forward.
        :return: the input forwarded through the layer.
        """
        for i, stage in enumerate(self.stages):
            x = stage(x, self.skips[i + 1])
        return x

    def set_skips(self, skips):
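        """Provide the skip connections before calling forward.

        Expected layout (see ``modular_hdunet.forward``): the list is reversed,
        so ``skips[0]`` is the bottleneck output (used as the initial input) and
        ``skips[i + 1]`` is consumed by decoder stage ``i``.
        """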
        self.skips = skips


# We did our best to allow a maximum of modularity while keeping the exposed parameters meaningful.
# Nevertheless, we cannot guarantee that the model will work no matter what parameters you pass.
# If you change some parameters and the result is not what you expected, make sure you understand how they interact.
# If you want to change the type of convolutional layer used, we advise you to check how the existing ones
# have been implemented.
# “With great power comes great responsibility” Uncle Ben.
class modular_hdunet(Module):
    """HDUnet model with modular parameters
    """

    def __init__(self, base_num_filter, num_blocks_per_stage_encoder, num_stages,
                 pool_kernel_sizes, conv_kernel_sizes, conv_bottleneck_kernel_sizes, num_blocks_per_stage_decoder=None,
                 padding='same', num_steps_bottleneck=4, conv_type: Module = LazyConvDropoutNormNonlinCat,
                 bottleneck_conv_type: Module = LazyConvBottleneckLayer, norm_type: Module = InstanceNorm3d,
                 dropout_type: Module = Dropout, dropout_rate=0, expansion_rate=1, pooling_type: Module = MaxPool3d,
                 pooling_kernel_size=(2, 2, 2), nonlin: Module = ReLU):
        """Object creation

        :param base_num_filter: base number of filters (output channels).
        :param num_blocks_per_stage_encoder: number of convolutional blocks per stage for the encoder
        (can be different for each stage).
        :param num_stages: number of stages.
        :param pool_kernel_sizes: the last convolutional layer of each encoder stage is strided; this parameter
        sets its kernel size and stride (can be different for each stage).
        :param conv_kernel_sizes: kernel size for the encoder and decoder (can be different for each stage).
        :param conv_bottleneck_kernel_sizes: kernel size for the bottleneck (can be different for each step).
        :param num_blocks_per_stage_decoder: number of convolutional blocks per stage for the decoder
        (can be different for each stage). Default is None (it will be the same as the encoder's, reversed).
        :param padding: padding used, default is 'same'.
        :param num_steps_bottleneck: number of steps in the bottleneck, default is 4.
        :param conv_type: type of convolution used, default is a lazy convolution using:
            - dropout;
            - normalization;
            - nonlinear activation function;
            - concatenation.
        Must be a torch Module (should be a custom Module).
        :param bottleneck_conv_type: type of convolution used in the bottleneck, default is a lazy convolution using:
            - dropout;
            - normalization;
            - nonlinear activation function.
        Must be a torch Module (should be a custom Module).
        :param norm_type: normalization type that is used, default is 3D instance normalization. Must be a torch Module.
        :param dropout_type: dropout type that is used, default is Dropout. Must be a torch Module.
        :param dropout_rate: dropout rate used by dropout, default is 0.
        :param expansion_rate: expansion rate used to modify the number of filters, default is 1.
        :param pooling_type: type of pooling used, default is 3D max pooling. Must be a torch Module.
        :param pooling_kernel_size: kernel size of the pooling layer, default is (2, 2, 2).
        :param nonlin: the nonlinear activation function to use, default is ReLU. Must be a torch Module.
        """
        super(modular_hdunet, self).__init__()
        self.nonlin = nonlin
        self.encoder = modular_hdunet_encoder(base_num_filter=base_num_filter,
                                              num_blocks_per_stage=num_blocks_per_stage_encoder, num_stages=num_stages,
                                              pool_kernel_sizes=pool_kernel_sizes, conv_kernel_sizes=conv_kernel_sizes,
                                              padding=padding, conv_type=conv_type, norm_type=norm_type,
                                              dropout_type=dropout_type, dropout_rate=dropout_rate,
                                              expansion_rate=expansion_rate, pooling_type=pooling_type,
                                              pooling_kernel_size=pooling_kernel_size, nonlin=self.nonlin)

        self.bottleNeck = modular_hdunet_bottleneck(base_num_filter=base_num_filter, num_stages=num_stages,
                                                    conv_kernel_sizes=conv_bottleneck_kernel_sizes, padding=padding,
                                                    num_steps_bottleneck=num_steps_bottleneck,
                                                    conv_type=bottleneck_conv_type, norm_type=norm_type,
                                                    dropout_type=dropout_type, dropout_rate=dropout_rate,
                                                    expansion_rate=expansion_rate, nonlin=self.nonlin)

        self.decoder = modular_hdunet_decoder(previous=self.encoder, base_num_filter=base_num_filter,
                                              num_blocks_per_stage=num_blocks_per_stage_decoder, padding=padding,
                                              conv_type=conv_type, norm_type=norm_type, dropout_type=dropout_type,
                                              dropout_rate=dropout_rate, expansion_rate=expansion_rate,
                                              nonlin=self.nonlin)
        
        self.last_block = nn.Sequential(
            LazyConv3d(out_channels=1, kernel_size=(3, 3, 3), padding='same'),
            nonlin()
        )

    def forward(self, x):
        """Forward inputs through the layer
        (using the forward functions of the encoder/bottleneck/decoder)

        :param x: the input to forward.
        :return: the input forwarded through the layer.
        """
        skips = self.encoder(x)
        tmp = self.bottleNeck(skips[-1])

        # After providing the last value of skips to the bottleneck,
        # we replace it with the value computed in the bottleneck
        skips = skips[:-1]
        skips.append(tmp)

        # Since the first value that'll be used in the decoder is actually the last one of the array, we reverse it.
        skips = skips[::-1]
        self.decoder.set_skips(skips)
        x = skips[0]
        x = self.decoder(x)
        return self.last_block(x)
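

if __name__ == "__main__":
    # Minimal smoke test (sketch): the parameter values below are illustrative
    # and assume `custom_modules` is importable alongside this file.
    model = modular_hdunet(
        base_num_filter=16,
        num_blocks_per_stage_encoder=2,
        num_stages=3,
        pool_kernel_sizes=[(2, 2, 2)] * 3,
        conv_kernel_sizes=[(3, 3, 3)] * 3,
        conv_bottleneck_kernel_sizes=[(3, 3, 3)] * 4)
    out = model(torch.zeros(1, 1, 64, 64, 64))
    print(out.shape)  # with 'same' padding, expected torch.Size([1, 1, 64, 64, 64])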