from skorch.core import *
from skorch.layers import *
from skorch.vision import *
from skorch.loss import DiceLoss
from skorch.model import create_body
# Shared activation used throughout this file: in-place ReLU as a plain callable.
nonlinearity = partial(F.relu, inplace=True)


def unet(n_classes: int=2, feature_scale:int=1, imsize:Sizes = (1024,1024), arch:Callable = models.resnet18,
         pretrained:bool=True, cut: Union[int, Callable] = None, debug=False):
    """Build a U-Net segmentation model.

    NOTE(review): despite the signature, only ``n_classes``, ``feature_scale``
    and ``debug`` are used — ``imsize``, ``arch``, ``pretrained`` and ``cut``
    are currently ignored and an ``OriginalUNet`` is always returned.
    Confirm whether arch/pretrained support was intended here.
    """
    model = models.OriginalUNet(n_classes=n_classes, feature_scale=feature_scale, debug=debug)
    return model


class DACblock(nn.Module):
    """Dense Atrous Convolution block (CE-Net): four parallel branches with
    growing receptive fields, summed together with the identity input.
    Padding matches dilation on each 3x3 conv, so spatial size is preserved."""

    def __init__(self, channel):
        super(DACblock, self).__init__()
        self.dilate1 = init_default(nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1))
        self.dilate2 = init_default(nn.Conv2d(channel, channel, kernel_size=3, dilation=3, padding=3))
        self.dilate3 = init_default(nn.Conv2d(channel, channel, kernel_size=3, dilation=5, padding=5))
        self.conv1x1 = init_default(nn.Conv2d(channel, channel, kernel_size=1, dilation=1, padding=0))

    def forward(self, x):
        # Branches share the dilate1/dilate2 prefix; compute it once.
        branch1 = nonlinearity(self.dilate1(x))
        branch2 = nonlinearity(self.conv1x1(self.dilate2(x)))
        stacked = self.dilate2(self.dilate1(x))
        branch3 = nonlinearity(self.conv1x1(stacked))
        branch4 = nonlinearity(self.conv1x1(self.dilate3(stacked)))
        return x + branch1 + branch2 + branch3 + branch4


class SPPblock(nn.Module):
    """Spatial Pyramid Pooling block (CE-Net).

    Pools the input at four scales, projects each pooled map to one channel
    with a shared 1x1 conv, upsamples each back to the input resolution, and
    concatenates the four maps with the input (output has in_channels + 4
    channels).

    Fixed in review: intermediate tensors are no longer stored on ``self``
    (the originals kept large activation tensors alive between calls and
    clobbered ``self.in_channels``), and the deprecated ``F.upsample`` is
    replaced by ``F.interpolate`` with identical semantics
    (align_corners=False for bilinear by default in both).
    """

    def __init__(self, in_channels):
        super(SPPblock, self).__init__()
        self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3)
        self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5)
        self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6)

        # One shared 1x1 projection to a single channel for all pyramid levels.
        self.conv = init_default(nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0))

    def forward(self, x):
        h, w = x.size(2), x.size(3)
        level1 = F.interpolate(self.conv(self.pool1(x)), size=(h, w), mode='bilinear')
        level2 = F.interpolate(self.conv(self.pool2(x)), size=(h, w), mode='bilinear')
        level3 = F.interpolate(self.conv(self.pool3(x)), size=(h, w), mode='bilinear')
        level4 = F.interpolate(self.conv(self.pool4(x)), size=(h, w), mode='bilinear')
        return torch.cat([level1, level2, level3, level4, x], 1)


class DecoderBlock(nn.Module):
    """CE-Net decoder stage: 1x1 channel squeeze -> stride-2 transposed conv
    (doubles spatial size) -> 1x1 channel expansion, each stage followed by
    BatchNorm and the shared in-place ReLU."""

    def __init__(self, in_channels, n_filters):
        super(DecoderBlock, self).__init__()
        squeezed = in_channels // 4

        self.conv1 = init_default(nn.Conv2d(in_channels, squeezed, 1))
        self.norm1 = nn.BatchNorm2d(squeezed)
        self.relu1 = nonlinearity

        self.deconv2 = init_default(nn.ConvTranspose2d(squeezed, squeezed, 3, stride=2, padding=1, output_padding=1))
        self.norm2 = nn.BatchNorm2d(squeezed)
        self.relu2 = nonlinearity

        self.conv3 = init_default(nn.Conv2d(squeezed, n_filters, 1))
        self.norm3 = nn.BatchNorm2d(n_filters)
        self.relu3 = nonlinearity

    def forward(self, x):
        out = self.relu1(self.norm1(self.conv1(x)))
        out = self.relu2(self.norm2(self.deconv2(out)))
        out = self.relu3(self.norm3(self.conv3(out)))
        return out


class CE_Net(nn.Module):
    """CE-Net: Context Encoder Network for 2D medical image segmentation.

    ResNet-34 encoder, DAC + SPP context modules on the bottleneck, and a
    DecoderBlock upsampling path with additive skip connections.

    NOTE(review): ``num_channels`` is accepted but never used — the ResNet
    stem fixes the input to 3 channels. Instantiation downloads ImageNet
    weights (``pretrained=True``).
    """
    def __init__(self, num_classes=2, num_channels=3, debug=False):
        super(CE_Net, self).__init__()

        # When True, forward() prints the tensor shape after every stage.
        self.debug = debug
        filters = [64, 128, 256, 512]
        # Reuse the pretrained ResNet-34 stem and its four residual stages.
        resnet = models.resnet34(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4

        # Context modules operating on the 512-channel bottleneck.
        self.dblock = DACblock(512)
        self.spp = SPPblock(512)

        # 516 = 512 bottleneck channels + 4 single-channel SPP pyramid maps.
        self.decoder4 = DecoderBlock(516, filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])

        # Final 2x upsampling head producing num_classes maps.
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1)

    def forward(self, x):
        # Encoder
        if self.debug:print("input shape: ", x.shape)
        x = self.firstconv(x)
        x = self.firstbn(x)
        x = self.firstrelu(x)
        x = self.firstmaxpool(x)
        e1 = self.encoder1(x)
        if self.debug: print("encoder1 shape: ", e1.shape)
        e2 = self.encoder2(e1)
        if self.debug: print("encoder2 shape: ", e2.shape)
        e3 = self.encoder3(e2)
        if self.debug: print("encoder3 shape: ", e3.shape)
        e4 = self.encoder4(e3)
        if self.debug: print("encoder4 shape: ", e4.shape)

        # Center: dense atrous convolutions then spatial pyramid pooling.
        e4 = self.dblock(e4)
        if self.debug: print("dblock shape: ", e4.shape)
        e4 = self.spp(e4)
        if self.debug: print("spp shape: ", e4.shape)

        # Decoder: each stage upsamples 2x and adds the matching encoder map.
        d4 = self.decoder4(e4) + e3
        if self.debug: print("decoder4 shape: ", d4.shape)
        d3 = self.decoder3(d4) + e2
        if self.debug: print("decoder3 shape: ", d3.shape)
        d2 = self.decoder2(d3) + e1
        if self.debug: print("decoder2 shape: ", d2.shape)
        d1 = self.decoder1(d2)
        if self.debug: print("decoder1 shape: ", d1.shape)

        out = self.finaldeconv1(d1)
        if self.debug: print("finaldeconv1 shape: ", out.shape)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        out = self.finalconv3(out)
        return out


# FRRN stage specifications (used by the FRRN class below). Each entry is
# [n_blocks, channels, scale], where `scale` is the downsampling factor of
# the pooled y-stream relative to full resolution. "A" and "B" are the two
# model variants from the paper (B is deeper).
frrn_specs_dic = {
    "A": {
        "encoder": [[3, 96, 2], [4, 192, 4], [2, 384, 8], [2, 384, 16]],
        "decoder": [[2, 192, 8], [2, 192, 4], [2, 48, 2]],
    },
    "B": {
        "encoder": [[3, 96, 2], [4, 192, 4], [2, 384, 8], [2, 384, 16], [2, 384, 32]],
        "decoder": [[2, 192, 16], [2, 192, 8], [2, 192, 4], [2, 48, 2]],
    },
}


class conv2DGroupNormRelu(nn.Module):
    """Conv2d -> GroupNorm -> ReLU unit.

    NOTE(review): this class is defined three times in this file with
    identical bodies; Python keeps only the last definition, so this copy is
    dead code and could be deleted.
    """
    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        n_groups=16,
    ):
        super(conv2DGroupNormRelu, self).__init__()

        conv_mod = nn.Conv2d(int(in_channels),
                             int(n_filters),
                             kernel_size=k_size,
                             padding=padding,
                             stride=stride,
                             bias=bias,
                             dilation=dilation,)

        self.cgr_unit = nn.Sequential(conv_mod,
                                      nn.GroupNorm(n_groups, int(n_filters)),
                                      nn.ReLU(inplace=True))

    def forward(self, inputs):
        outputs = self.cgr_unit(inputs)
        return outputs


class conv2DGroupNormRelu(nn.Module):
    """Conv2d -> GroupNorm -> ReLU unit.

    NOTE(review): this is the second of three identical definitions of this
    class in the file; only the last one takes effect, so this copy is dead
    code and could be deleted.
    """
    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        n_groups=16,
    ):
        super(conv2DGroupNormRelu, self).__init__()

        conv_mod = nn.Conv2d(int(in_channels),
                             int(n_filters),
                             kernel_size=k_size,
                             padding=padding,
                             stride=stride,
                             bias=bias,
                             dilation=dilation,)

        self.cgr_unit = nn.Sequential(conv_mod,
                                      nn.GroupNorm(n_groups, int(n_filters)),
                                      nn.ReLU(inplace=True))

    def forward(self, inputs):
        outputs = self.cgr_unit(inputs)
        return outputs


class conv2DGroupNorm(nn.Module):
    """Conv2d followed by GroupNorm (no activation)."""

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        n_groups=16,
    ):
        super(conv2DGroupNorm, self).__init__()
        convolution = nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        normalization = nn.GroupNorm(n_groups, int(n_filters))
        self.cg_unit = nn.Sequential(convolution, normalization)

    def forward(self, inputs):
        return self.cg_unit(inputs)


class conv2DBatchNorm(nn.Module):
    """Conv2d optionally followed by BatchNorm2d (no activation)."""

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        with_bn=True,
    ):
        super(conv2DBatchNorm, self).__init__()
        layers = [
            nn.Conv2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                bias=bias,
            )
        ]
        if with_bn:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        self.cb_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.cb_unit(inputs)


class conv2DBatchNormRelu(nn.Module):
    """Conv2d -> (optional BatchNorm2d) -> ReLU, applied as one unit."""

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        with_bn=True,
    ):
        super(conv2DBatchNormRelu, self).__init__()
        layers = [
            nn.Conv2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                bias=bias,
            )
        ]
        if with_bn:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        layers.append(nn.ReLU(inplace=True))
        self.cbr_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        return self.cbr_unit(inputs)


class conv2DGroupNormRelu(nn.Module):
    """Conv2d -> GroupNorm -> ReLU, applied as one unit.

    NOTE(review): this class is defined three times in this file; Python
    keeps only this last definition.
    """

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        n_groups=16,
    ):
        super(conv2DGroupNormRelu, self).__init__()
        convolution = nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        normalization = nn.GroupNorm(n_groups, int(n_filters))
        self.cgr_unit = nn.Sequential(convolution, normalization, nn.ReLU(inplace=True))

    def forward(self, inputs):
        return self.cgr_unit(inputs)


class RU(nn.Module):
    """
    Residual Unit for FRRN: two 3x3 conv+norm stages (ReLU after the first
    stage only), with the result added back to the input.
    """

    def __init__(self,
                 channels,
                 kernel_size=3,
                 strides=1,
                 group_norm=False,
                 n_groups=None):
        super(RU, self).__init__()
        self.group_norm = group_norm
        self.n_groups = n_groups

        # Select norm flavour once, then build both stages the same way.
        if self.group_norm:
            first_unit, second_unit = conv2DGroupNormRelu, conv2DGroupNorm
            extra = dict(n_groups=self.n_groups)
        else:
            first_unit, second_unit = conv2DBatchNormRelu, conv2DBatchNorm
            extra = {}
        self.conv1 = first_unit(channels, channels, k_size=kernel_size,
                                stride=strides, padding=1, bias=False, **extra)
        self.conv2 = second_unit(channels, channels, k_size=kernel_size,
                                 stride=strides, padding=1, bias=False, **extra)

    def forward(self, x):
        residual = self.conv2(self.conv1(x))
        return residual + x


class FRRU(nn.Module):
    """
    Full Resolution Residual Unit for FRRN.

    Fuses the pooled stream ``y`` with a max-pooled copy of the 32-channel
    full-resolution residual stream ``z``, runs two conv units, and feeds a
    1x1 projection back into ``z`` after nearest-neighbour upsampling.
    """

    def __init__(self,
                 prev_channels,
                 out_channels,
                 scale,
                 group_norm=False,
                 n_groups=None):
        super(FRRU, self).__init__()
        self.scale = scale
        self.prev_channels = prev_channels
        self.out_channels = out_channels
        self.group_norm = group_norm
        self.n_groups = n_groups

        if self.group_norm:
            conv_unit = conv2DGroupNormRelu
            extra = dict(n_groups=self.n_groups)
        else:
            conv_unit = conv2DBatchNormRelu
            extra = {}
        # +32: the pooled residual stream is concatenated onto y.
        self.conv1 = conv_unit(prev_channels + 32, out_channels, k_size=3,
                               stride=1, padding=1, bias=False, **extra)
        self.conv2 = conv_unit(out_channels, out_channels, k_size=3,
                               stride=1, padding=1, bias=False, **extra)

        self.conv_res = nn.Conv2d(out_channels, 32, kernel_size=1, stride=1, padding=0)

    def forward(self, y, z):
        pooled_z = nn.MaxPool2d(self.scale, self.scale)(z)
        y_prime = self.conv2(self.conv1(torch.cat([y, pooled_z], dim=1)))

        # Project back to 32 channels and restore z's full resolution.
        residual = self.conv_res(y_prime)
        residual = F.interpolate(residual, size=torch.Size(z.shape[-2:]), mode="nearest")
        return y_prime, z + residual


class DRRU(nn.Module):
    """
    Dilated Full Resolution Residual Unit for FRRN (an FRRU whose two 3x3
    conv units use dilated convolutions).

    Fixed in review: the group-norm branch previously used ``padding=1``
    regardless of ``dilation``, which shrank the feature map by
    2*(dilation-1) per conv for dilation > 1; it now uses
    ``padding=dilation`` like the batch-norm branch so spatial size is
    preserved.
    """

    def __init__(self,
                 prev_channels,
                 out_channels,
                 scale,
                 group_norm=False,
                 n_groups=None,
                 dilation=1):
        super(DRRU, self).__init__()
        self.scale = scale
        self.prev_channels = prev_channels
        self.out_channels = out_channels
        self.group_norm = group_norm
        self.n_groups = n_groups

        if self.group_norm:
            conv_unit = conv2DGroupNormRelu
            # padding=dilation keeps the spatial size constant for a 3x3 kernel.
            self.conv1 = conv_unit(
                prev_channels + 32, out_channels, k_size=3,
                stride=1, padding=dilation, bias=False, n_groups=self.n_groups, dilation=dilation
            )
            self.conv2 = conv_unit(
                out_channels, out_channels, k_size=3,
                stride=1, padding=dilation, bias=False, n_groups=self.n_groups, dilation=dilation
            )
        else:
            conv_unit = conv2DBatchNormRelu
            self.conv1 = conv_unit(prev_channels + 32, out_channels, k_size=3,
                                   stride=1, padding=dilation, bias=False, dilation=dilation)
            self.conv2 = conv_unit(out_channels, out_channels, k_size=3,
                                   stride=1, padding=dilation, bias=False, dilation=dilation)

        # 1x1 projection feeding the 32-channel full-resolution residual stream.
        self.conv_res = nn.Conv2d(out_channels, 32, kernel_size=1, stride=1, padding=0)

    def forward(self, y, z):
        # Fuse the pooled residual stream with y, then two dilated conv units.
        x = torch.cat([y, nn.MaxPool2d(self.scale, self.scale)(z)], dim=1)
        y_prime = self.conv1(x)
        y_prime = self.conv2(y_prime)

        # Project, upsample back to z's resolution, and add into z.
        x = self.conv_res(y_prime)
        x = F.interpolate(x, size=torch.Size(z.shape[-2:]), mode="nearest")
        return y_prime, z + x


class FRRN(nn.Module):
    """
    Full Resolution Residual Networks for Semantic Segmentation
    URL: https://arxiv.org/abs/1611.08323

    References:
    1) Original Author's code: https://github.com/TobyPDE/FRRN
    2) TF implementation by @kiwonjoon: https://github.com/hiwonjoon/tf-frrn

    Two-stream architecture: a pooled feature stream ``y`` processed by
    FRRU/DRRU blocks and a 32-channel full-resolution residual stream ``z``.
    ``model_type`` ('A' or 'B') selects the stage specs from
    ``frrn_specs_dic``; type 'B' additionally uses dilated units (DRRU).

    Fixed in review: ``forward`` now calls ``F.interpolate`` (``F.upsample``
    is deprecated) with identical arguments, and dead ``prev_channels``
    assignments were removed from ``forward``.
    """

    def __init__(self,
                 n_classes=21,
                 model_type=None,
                 group_norm=False,
                 n_groups=16,
                 debug=False):
        super(FRRN, self).__init__()
        self.n_classes = n_classes
        self.model_type = model_type
        self.group_norm = group_norm
        self.n_groups = n_groups
        self.debug = debug

        # Stem: 5x5 conv to 48 channels at full resolution.
        if self.group_norm:
            self.conv1 = conv2DGroupNormRelu(3, 48, 5, 1, 2)
        else:
            self.conv1 = conv2DBatchNormRelu(3, 48, 5, 1, 2)

        # Three residual units before the streams split, three after merging.
        self.up_residual_units = []
        self.down_residual_units = []
        for i in range(3):
            self.up_residual_units.append(RU(channels=48,
                                             kernel_size=3,
                                             strides=1,
                                             group_norm=self.group_norm,
                                             n_groups=self.n_groups))
            self.down_residual_units.append(RU(channels=48,
                                               kernel_size=3,
                                               strides=1,
                                               group_norm=self.group_norm,
                                               n_groups=self.n_groups))

        self.up_residual_units = nn.ModuleList(self.up_residual_units)
        self.down_residual_units = nn.ModuleList(self.down_residual_units)

        # 1x1 conv creating the 32-channel full-resolution residual stream.
        self.split_conv = nn.Conv2d(
            48, 32, kernel_size=1, padding=0, stride=1, bias=False
        )

        # each spec is as (n_blocks, channels, scale)
        self.encoder_frru_specs = frrn_specs_dic[self.model_type]["encoder"]

        self.decoder_frru_specs = frrn_specs_dic[self.model_type]["decoder"]

        # Per-stage dilation rates, only used for model type 'B' (DRRU).
        encoder_dilation = [1,1,2,2,4]
        decoder_dilation = [2,2,1,1]

        # encoding
        prev_channels = 48
        self.encoding_frrus = {}  # NOTE(review): unused; units are registered via setattr below.
        for i, (n_blocks, channels, scale) in enumerate(self.encoder_frru_specs):
            for block in range(n_blocks):
                key = "_".join(map(str, ["encoding_frru", n_blocks, channels, scale, block]))
                if self.model_type == 'B' and encoder_dilation is not None:
                    setattr(self, key, DRRU(prev_channels=prev_channels,
                                            out_channels=channels,
                                            scale=scale,
                                            group_norm=self.group_norm,
                                            n_groups=self.n_groups,
                                            dilation=encoder_dilation[i]))
                else:
                    setattr(self, key, FRRU(prev_channels=prev_channels,
                                            out_channels=channels,
                                            scale=scale,
                                            group_norm=self.group_norm,
                                            n_groups=self.n_groups))
            prev_channels = channels

        # decoding
        self.decoding_frrus = {}  # NOTE(review): unused; units are registered via setattr below.
        for i, (n_blocks, channels, scale) in enumerate(self.decoder_frru_specs):
            # pass through decoding FRRUs
            for block in range(n_blocks):
                key = "_".join(map(str, ["decoding_frru", n_blocks, channels, scale, block]))
                if self.model_type == 'B' and decoder_dilation is not None:
                    setattr(self, key, DRRU(prev_channels=prev_channels,
                                            out_channels=channels,
                                            scale=scale,
                                            group_norm=self.group_norm,
                                            n_groups=self.n_groups,
                                            dilation=decoder_dilation[i]))
                else:
                    setattr(self, key, FRRU(prev_channels=prev_channels,
                                            out_channels=channels,
                                            scale=scale,
                                            group_norm=self.group_norm,
                                            n_groups=self.n_groups))
            prev_channels = channels

        # Merge: concat final y (upsampled) with z, project back to 48 channels.
        self.merge_conv = nn.Conv2d(
            prev_channels + 32, 48, kernel_size=1, padding=0, stride=1, bias=False
        )

        self.classif_conv = nn.Conv2d(
            48, self.n_classes, kernel_size=1, padding=0, stride=1, bias=True
        )

    def forward(self, x):
        if self.debug: print('inputs', x.shape)
        # pass to initial conv
        x = self.conv1(x)
        if self.debug: print('conv1', x.shape)

        # pass through residual units
        for i in range(3):
            x = self.up_residual_units[i](x)
        if self.debug: print('up_residual_units', x.shape)

        # divide stream
        y = x
        z = self.split_conv(x)
        if self.debug: print('split_conv', z.shape)

        # encoding; record each stage's spatial size for the decoder.
        encoder_size = []
        encoder_size.append([y.shape[-2:][0], y.shape[-2:][1]])
        for n_blocks, channels, scale in self.encoder_frru_specs:
            # maxpool bigger feature map
            y_pooled = F.max_pool2d(y, stride=2, kernel_size=2, padding=0)
            # pass through encoding FRRUs
            for block in range(n_blocks):
                key = "_".join(
                    map(str, ["encoding_frru", n_blocks, channels, scale, block])
                )
                y, z = getattr(self, key)(y_pooled, z)
            encoder_size.append([y.shape[-2:][0], y.shape[-2:][1]])
            if self.debug: print(key, y.shape, z.shape)

        encoder_size = encoder_size[::-1]
        # decoding
        for i, (n_blocks, channels, scale) in enumerate(self.decoder_frru_specs):
            # bilinear upsample smaller feature map to the matching encoder size
            upsample_size = torch.Size(encoder_size[i+1])
            y_upsampled = F.interpolate(y, size=upsample_size, mode="bilinear", align_corners=True)
            # pass through decoding FRRUs
            for block in range(n_blocks):
                key = "_".join(
                    map(str, ["decoding_frru", n_blocks, channels, scale, block])
                )
                y, z = getattr(self, key)(y_upsampled, z)
                if self.debug: print(key, y.shape, z.shape)

        # merge streams
        upsample_size = torch.Size(encoder_size[-1])
        x = torch.cat([F.interpolate(y, size=upsample_size, mode="bilinear", align_corners=True), z], dim=1)
        x = self.merge_conv(x)

        if self.debug: print('merge_conv', x.shape)

        # pass through residual units
        for i in range(3):
            x = self.down_residual_units[i](x)
        if self.debug: print('down_residual_units', x.shape)

        # final 1x1 conv to get classification
        x = self.classif_conv(x)
        if self.debug: print('out ', x.shape)

        return x


class UNet3D(nn.Module):
    """3D U-Net: three-level Conv3d + ReLU encoder with MaxPool3d
    downsampling, and a transposed-conv decoder with channel-wise skip
    concatenations.

    Fixes applied in review:
    - ``encoder(..., batchnorm=True)`` used ``nn.BatchNorm2d`` on 5D
      tensors; it now uses ``nn.BatchNorm3d``.
    - ``forward`` called ``torch.cat(an, syn2)`` — a guaranteed
      ``TypeError``, since ``torch.cat``'s second positional argument is
      ``dim`` — and concatenated the other skips along dim 0, which does not
      match the declared decoder input channels (e.g. 256 + 512 for
      ``dc8``). Skips are now concatenated along the channel dim (1), and
      upsampled maps are nearest-interpolated to the skip's spatial size
      when the unpadded encoder convs cause a mismatch (this replaces the
      previous batch-size-1 reshape hack).
    """

    def __init__(self, in_channel, n_classes):
        self.in_channel = in_channel
        self.n_classes = n_classes
        super(UNet3D, self).__init__()
        self.ec0 = self.encoder(self.in_channel, 32, bias=False, batchnorm=False)
        self.ec1 = self.encoder(32, 64, bias=False, batchnorm=False)
        self.ec2 = self.encoder(64, 64, bias=False, batchnorm=False)
        self.ec3 = self.encoder(64, 128, bias=False, batchnorm=False)
        self.ec4 = self.encoder(128, 128, bias=False, batchnorm=False)
        self.ec5 = self.encoder(128, 256, bias=False, batchnorm=False)
        self.ec6 = self.encoder(256, 256, bias=False, batchnorm=False)
        self.ec7 = self.encoder(256, 512, bias=False, batchnorm=False)

        self.pool0 = nn.MaxPool3d(2)
        self.pool1 = nn.MaxPool3d(2)
        self.pool2 = nn.MaxPool3d(2)

        self.dc9 = self.decoder(512, 512, kernel_size=2, stride=2, bias=False)
        self.dc8 = self.decoder(256 + 512, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc7 = self.decoder(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc6 = self.decoder(256, 256, kernel_size=2, stride=2, bias=False)
        self.dc5 = self.decoder(128 + 256, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc4 = self.decoder(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc3 = self.decoder(128, 128, kernel_size=2, stride=2, bias=False)
        self.dc2 = self.decoder(64 + 128, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc1 = self.decoder(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.dc0 = self.decoder(64, n_classes, kernel_size=1, stride=1, bias=False)

    def encoder(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
                bias=True, batchnorm=False):
        """Conv3d (+ optional BatchNorm3d) + ReLU stage."""
        if batchnorm:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.BatchNorm3d(out_channels),  # was BatchNorm2d: wrong for 5D input
                nn.ReLU())
        else:
            layer = nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.ReLU())
        return layer

    def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                output_padding=0, bias=True):
        """ConvTranspose3d + ReLU stage (used for both 1x and 2x upsampling)."""
        layer = nn.Sequential(
            nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride,
                               padding=padding, output_padding=output_padding, bias=bias),
            nn.ReLU())
        return layer

    def _match(self, up, ref):
        """Nearest-resize `up` to `ref`'s spatial size if they differ
        (unpadded encoder convs can make skip sizes mismatch)."""
        if up.shape[2:] != ref.shape[2:]:
            up = F.interpolate(up, size=ref.shape[2:], mode='nearest')
        return up

    def forward(self, x):
        print('进入网络时大小', np.shape(x))
        e0 = self.ec0(x)
        syn0 = self.ec1(e0)
        e1 = self.pool0(syn0)
        e2 = self.ec2(e1)
        syn1 = self.ec3(e2)
        del e0, e1, e2
        print('第一层conv后', np.shape(syn1))
        e3 = self.pool1(syn1)
        e4 = self.ec4(e3)
        syn2 = self.ec5(e4)
        del e3, e4
        print('第二层conv后', np.shape(syn2))
        e5 = self.pool2(syn2)
        e6 = self.ec6(e5)
        e7 = self.ec7(e6)
        del e5, e6
        print('e7:', np.shape(e7))
        up9 = self._match(self.dc9(e7), syn2)
        print('self.dc9(e7):', np.shape(up9))
        print('syn2:', np.shape(syn2))
        # Concatenate along channels: 512 + 256 = 768 = dc8's input channels.
        d9 = torch.cat((up9, syn2), dim=1)
        del e7, syn2

        d8 = self.dc8(d9)
        d7 = self.dc7(d8)
        del d9, d8

        d6 = torch.cat((self._match(self.dc6(d7), syn1), syn1), dim=1)
        del d7, syn1

        d5 = self.dc5(d6)
        d4 = self.dc4(d5)
        del d6, d5

        d3 = torch.cat((self._match(self.dc3(d4), syn0), syn0), dim=1)
        del d4, syn0

        d2 = self.dc2(d3)
        d1 = self.dc1(d2)
        del d3, d2

        d0 = self.dc0(d1)
        return d0


def conv_layer3D(ni:int, nf:int, ks:int=3, stride:int=1, dilation=1, padding:int=None, bias:bool=None, is_1d:bool=False,
               norm_type:Optional[NormType]=NormType.Batch,  use_activ:bool=True, leaky:float=None,
               transpose:bool=False, init:Callable=nn.init.kaiming_normal_, self_attention:bool=False):
    "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers."
    # Default padding: "same" for regular convs, none for transposed convs.
    if padding is None:
        padding = 0 if transpose else (ks - 1) // 2
    use_bn = norm_type in (NormType.Batch, NormType.BatchZero)
    # Bias is redundant when a batchnorm layer immediately follows the conv.
    if bias is None:
        bias = not use_bn
    if transpose:
        conv_func = nn.ConvTranspose3d
    elif is_1d:
        # NOTE(review): is_1d selects nn.Conv1d despite the "3D" name, while
        # the norm layer below is BatchNorm3d — confirm this combination.
        conv_func = nn.Conv1d
    else:
        conv_func = nn.Conv3d
    conv = init_default(conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding, dilation=dilation), init)
    if norm_type == NormType.Weight:
        conv = weight_norm(conv)
    elif norm_type == NormType.Spectral:
        conv = spectral_norm(conv)
    layers = [conv]
    if use_bn:
        layers.append(nn.BatchNorm3d(nf))
    if use_activ:
        layers.append(relu(True, leaky=leaky))
    if self_attention:
        layers.append(SelfAttention(nf))
    return nn.Sequential(*layers)


class EncoderBlock3D(nn.Module):
    """3D U-Net encoder stage: two conv_layer3D stages plus Dropout3d,
    optionally preceded by a MaxPool3d (stride `pooling_stride`) to move
    down one resolution level."""

    def __init__(self, in_channels, out_channels, ks: int = 3, stride: int = 1, padding: int = 0,
                 dropout_p:float=0, leaky=None, pool:bool=True, pooling_stride=2):
        super(EncoderBlock3D, self).__init__()
        self.block = nn.Sequential(
            conv_layer3D(in_channels, out_channels, ks=ks, stride=stride, padding=padding, leaky=leaky),
            conv_layer3D(out_channels, out_channels, ks=ks, stride=stride, padding=padding, leaky=leaky),
            nn.Dropout3d(p=dropout_p),
        )
        self.pool = nn.MaxPool3d(kernel_size=2, stride=pooling_stride) if pool else None

    def forward(self, input):
        pooled = input if self.pool is None else self.pool(input)
        return self.block(pooled)


def conv3d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0, bias=False) -> nn.ConvTranspose3d:
    "Create `nn.ConvTranspose3d` layer (`ni` to `nf` channels)."
    # Docstring previously said ConvTranspose2d; the layer has always been 3D.
    return nn.ConvTranspose3d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias)


class DecoderBlock3D(nn.Module):
    """3D U-Net decoder stage: upsample the incoming features, align them to
    the skip connection's spatial size, concatenate along channels, then run
    two conv_layer3D stages plus Dropout3d.

    Fixed in review: the ``Upconv_Type.Upsampling`` branch referenced
    ``nn.UpsamplingBilinear3d``, which does not exist in PyTorch (only the
    2D variant does) and raised ``AttributeError`` when selected; it now
    uses the equivalent ``nn.Upsample(mode='trilinear',
    align_corners=True)``.
    """

    def __init__(self, in_channels, out_channels, ks:int=3, stride:int=1, padding:int=0,
                 dropout_p=0, upconv_type:Upconv_Type=Upconv_Type.ConvTranspose, leaky=None):
        super(DecoderBlock3D, self).__init__()
        self.block = nn.Sequential(conv_layer3D(in_channels, out_channels, ks=ks, stride=stride, padding=padding, leaky=leaky),
                                   conv_layer3D(out_channels, out_channels, ks=ks, stride=stride, padding=padding, leaky=leaky),
                                   nn.Dropout3d(p=dropout_p))
        if upconv_type == Upconv_Type.ConvTranspose:
            self.upconv = conv3d_trans(in_channels, out_channels)
        elif upconv_type == Upconv_Type.Upsampling:
            # Trilinear Upsample with align_corners=True matches the 2D
            # UpsamplingBilinear semantics; nn.UpsamplingBilinear3d does not exist.
            self.upconv = nn.Sequential(nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True),
                                        conv_layer3D(in_channels, out_channels, ks=1, use_activ=False))
        elif upconv_type == Upconv_Type.PixelShuffle:
            self.upconv = PixelShuffle_ICNR(in_channels, out_channels, leaky=leaky)

    def forward(self, input, skip):
        input = self.upconv(input)
        # Align to the skip's spatial size if unpadded encoder convs caused a mismatch.
        if input.shape[-3:] != skip.shape[-3:]:
            input = F.interpolate(input, skip.shape[-3:], mode='nearest')
        return self.block(torch.cat([input, skip], 1))


class UNet_3D(nn.Module):
    """
    3-D U-Net for volumetric segmentation.

    Reference: "U-Net: Convolutional Networks for Biomedical Image Segmentation",
    extended here to 3-D convolutions.

    Args:
        feature_scale: divisor applied to the base channel widths
            [64, 128, 256, 512, 1024]; larger values give a lighter model.
        n_classes: number of channels of the final 1x1x1 conv output.
        in_channels: number of channels of the input volume.
        debug: when True, print the shape of every intermediate tensor in forward().
    """
    def __init__(self, feature_scale=1, n_classes=1, in_channels=1, debug=False):
        super(UNet_3D, self).__init__()
        self.in_channels = in_channels
        self.feature_scale = feature_scale
        self.debug = debug

        # Base widths of the five encoder stages, scaled down by feature_scale.
        coder_channels = [64, 128, 256, 512, 1024]          # [64, 128, 256, 512, 1024]  #[16, 32, 64, 128, 256]
        coder_channels = [int(x / self.feature_scale) for x in coder_channels]

        # Encoder path: the first stage keeps full resolution (pool=False);
        # every later stage max-pools by 2 before its conv block.
        self.encoder1 = EncoderBlock3D(self.in_channels, coder_channels[0], ks=3, stride=1, padding=1, pool=False)
        self.encoder2 = EncoderBlock3D(coder_channels[0], coder_channels[1], ks=3, stride=1, padding=1)
        self.encoder3 = EncoderBlock3D(coder_channels[1], coder_channels[2], ks=3, stride=1, padding=1)
        self.encoder4 = EncoderBlock3D(coder_channels[2], coder_channels[3], ks=3, stride=1, padding=1)
        # Bottleneck with dropout for regularisation.
        self.middle_conv = EncoderBlock3D(coder_channels[3], coder_channels[4], ks=3, stride=1, padding=1,dropout_p=0.5)
        # Decoder path: each stage upsamples via transposed conv and fuses the
        # matching encoder output through a skip connection.
        self.decoder4 = DecoderBlock3D(coder_channels[4], coder_channels[3], ks=3, stride=1, padding=1,
                                     upconv_type=Upconv_Type.ConvTranspose)
        self.decoder3 = DecoderBlock3D(coder_channels[3], coder_channels[2], ks=3, stride=1, padding=1,
                                     upconv_type=Upconv_Type.ConvTranspose)
        self.decoder2 = DecoderBlock3D(coder_channels[2], coder_channels[1], ks=3, stride=1, padding=1,
                                     upconv_type=Upconv_Type.ConvTranspose)
        self.decoder1 = DecoderBlock3D(coder_channels[1], coder_channels[0], ks=3, stride=1, padding=1,
                                     upconv_type=Upconv_Type.ConvTranspose)
        # 1x1x1 conv producing the per-voxel class logits.
        self.final = conv_layer3D(coder_channels[0], n_classes, ks=1, use_activ=False)

        # Auxiliary classification head over the bottleneck features.
        # NOTE(review): currently unused — the "intact predict" branch in
        # forward() is commented out, so only the segmentation logits are returned.
        classify_encode= [EncoderBlock3D(coder_channels[4], coder_channels[3], ks=3, stride=1, padding=1),
                          EncoderBlock3D(coder_channels[3], coder_channels[2], ks=3, stride=1, padding=1),
                          EncoderBlock3D(coder_channels[2], coder_channels[1], ks=3, stride=1, padding=1),
                          conv_layer3D(coder_channels[1], coder_channels[0], ks=1, use_activ=False)]
        self.Ccoder = nn.Sequential(*classify_encode)
        self.fc = nn.Sequential(*[Flatten(), nn.Linear(coder_channels[0], n_classes)])

    def forward(self, inputs):
        """Return per-voxel segmentation logits of shape (N, n_classes, D, H, W)."""
        # =======================Segment Network================================ #
        if self.debug:print('inputs', inputs.shape)
        conv1 = self.encoder1(inputs)
        if self.debug:print('encoder1', conv1.shape)
        conv2 = self.encoder2(conv1)
        if self.debug:print('encoder2', conv2.shape)
        conv3 = self.encoder3(conv2)
        if self.debug:print('encoder3', conv3.shape)
        conv4 = self.encoder4(conv3)
        if self.debug:print('encoder4', conv4.shape)
        center = self.middle_conv(conv4)
        if self.debug:print('middle_conv', center.shape)

        # Decoders consume the bottleneck plus the encoder skip tensors,
        # shallower stage by stage.
        up4 = self.decoder4(center, conv4)
        if self.debug:print('decoder4', up4.shape)
        up3 = self.decoder3(up4, conv3)
        if self.debug:print('decoder3', up3.shape)
        up2 = self.decoder2(up3, conv2)
        if self.debug:print('decoder2', up2.shape)
        up1 = self.decoder1(up2, conv1)
        if self.debug:print('decoder1', up1.shape)

        sfinal = self.final(up1)
        if self.debug: print('final', sfinal.shape)

        # =======================intact predict Network================================ #
        # Cfinal = self.Ccoder(center)
        # Cfinal = self.fc(Cfinal)
        # final = (sfinal, Cfinal)
        return sfinal


def main():
    """Smoke-test `UNet_3D` on a random volume, on GPU when one is available."""
    # Pick the device dynamically instead of hard-coding `.cuda()`, so the
    # smoke test also runs on CPU-only machines instead of crashing.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.rand(2, 1, 128, 128, 128, device=device)
    model = UNet_3D(n_classes=1, feature_scale=2, in_channels=1, debug=True).to(device)
    y = model(x)
    print('output', y.shape)


if __name__ == '__main__':
    main()