import torch
import torch.nn as nn
import torch.nn.functional as F

from models.osmnet import L2NormDense, SSLCNet, SSLC, input_norm
from models.cbam import CBAM, ChannelGate, SpatialGate, ChannelPool, BasicConv
from models.scnet import SCBottleneck

class SSLC_cbam(SSLC):
    """SSLC descriptor network with a CBAM attention block applied to the
    final 9-channel feature map.

    Returns:
        (feature, L2norm_feature): raw dense feature map and its
        channel-wise L2-normalised counterpart.
    """

    def __init__(self, **kwargs):
        # FIX: the original used ``super(SSLC_cbam, SSLC_cbam).__init__(self, ...)``,
        # an error-prone unbound-super spelling; the standard form is equivalent.
        super().__init__(**kwargs)
        self.cbam = CBAM(9, reduction_ratio=1)

    def forward(self, input):
        input = self.input_norm(input)
        feature = self.backbone(input)
        feature = self.cbam(feature)
        L2norm_feature = L2NormDense()(feature)

        return feature, L2norm_feature

class SSLC_cam(SSLC):
    """SSLC descriptor network with channel attention (ChannelGate) applied
    to the final 9-channel feature map.

    Returns:
        (feature, L2norm_feature): raw dense feature map and its
        channel-wise L2-normalised counterpart.
    """

    def __init__(self, **kwargs):
        # FIX: replaced the non-idiomatic ``super(SSLC_cam, SSLC_cam).__init__(self, ...)``
        # with the standard, equivalent super call.
        super().__init__(**kwargs)
        self.cam = ChannelGate(9, reduction_ratio=4)

    def forward(self, input):
        input = self.input_norm(input)
        feature = self.backbone(input)
        feature = self.cam(feature)
        L2norm_feature = L2NormDense()(feature)

        return feature, L2norm_feature

class SSLC_sam(SSLC):
    """SSLC descriptor network with spatial attention (SpatialGate) applied
    to the final feature map.

    Returns:
        (feature, L2norm_feature): raw dense feature map and its
        channel-wise L2-normalised counterpart.
    """

    def __init__(self, **kwargs):
        # FIX: replaced the non-idiomatic ``super(SSLC_sam, SSLC_sam).__init__(self, ...)``
        # with the standard, equivalent super call.
        super().__init__(**kwargs)
        self.sam = SpatialGate()

    def forward(self, input):
        input = self.input_norm(input)
        feature = self.backbone(input)
        feature = self.sam(feature)
        L2norm_feature = L2NormDense()(feature)

        return feature, L2norm_feature

class SSLC_cbam_before_reduct(SSLCNet):
    """SSLCNet variant that applies CBAM to the concatenated 160-channel
    feature map just before the channel-reduction convolution.

    Returns:
        (feature, L2norm_feature): reduced dense feature map and its
        channel-wise L2-normalised counterpart.
    """

    def __init__(self, **kwargs):
        # FIX: replaced ``super(Cls, Cls).__init__(self, ...)`` with the
        # standard, equivalent super call.
        super().__init__(**kwargs)
        self.cbam = CBAM(160, reduction_ratio=4)

    def input_norm(self, x):
        """Per-sample zero-mean / unit-std normalisation over all pixels.

        Statistics are detached so normalisation does not receive gradients.
        """
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        # Reshape the (B,) statistics to (B, 1, 1, 1) so they broadcast over
        # the (B, C, H, W) input. (The original built the same shape with an
        # inconsistent unsqueeze chain.)
        mp = mp.detach().view(-1, 1, 1, 1)
        sp = sp.detach().view(-1, 1, 1, 1)
        return (x - mp) / sp

    def forward(self, input):
        # FIX: the original normalised the input twice (once here and once
        # inside the conv1 call); a single normalisation pass is intended.
        output1 = self.conv1(self.input_norm(input))
        output2 = self.conv2_x(output1)
        output = self.conv3_x(output2)
        # Fuse stem features back in via a 1x1 "cat" convolution.
        output = self.conv_cat(torch.cat([output, output1], 1))
        output = self.conv4_x(output)

        # Concatenate mid- and high-level features, then apply CBAM before
        # the channel reduction.
        output3 = torch.cat([output, output2], 1)
        output3 = self.cbam(output3)

        feature = self.conv2(output3)

        L2norm_feature = L2NormDense()(feature)

        return feature, L2norm_feature

class Block2ConvBR(nn.Module):
    """Two stacked 3x3 Conv-BN-ReLU layers with no residual connection."""

    def __init__(self, in_channels, out_channels):
        super().__init__()

        def conv_bn_relu(c_in, c_out):
            # One 3x3 convolution (no bias; BN provides the shift) + BN + ReLU.
            return [
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]

        self.block = nn.Sequential(
            *conv_bn_relu(in_channels, out_channels),
            *conv_bn_relu(out_channels, out_channels),
        )

    def forward(self, x):
        return self.block(x)

class DDFN(nn.Module):
    """Plain (non-residual) dense descriptor network.

    Stem conv, three Block2ConvBR stages (32 -> 32 -> 64 -> 128), then a
    dropout/conv/BN reduction to ``out_ch`` channels.
    """

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = Block2ConvBR(32, 32)
        self.conv3_x = Block2ConvBR(32, 64)
        self.conv4_x = Block2ConvBR(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        # Run the stem and the three conv stages in order.
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class BlockRes2ConvBR(nn.Module):
    """Basic residual block: two 3x3 Conv-BN layers (ReLU between) plus a
    shortcut, with a final ReLU on the sum.

    When the channel count changes, the shortcut is a 1x1 Conv-BN
    projection; otherwise it is the identity.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()

        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

        if in_channels == out_channels:
            # Identity shortcut when dimensions already match.
            self.shortcut = nn.Sequential()
        else:
            # 1x1 projection to match the residual branch's channel count.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, bias=False),
                nn.BatchNorm2d(out_channels),
            )

    def forward(self, x):
        out = self.residual_function(x) + self.shortcut(x)
        return nn.ReLU(inplace=True)(out)

class ResDDFN(nn.Module):
    """Residual dense descriptor network.

    Stem conv, three BlockRes2ConvBR stages (32 -> 32 -> 64 -> 128), then a
    dropout/conv/BN reduction to ``out_ch`` channels.
    """

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBR(32, 32)
        self.conv3_x = BlockRes2ConvBR(32, 64)
        self.conv4_x = BlockRes2ConvBR(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)
        
class ResDDFNa(nn.Module):
    """ResDDFN variant whose channel-reduction convolution is 1x1 instead
    of 3x3 (stages are identical to ResDDFN)."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBR(32, 32)
        self.conv3_x = BlockRes2ConvBR(32, 64)
        self.conv4_x = BlockRes2ConvBR(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            # 1x1 reduction: per-pixel channel mixing only.
            nn.Conv2d(128, out_ch, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class ResDDFN256(nn.Module):
    """Wider ResDDFN: stage channels 32 -> 64 -> 128 -> 256 before the
    reduction to ``out_ch``."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBR(32, 64)
        self.conv3_x = BlockRes2ConvBR(64, 128)
        self.conv4_x = BlockRes2ConvBR(128, 256)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(256, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class ResDDFN4(nn.Module):
    """Deeper ResDDFN with four residual stages
    (32 -> 32 -> 64 -> 128 -> 256) before the reduction to ``out_ch``."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBR(32, 32)
        self.conv3_x = BlockRes2ConvBR(32, 64)
        self.conv4_x = BlockRes2ConvBR(64, 128)
        self.conv5_x = BlockRes2ConvBR(128, 256)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(256, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        stages = (self.conv1, self.conv2_x, self.conv3_x,
                  self.conv4_x, self.conv5_x)
        for stage in stages:
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class BlockRes2ConvBRCAM(BlockRes2ConvBR):
    """Residual block whose residual branch is gated by channel attention
    (ChannelGate, reduction ratio 1) before the shortcut addition."""

    def __init__(self, in_channels, out_channels):
        super().__init__(in_channels, out_channels)
        self.cam = ChannelGate(out_channels, reduction_ratio=1)

    def forward(self, x):
        # Channel attention is applied to the residual branch only.
        gated = self.cam(self.residual_function(x))
        return nn.ReLU(inplace=True)(gated + self.shortcut(x))

class ResCAMDDFN(nn.Module):
    """ResDDFN built from channel-attention residual blocks
    (BlockRes2ConvBRCAM) instead of plain residual blocks."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = BlockRes2ConvBRCAM(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class SCBlock(nn.Module):
    """Self-calibrated convolution block (SCNet-style).

    Computes ``k4( k3(x) * sigmoid(x + upsample(k2(pool(x)))) )`` where
    k2/k3/k4 are 3x3 Conv-BN branches and k2 operates at a resolution
    down-sampled by ``pooling_r``.
    """

    def __init__(
        self, planes, stride=1, padding=1,
        dilation=1, groups=1, pooling_r=4
    ):
        # FIX: the original used ``super(SCBlock, SCBlock).__init__(self)``,
        # a non-idiomatic unbound-super spelling; the standard call is
        # equivalent and safe under subclassing.
        super().__init__()
        self.k2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r),
            nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            nn.BatchNorm2d(planes),
        )
        self.k3 = nn.Sequential(
            nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            nn.BatchNorm2d(planes),
        )
        self.k4 = nn.Sequential(
            nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                      padding=padding, dilation=dilation,
                      groups=groups, bias=False),
            nn.BatchNorm2d(planes),
        )

    def forward(self, x):
        identity = x

        # Calibration gate: sigmoid(identity + upsampled k2 branch).
        out = torch.sigmoid(
            torch.add(identity, F.interpolate(self.k2(x), identity.size()[2:]))
        )
        out = torch.mul(self.k3(x), out)  # k3 * sigmoid(identity + k2)
        out = self.k4(out)

        return out

class SCBottleneck(nn.Module):
    """Bottleneck with a self-calibrated branch and a plain 1x1 branch.

    NOTE: this local definition shadows the ``SCBottleneck`` imported from
    ``models.scnet`` at the top of the file; the local class is the one
    used below. Several constructor arguments (downsample, cardinality,
    bottleneck_width, avd, is_first) are accepted for signature
    compatibility but are unused here.
    """
    pooling_r = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                cardinality=1, bottleneck_width=32,
                avd=False, dilation=1, is_first=False,
                norm_layer=nn.BatchNorm2d):
        super().__init__()
        # Self-calibrated branch entry: 1x1 conv + BN.
        self.conv1_a = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1_a = norm_layer(planes)

        # Plain residual branch: 1x1 conv + BN.
        self.conv1_b = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1_b = norm_layer(planes)

        self.sc = SCBlock(planes, stride=stride, dilation=dilation)

    def forward(self, x):
        relu = nn.ReLU(inplace=True)

        # Branch A: 1x1 projection then self-calibrated convolution.
        branch = relu(self.bn1_a(self.conv1_a(x)))
        branch = self.sc(branch)

        # Branch B: 1x1 projection only.
        residual = relu(self.bn1_b(self.conv1_b(x)))

        return relu(branch + residual)

class ResSCDDFN(nn.Module):
    """Descriptor network mixing channel-attention residual blocks with a
    final self-calibrated bottleneck stage (SCBottleneck)."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = SCBottleneck(64, 128, norm_layer=nn.BatchNorm2d)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class ResDDFNnoReduct(nn.Module):
    """ResDDFN without channel reduction: the final conv keeps all 128
    channels instead of reducing to a small descriptor depth."""

    def __init__(self):
        super().__init__()

        self.in_channels = 32

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBR(32, 32)
        self.conv3_x = BlockRes2ConvBR(32, 64)
        self.conv4_x = BlockRes2ConvBR(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            # 128 -> 128: no reduction, just one more conv/BN.
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128),
        )

    def forward(self, input):
        x = input_norm(input)
        for stage in (self.conv1, self.conv2_x, self.conv3_x, self.conv4_x):
            x = stage(x)
        feature = self.conv2(x)
        return feature, L2NormDense()(feature)

class ResDDFNCAM3SAM(nn.Module):
    """Channel-attention ResDDFN with a spatial attention gate (SpatialGate)
    applied after the channel reduction."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = BlockRes2ConvBRCAM(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        self.sam = SpatialGate()

    def forward(self, input):
        x = input_norm(input)
        stages = (self.conv1, self.conv2_x, self.conv3_x,
                  self.conv4_x, self.conv2)
        for stage in stages:
            x = stage(x)
        # Spatial attention is the last operation before normalisation.
        feature = self.sam(x)
        return feature, L2NormDense()(feature)

class SSLCCAM3SAM(nn.Module):
    """SSLC-style network with skip concatenations, channel-attention
    residual blocks, and a final spatial attention gate."""

    def __init__(self, out_ch=9):
        super().__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = BlockRes2ConvBRCAM(64, 128)

        # Reduction input is 128 + 32 concatenated channels.
        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(160, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        self.sam = SpatialGate()

        # Fuses the 64 + 32 concatenated skip features back to 64 channels.
        self.conv_cat = nn.Sequential(
            nn.Conv2d(96, 64, kernel_size=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))

    def forward(self, input):
        output1 = self.conv1(input_norm(input))
        output2 = self.conv2_x(output1)
        x = self.conv3_x(output2)
        x = self.conv_cat(torch.cat([x, output1], 1))
        x = self.conv4_x(x)
        x = self.conv2(torch.cat([x, output2], 1))
        feature = self.sam(x)
        return feature, L2NormDense()(feature)

class SSLC_SAMMask(nn.Module):
    """SSLC-style descriptor network that also produces a spatial-attention
    mask from the concatenated features.

    Returns:
        (L2norm_feature, masked_feature): the L2-normalised descriptor map
        and the same map multiplied by the sigmoid spatial mask.
    """

    def __init__(self, out_ch=9):
        super().__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = BlockRes2ConvBRCAM(64, 128)

        # Reduction input is 128 + 32 concatenated channels.
        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(160, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

        # Fuses the 64 + 32 concatenated skip features back to 64 channels.
        self.conv_cat = nn.Sequential(
            nn.Conv2d(96, 64, kernel_size=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))

        # Mask head: SpatialGate internals (channel pool + 7x7 conv) used
        # to produce a mask instead of gating in place.
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, 7, stride=1, padding=3, relu=False)

    def forward(self, input):
        output1 = self.conv1(input_norm(input))
        output2 = self.conv2_x(output1)
        output = self.conv3_x(output2)
        output = self.conv_cat(torch.cat([output, output1], 1))
        output = self.conv4_x(output)

        cat_output = torch.cat([output, output2], 1)

        feature = self.conv2(cat_output)

        L2norm_feature = L2NormDense()(feature)

        x_compress = self.compress(cat_output)
        x_out = self.spatial(x_compress)
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid.
        mask = torch.sigmoid(x_out)

        # FIX: renamed the misspelled local "masked_featrue".
        masked_feature = L2norm_feature * mask

        return L2norm_feature, masked_feature

class ResDDFNCAM3SAMMask(nn.Module):
    """Channel-attention ResDDFN that also produces a spatial-attention
    mask from the last stage's features.

    Returns:
        (L2norm_feature, masked_feature): the L2-normalised descriptor map
        and the same map multiplied by the sigmoid spatial mask.
    """

    def __init__(self, out_ch=9):
        super().__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        self.conv2_x = BlockRes2ConvBRCAM(32, 32)
        self.conv3_x = BlockRes2ConvBRCAM(32, 64)
        self.conv4_x = BlockRes2ConvBRCAM(64, 128)

        self.conv2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(128, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

        # Mask head: SpatialGate internals (channel pool + 7x7 conv) used
        # to produce a mask instead of gating in place.
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, 7, stride=1, padding=3, relu=False)

    def forward(self, input):
        input = input_norm(input)

        input = self.conv1(input)
        input = self.conv2_x(input)
        input = self.conv3_x(input)
        input = self.conv4_x(input)
        feature = self.conv2(input)

        # Mask is computed from the 128-channel stage output, not from the
        # reduced feature map.
        x_compress = self.compress(input)
        x_out = self.spatial(x_compress)
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid.
        mask = torch.sigmoid(x_out)

        L2norm_feature = L2NormDense()(feature)

        # FIX: renamed the misspelled local "masked_featrue".
        masked_feature = L2norm_feature * mask

        return L2norm_feature, masked_feature

class DeepResDDFN(nn.Module):
    """Configurable-depth residual descriptor network.

    Args:
        out_ch: number of output descriptor channels.
        ch_list: per-block output channel counts. When None or empty, a
            channel-doubling schedule of ``block_nums`` blocks is generated
            (32, 64, 128, ...).
        block_nums: number of blocks when ch_list is not given.
        ret_ip: when True, forward returns the intermediate outputs of all
            blocks instead of (feature, L2-normalised feature).
    """

    def __init__(self, out_ch=9,
        ch_list=None, block_nums=3, ret_ip=False
    ):
        super().__init__()

        self.in_channels = 32
        self.ret_ip = ret_ip

        # FIX: the original used a mutable default argument (ch_list=[])
        # and appended to it, so the generated schedule leaked between
        # instances. Build a fresh list every call instead.
        if not ch_list:
            ch_list = [self.in_channels * (2 ** i) for i in range(block_nums)]
        else:
            ch_list = list(ch_list)

        self.ft_channels = ch_list[-1]

        self.extend = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        # Register each block and keep an ordered reference list (the
        # original probed attributes with getattr up to a hard-coded limit).
        self.block_list = []
        in_ch = self.in_channels
        for ids, r in enumerate(ch_list):
            block = BlockRes2ConvBR(in_ch, r)
            self.add_module(f"block{ids}", block)
            self.block_list.append(block)
            in_ch = r

        self.reduce = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(self.ft_channels, out_ch,
                kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

    def forward(self, input):
        x = input_norm(input)
        x = self.extend(x)

        intermediates = []
        for block in self.block_list:
            x = block(x)
            intermediates.append(x)

        if self.ret_ip:
            # Return every block's output (used by feature-pyramid callers).
            return tuple(intermediates)

        feature = self.reduce(x)
        L2norm_feature = L2NormDense()(feature)
        return feature, L2norm_feature

class DeepResDDFN5(DeepResDDFN):
    """Five-block DeepResDDFN using the default channel-doubling schedule."""

    def __init__(self, out_ch=9):
        super().__init__(out_ch=out_ch, block_nums=5)

class DeepResDDFN6a(DeepResDDFN):
    """Six-block DeepResDDFN with explicit channels 32/64/64/128/128/256."""

    def __init__(self, out_ch=9):
        channels = [32, 64, 64, 128, 128, 256]
        super().__init__(out_ch=out_ch, ch_list=channels)

class DeepResDDFN6b(DeepResDDFN):
    """Six-block DeepResDDFN with channels 32/32/64/64/128/128.

    With ret_ip=True, forward returns only the outputs of blocks 1, 3 and
    5 (the second block of each channel width) instead of all six.
    """

    def __init__(self, out_ch=9, ret_ip=False):
        super().__init__(
            out_ch=out_ch,
            ch_list=[32, 32, 64, 64, 128, 128],
            ret_ip=ret_ip,
        )
        self.ret_ip = ret_ip

    def forward(self, x):
        ret = super().forward(x)
        if not self.ret_ip:
            return ret
        # Keep every second intermediate: the last block of each width.
        outputs = list(ret)
        return (outputs[1], outputs[3], outputs[5])

class DeepResDDFN6(DeepResDDFN):
    """Six-block DeepResDDFN using the default channel-doubling schedule."""

    def __init__(self, out_ch=9):
        super().__init__(out_ch=out_ch, block_nums=6)

class DeepResDDFN4(DeepResDDFN):
    """Four-block DeepResDDFN using the default channel-doubling schedule."""

    def __init__(self, out_ch=9):
        super().__init__(out_ch=out_ch, block_nums=4)

class Resstruct(nn.Module):
    """Wraps an arbitrary module with a residual shortcut.

    The shortcut is the identity when ``in_ch == out_ch``, otherwise a
    1x1 Conv-BN projection. No activation is applied after the addition.
    """

    def __init__(
        self, in_ch, out_ch, block
    ):
        super().__init__()

        if in_ch == out_ch:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False),
                nn.BatchNorm2d(out_ch),
            )

        self.block = block

    def forward(self, x):
        # Residual addition only; callers add any activation themselves.
        return self.shortcut(x) + self.block(x)

class DeepRRDDFN(nn.Module):
    """Residual-of-residual descriptor network.

    Each entry of ``ch_list`` describes one macro-block: an int gives a
    single Block2ConvBR, a list of ints gives a chain of Block2ConvBRs;
    every macro-block is wrapped with a residual shortcut (Resstruct).

    Args:
        out_ch: number of output descriptor channels.
        ch_list: channel schedule (ints and/or lists of ints); must be
            non-empty.
        ret_ip: when True, forward returns each macro-block's output
            instead of (feature, L2-normalised feature).
    """

    def __init__(self, out_ch=9,
        ch_list=None, ret_ip=False
    ):
        super().__init__()

        self.in_channels = 32
        self.ret_ip = ret_ip

        # FIX: avoid the mutable default argument (ch_list=[]); normalise
        # every entry to a list of channel counts.
        ch_list = [
            c if isinstance(c, list) else [c]
            for c in (ch_list or [])
        ]

        self.ft_channels = ch_list[-1][-1]

        self.extend = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )

        self.reduce = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(self.ft_channels, out_ch,
                kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch)
        )

        # Register each macro-block and keep an ordered reference list.
        self.block_list = []
        in_ch = self.in_channels
        for ids, chs in enumerate(ch_list):
            block = self._make_block(in_ch, chs)
            self.add_module(f"block{ids}", block)
            self.block_list.append(block)
            in_ch = chs[-1]

    def _make_block(self, in_ch: int, out_ch: list) -> nn.Module:
        """Chain Block2ConvBRs over ``out_ch`` and wrap them in a Resstruct."""
        b_list = []
        in_c = in_ch
        for o in out_ch:
            b_list.append(Block2ConvBR(in_c, o))
            in_c = o

        bs_block = nn.Sequential(*b_list)
        return Resstruct(in_ch, out_ch[-1], block=bs_block)

    def forward(self, input):
        x = input_norm(input)
        x = self.extend(x)

        intermediates = []
        for block in self.block_list:
            x = block(x)
            intermediates.append(x)

        if self.ret_ip:
            # Return every macro-block's output.
            return tuple(intermediates)

        feature = self.reduce(x)
        L2norm_feature = L2NormDense()(feature)
        return feature, L2norm_feature

class DeepRRDDFN6(DeepRRDDFN):
    """Three macro-blocks of two Block2ConvBRs each (32->64->128 channels).

    NOTE: the ``ch_list`` argument is accepted for signature compatibility
    but ignored; the schedule is fixed.
    """

    def __init__(self, out_ch=9,
        ch_list=[], ret_ip=False
    ):
        schedule = [[32, 32], [64, 64], [128, 128]]
        super().__init__(out_ch, schedule, ret_ip)

if __name__ == "__main__":

    # Smoke test: build one of the networks, run a dummy batch on the GPU
    # and print the resulting feature-map shape. The commented lines are
    # alternative networks kept for quick manual switching.
    # net = DDFN()
    # net = ResCAMDDFN()
    # net = ResSCDDFN()
    # net = ResDDFNnoReduct()
    # net = SSLCCAM3SAM()
    # net = SSLC_SAMMask()
    # net = ResDDFNCAM3SAMMask()
    # net = DeepResDDFN5().to("cuda")
    net = DeepRRDDFN6().to("cuda")
    # net = Resstruct(10, 20, BlockRes2ConvBR(10, 20)).cuda()
    print(net)

    # Dummy single-channel batch: (batch=16, channels=1, 128x128).
    in_ = torch.zeros((16, 1, 128, 128)).to("cuda")

    # All networks return (feature, L2-normalised feature); only the
    # first element's shape is inspected here.
    output, _ = net(in_)
    print(output.shape)
