from torch import nn
import torch


# class SRCNN(nn.Module):
#     def __init__(self, num_channels=1):
#         super(SRCNN, self).__init__()
#         self.conv1 = nn.Conv2d(num_channels, 64, kernel_size=9, padding=9 // 2)
#         self.conv2 = nn.Conv2d(64, 32, kernel_size=5, padding=5 // 2)
#         self.conv3 = nn.Conv2d(32, 1, kernel_size=5, padding=5 // 2)
#         self.relu = nn.ReLU(inplace=True)
#
#     def forward(self, x):
#         x = self.relu(self.conv1(x))
#         x = self.relu(self.conv2(x))
#         x = self.conv3(x)
#         return x


## ************** 下面是Unet ****************  ###
## ************** 下面是Unet ****************  ###
# class VGGBlock(nn.Module):
#     def __init__(self, in_channels, middle_channels, out_channels):
#         super().__init__()
#         self.relu = nn.ReLU(inplace=True)
#         self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
#         # self.bn1 = nn.BatchNorm2d(middle_channels)
#         self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
#         # self.bn2 = nn.BatchNorm2d(out_channels)
#
#     def forward(self, x):
#         out = self.conv1(x)
#         # out = self.bn1(out)
#         out = self.relu(out)
#
#         out = self.conv2(out)
#         # out = self.bn2(out)
#         out = self.relu(out)
#
#         return out
#
# class UNet(nn.Module):
#     def __init__(self, input_channels=1, deep_supervision=False,**kwargs):
#         super().__init__()
#
#         nb_filter = [64, 128, 256, 512,1024]
#
#         self.pool = nn.MaxPool2d(2, 2)
#         self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
#
#         self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
#         self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
#         self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
#         self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
#         self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])
#
#         self.conv3_1 = VGGBlock(nb_filter[3]+nb_filter[4], nb_filter[3], nb_filter[3])
#         self.conv2_2 = VGGBlock(nb_filter[2]+nb_filter[3], nb_filter[2], nb_filter[2])
#         self.conv1_3 = VGGBlock(nb_filter[1]+nb_filter[2], nb_filter[1], nb_filter[1])
#         self.conv0_4 = VGGBlock(nb_filter[0]+nb_filter[1], nb_filter[0], nb_filter[0])
#
#         self.final = nn.Conv2d(nb_filter[0], 1, kernel_size=1)
#
#     def forward(self, input):
#         x0_0 = self.conv0_0(input)
#         x1_0 = self.conv1_0(self.pool(x0_0))
#         x2_0 = self.conv2_0(self.pool(x1_0))
#         x3_0 = self.conv3_0(self.pool(x2_0))
#         x4_0 = self.conv4_0(self.pool(x3_0))
#
#         # x3_1 = self.conv3_1(torch.cat((x3_0, self.up(x4_0)), 1))
#         # x2_2 = self.conv2_2(torch.cat((x2_0, self.up(x3_1)), 1))
#         # x1_3 = self.conv1_3(torch.cat((x1_0, self.up(x2_2)), 1))
#         # x0_4 = self.conv0_4(torch.cat((x0_0, self.up(x1_3)), 1))
#
#         x3_1 = self.conv3_1(torch.cat((x3_0, self.up(x4_0)), 1))
#         x2_2 = self.conv2_2(torch.cat((x2_0, self.up(x3_1)), 1))
#         x1_3 = self.conv1_3(torch.cat((x1_0, self.up(x2_2)), 1))
#         x0_4 = self.conv0_4(torch.cat((x0_0, self.up(x1_3)), 1))
#
#         output = self.final(x0_4)
#         return output
# ## Unet ###
#
#
# ###****** ResAttUnet  *********###
class resconv_block(nn.Module):
    """Residual block without batch norm: two 3x3 conv+ReLU layers plus a
    1x1-conv shortcut that matches the channel count."""

    def __init__(self, ch_in, ch_out):
        super(resconv_block, self).__init__()
        # Main path: two 3x3 convolutions, each followed by an in-place ReLU.
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
        )
        # Shortcut path: 1x1 convolution so the two branches can be summed.
        self.Conv_1x1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        shortcut = self.Conv_1x1(x)
        main = self.conv(x)
        return main + shortcut

class up_conv(nn.Module):
    """2x upsample (default nearest mode) followed by a 3x3 conv and ReLU."""

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)


class Attention_block(nn.Module):
    """Additive attention gate (no batch norm): scales the skip features x by
    a single-channel sigmoid mask computed from the gating signal g and x."""

    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()
        # 1x1 projection of the gating signal to the intermediate width.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
        )

        # 1x1 projection of the skip-connection features.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
        )

        # Collapse to one channel and squash into (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.Sigmoid(),
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        combined = self.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)
        return x * attention

class conv_block(nn.Module):
    """Two consecutive 3x3 conv + ReLU layers (no batch norm)."""

    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)


class ResAttU_Net(nn.Module):
    """Residual attention U-Net without batch normalisation.

    ``UnetLayer`` selects the effective depth at forward time: 3 uses only
    Conv1-3, >3 adds the Conv4/Att4 level, >4 adds the Conv5/Att5 level.
    All five levels of modules are always constructed so the parameter
    layout (state_dict) is identical regardless of the chosen depth.

    Fix vs. original: the original computed ``d3 = self.Up3(x3)`` and
    ``d4 = self.Up4(x4)`` unconditionally and then discarded the results
    whenever the deeper branch ran, wasting full upsample+conv passes.
    Those calls are now only made on the branch that actually uses them;
    outputs and gradients are unchanged.
    """

    def __init__(self, UnetLayer=4, img_ch=1, output_ch=1, first_layer_numKernel=64):
        super(ResAttU_Net, self).__init__()

        self.UnetLayer = UnetLayer
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: residual conv blocks, doubling channels at each level.
        self.Conv1 = resconv_block(ch_in=img_ch, ch_out=first_layer_numKernel)
        self.Conv2 = resconv_block(ch_in=first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Conv3 = resconv_block(ch_in=2 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Conv4 = resconv_block(ch_in=4 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Conv5 = resconv_block(ch_in=8 * first_layer_numKernel, ch_out=16 * first_layer_numKernel)

        # Decoder: upsample, attention-gate the skip, concat, then conv.
        self.Up5 = up_conv(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Att5 = Attention_block(F_g=8 * first_layer_numKernel, F_l=8 * first_layer_numKernel,
                                    F_int=4 * first_layer_numKernel)
        self.Up_conv5 = conv_block(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)

        self.Up4 = up_conv(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Att4 = Attention_block(F_g=4 * first_layer_numKernel, F_l=4 * first_layer_numKernel,
                                    F_int=2 * first_layer_numKernel)
        self.Up_conv4 = conv_block(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)

        self.Up3 = up_conv(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Att3 = Attention_block(F_g=2 * first_layer_numKernel, F_l=2 * first_layer_numKernel,
                                    F_int=first_layer_numKernel)
        self.Up_conv3 = conv_block(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)

        self.Up2 = up_conv(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)
        self.Att2 = Attention_block(F_g=first_layer_numKernel, F_l=first_layer_numKernel,
                                    F_int=int(first_layer_numKernel / 2))
        self.Up_conv2 = conv_block(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv_1x1 = nn.Conv2d(first_layer_numKernel, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # ---- encoding path ----
        x1 = self.Conv1(x)

        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)

        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)

        if self.UnetLayer > 3:
            x4 = self.Maxpool(x3)
            x4 = self.Conv4(x4)

            if self.UnetLayer > 4:
                x5 = self.Maxpool(x4)
                x5 = self.Conv5(x5)

                # ---- decoding + attention + concat, level 5 ----
                d5 = self.Up5(x5)
                x4 = self.Att5(g=d5, x=x4)
                d5 = torch.cat((x4, d5), dim=1)
                d5 = self.Up_conv5(d5)

                d4 = self.Up4(d5)
            else:
                d4 = self.Up4(x4)

            x3 = self.Att4(g=d4, x=x3)
            d4 = torch.cat((x3, d4), dim=1)
            d4 = self.Up_conv4(d4)

            d3 = self.Up3(d4)
        else:
            d3 = self.Up3(x3)

        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)

        return d1
### -------- Below: BNAnet (ResAttU_Net variant with batch norm) ----------
class bn_resconv_block(nn.Module):
    """Residual block with batch norm: two (3x3 conv -> BN -> ReLU) stages
    plus a 1x1-conv shortcut that matches the channel count."""

    def __init__(self, ch_in, ch_out):
        super(bn_resconv_block, self).__init__()
        # Main path: conv -> BN -> ReLU, twice.
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )
        # Shortcut path: 1x1 convolution so the two branches can be summed.
        self.Conv_1x1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        shortcut = self.Conv_1x1(x)
        main = self.conv(x)
        return main + shortcut

class bn_up_conv(nn.Module):
    """2x upsample (default nearest mode) followed by a 3x3 conv and ReLU.

    NOTE(review): despite the ``bn_`` prefix, the batch-norm layer here is
    commented out in the original, so this is identical to ``up_conv``.
    """

    def __init__(self, ch_in, ch_out):
        super(bn_up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)


class bn_Attention_block(nn.Module):
    """Additive attention gate with batch norm: scales the skip features x by
    a single-channel sigmoid mask computed from the gating signal g and x."""

    def __init__(self, F_g, F_l, F_int):
        super(bn_Attention_block, self).__init__()
        # 1x1 projection of the gating signal, batch-normalised.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )

        # 1x1 projection of the skip-connection features, batch-normalised.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )

        # Collapse to one channel, normalise, and squash into (0, 1).
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        combined = self.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)
        return x * attention

class bn_conv_block(nn.Module):
    """Two consecutive (3x3 conv -> BN -> ReLU) stages."""

    def __init__(self, ch_in, ch_out):
        super(bn_conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)
class BNAnet(nn.Module):
    """Residual attention U-Net using the batch-normalised building blocks.

    ``UnetLayer`` selects the effective depth at forward time: 3 uses only
    Conv1-3, >3 adds the Conv4/Att4 level, >4 adds the Conv5/Att5 level.
    All five levels of modules are always constructed so the parameter
    layout (state_dict) is identical regardless of the chosen depth.

    Fix vs. original: ``d3 = self.Up3(x3)`` and ``d4 = self.Up4(x4)`` were
    computed unconditionally and discarded whenever the deeper branch ran,
    wasting full upsample+conv passes.  They are now computed only on the
    branch that uses them; outputs and gradients are unchanged (the up-conv
    blocks carry no batch-norm state, so skipping them has no side effect).
    """

    def __init__(self, UnetLayer=4, img_ch=1, output_ch=1, first_layer_numKernel=64):
        super(BNAnet, self).__init__()

        self.UnetLayer = UnetLayer
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: residual conv blocks with BN, doubling channels per level.
        self.Conv1 = bn_resconv_block(ch_in=img_ch, ch_out=first_layer_numKernel)
        self.Conv2 = bn_resconv_block(ch_in=first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Conv3 = bn_resconv_block(ch_in=2 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Conv4 = bn_resconv_block(ch_in=4 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Conv5 = bn_resconv_block(ch_in=8 * first_layer_numKernel, ch_out=16 * first_layer_numKernel)

        # Decoder: upsample, attention-gate the skip, concat, then conv.
        self.Up5 = bn_up_conv(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Att5 = bn_Attention_block(F_g=8 * first_layer_numKernel, F_l=8 * first_layer_numKernel,
                                    F_int=4 * first_layer_numKernel)
        self.Up_conv5 = bn_conv_block(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)

        self.Up4 = bn_up_conv(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Att4 = bn_Attention_block(F_g=4 * first_layer_numKernel, F_l=4 * first_layer_numKernel,
                                    F_int=2 * first_layer_numKernel)
        self.Up_conv4 = bn_conv_block(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)

        self.Up3 = bn_up_conv(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Att3 = bn_Attention_block(F_g=2 * first_layer_numKernel, F_l=2 * first_layer_numKernel,
                                    F_int=first_layer_numKernel)
        self.Up_conv3 = bn_conv_block(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)

        self.Up2 = bn_up_conv(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)
        self.Att2 = bn_Attention_block(F_g=first_layer_numKernel, F_l=first_layer_numKernel,
                                    F_int=int(first_layer_numKernel / 2))
        self.Up_conv2 = bn_conv_block(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv_1x1 = nn.Conv2d(first_layer_numKernel, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # ---- encoding path ----
        x1 = self.Conv1(x)

        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)

        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)

        if self.UnetLayer > 3:
            x4 = self.Maxpool(x3)
            x4 = self.Conv4(x4)

            if self.UnetLayer > 4:
                x5 = self.Maxpool(x4)
                x5 = self.Conv5(x5)

                # ---- decoding + attention + concat, level 5 ----
                d5 = self.Up5(x5)
                x4 = self.Att5(g=d5, x=x4)
                d5 = torch.cat((x4, d5), dim=1)
                d5 = self.Up_conv5(d5)

                d4 = self.Up4(d5)
            else:
                d4 = self.Up4(x4)

            x3 = self.Att4(g=d4, x=x3)
            d4 = torch.cat((x3, d4), dim=1)
            d4 = self.Up_conv4(d4)

            d3 = self.Up3(d4)
        else:
            d3 = self.Up3(x3)

        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)

        return d1


### ---------- Below: ResAttU_Net_noBN (ResAttU_Net without batch norm) -----------
class ResAttU_Net_noBN(nn.Module):
    """Residual attention U-Net without batch normalisation.

    NOTE(review): this class is byte-for-byte equivalent in structure to
    ``ResAttU_Net`` above (both use the BN-free building blocks); it is kept
    as a separate class so existing configs/checkpoints referencing this
    name keep working.

    Fix vs. original: ``d3 = self.Up3(x3)`` and ``d4 = self.Up4(x4)`` were
    computed unconditionally and discarded whenever the deeper branch ran,
    wasting full upsample+conv passes.  They are now computed only on the
    branch that uses them; outputs and gradients are unchanged.
    """

    def __init__(self, UnetLayer=4, img_ch=1, output_ch=1, first_layer_numKernel=64):
        super(ResAttU_Net_noBN, self).__init__()

        self.UnetLayer = UnetLayer
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: residual conv blocks, doubling channels at each level.
        self.Conv1 = resconv_block(ch_in=img_ch, ch_out=first_layer_numKernel)
        self.Conv2 = resconv_block(ch_in=first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Conv3 = resconv_block(ch_in=2 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Conv4 = resconv_block(ch_in=4 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Conv5 = resconv_block(ch_in=8 * first_layer_numKernel, ch_out=16 * first_layer_numKernel)

        # Decoder: upsample, attention-gate the skip, concat, then conv.
        self.Up5 = up_conv(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Att5 = Attention_block(F_g=8 * first_layer_numKernel, F_l=8 * first_layer_numKernel,
                                    F_int=4 * first_layer_numKernel)
        self.Up_conv5 = conv_block(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)

        self.Up4 = up_conv(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Att4 = Attention_block(F_g=4 * first_layer_numKernel, F_l=4 * first_layer_numKernel,
                                    F_int=2 * first_layer_numKernel)
        self.Up_conv4 = conv_block(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)

        self.Up3 = up_conv(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Att3 = Attention_block(F_g=2 * first_layer_numKernel, F_l=2 * first_layer_numKernel,
                                    F_int=first_layer_numKernel)
        self.Up_conv3 = conv_block(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)

        self.Up2 = up_conv(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)
        self.Att2 = Attention_block(F_g=first_layer_numKernel, F_l=first_layer_numKernel,
                                    F_int=int(first_layer_numKernel / 2))
        self.Up_conv2 = conv_block(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv_1x1 = nn.Conv2d(first_layer_numKernel, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # ---- encoding path ----
        x1 = self.Conv1(x)

        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)

        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)

        if self.UnetLayer > 3:
            x4 = self.Maxpool(x3)
            x4 = self.Conv4(x4)

            if self.UnetLayer > 4:
                x5 = self.Maxpool(x4)
                x5 = self.Conv5(x5)

                # ---- decoding + attention + concat, level 5 ----
                d5 = self.Up5(x5)
                x4 = self.Att5(g=d5, x=x4)
                d5 = torch.cat((x4, d5), dim=1)
                d5 = self.Up_conv5(d5)

                d4 = self.Up4(d5)
            else:
                d4 = self.Up4(x4)

            x3 = self.Att4(g=d4, x=x3)
            d4 = torch.cat((x3, d4), dim=1)
            d4 = self.Up_conv4(d4)

            d3 = self.Up3(d4)
        else:
            d3 = self.Up3(x3)

        x2 = self.Att3(g=d3, x=x2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=x1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)

        return d1

### ----------- Below: VDSR -----------
# Blog post: https://blog.csdn.net/weixin_43847596/article/details/121432177
# Reference implementation: https://github.com/jayepep/pytorch-vdsr-recurrence/blob/d74a11583fd6110a193e4f48a27b5c04792c08b6/vdsr.py#L10
from math import sqrt

# Repeating network building block
class Conv_ReLU_Block(nn.Module):
    """Single 3x3 conv (64 -> 64 channels, no bias) followed by ReLU — the
    repeating body unit of VDSR."""

    def __init__(self):
        super(Conv_ReLU_Block, self).__init__()
        # stride 1 with padding 1 keeps the spatial size; no bias term.
        self.conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
        # In-place ReLU modifies the conv output tensor directly, saving memory.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        return self.relu(out)


# Main network structure
class VDSR(nn.Module):
    """VDSR super-resolution network: an input conv, 18 stacked Conv+ReLU
    blocks, an output conv, and a global residual connection that adds the
    input image back onto the predicted residual."""

    def __init__(self):
        super(VDSR, self).__init__()
        self.residual_layer = self.make_layer(Conv_ReLU_Block, 18)
        self.input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
        self.output = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)

        # He-style Gaussian initialisation for every conv weight:
        # std = sqrt(2 / (kh * kw * out_channels)).
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, sqrt(2. / fan))

    def make_layer(self, block, num_of_layer):
        """Stack num_of_layer fresh instances of block into one nn.Sequential."""
        return nn.Sequential(*[block() for _ in range(num_of_layer)])

    def forward(self, x):
        skip = x
        out = self.relu(self.input(x))
        out = self.residual_layer(out)
        out = self.output(out)
        # Global residual: the network learns only the high-frequency detail.
        return torch.add(out, skip)

## ************** Below: UNet ****************  ###
class unet_conv_block(nn.Module):
    """Two consecutive (3x3 conv -> BN -> ReLU) stages, as used by UNet."""

    def __init__(self, ch_in, ch_out):
        super(unet_conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)

class unet_up_conv(nn.Module):
    """2x upsample (default nearest mode), then 3x3 conv -> BN -> ReLU."""

    def __init__(self, ch_in, ch_out):
        super(unet_up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)

class UNet(nn.Module):
    """Plain 4-level U-Net built from the batch-normalised blocks.

    NOTE(review): ``UnetLayer`` is stored but never read by ``forward``, and
    the level-5 modules (Conv5/Up5/Up_conv5) are registered but never called.
    Both are kept so the constructor signature and the checkpoint layout
    (state_dict keys) remain backward compatible.

    Fix vs. original: the original computed ``d3 = self.Up3(x3)`` early in
    ``forward`` and unconditionally overwrote it with ``self.Up3(d4)`` before
    any use — a wasted upsample+conv pass every call.  The dead computation
    is removed; outputs are unchanged.
    """

    def __init__(self, UnetLayer=4, img_ch=1, output_ch=1, first_layer_numKernel=64):
        super(UNet, self).__init__()

        self.Unetlayer = UnetLayer  # NOTE(review): unused by forward; kept for interface compatibility
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder blocks, doubling channels at each level.
        self.Conv1 = unet_conv_block(ch_in=img_ch, ch_out=first_layer_numKernel)
        self.Conv2 = unet_conv_block(ch_in=first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Conv3 = unet_conv_block(ch_in=2 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Conv4 = unet_conv_block(ch_in=4 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Conv5 = unet_conv_block(ch_in=8 * first_layer_numKernel, ch_out=16 * first_layer_numKernel)

        # Decoder blocks.  Level 5 is registered but unused (see class note).
        self.Up5 = unet_up_conv(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Up_conv5 = unet_conv_block(ch_in=16 * first_layer_numKernel, ch_out=8 * first_layer_numKernel)
        self.Up4 = unet_up_conv(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Up_conv4 = unet_conv_block(ch_in=8 * first_layer_numKernel, ch_out=4 * first_layer_numKernel)
        self.Up3 = unet_up_conv(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Up_conv3 = unet_conv_block(ch_in=4 * first_layer_numKernel, ch_out=2 * first_layer_numKernel)
        self.Up2 = unet_up_conv(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)
        self.Up_conv2 = unet_conv_block(ch_in=2 * first_layer_numKernel, ch_out=first_layer_numKernel)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv_1x1 = nn.Conv2d(first_layer_numKernel, output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, input):
        # ---- encoding path ----
        x1 = self.Conv1(input)

        x2 = self.Maxpool(x1)
        x2 = self.Conv2(x2)

        x3 = self.Maxpool(x2)
        x3 = self.Conv3(x3)

        x4 = self.Maxpool(x3)
        x4 = self.Conv4(x4)

        # ---- decoding path: upsample, concat the skip connection, conv ----
        d4 = self.Up4(x4)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        d1 = self.Conv_1x1(d2)

        return d1