""" Parts of the U-Net model """

import torch
import torch.nn as nn
import torch.nn.functional as F


class DoubleConv(nn.Module):
    """Two consecutive (3x3 conv => BatchNorm => ReLU) stages.

    Spatial size is preserved (padding=1). The intermediate width defaults
    to ``out_channels`` when ``mid_channels`` is not given.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        stages = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.double_conv(x)
        return out


class Down(nn.Module):
    """Downscaling with maxpool then double conv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Halve H/W with a 2x2 max-pool, then apply the two-conv stage.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        out = self.maxpool_conv(x)
        return out


class Up(nn.Module):
    """Upscaling then double conv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free 2x upsampling; the DoubleConv (with a halved
            # mid width) takes care of reducing the channel count.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            # Learned upsampling that already halves the channels.
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """x1: decoder feature to upsample; x2: encoder skip connection."""
        x1 = self.up(x1)
        # Tensors are NCHW: pad x1 so its H/W match the skip tensor x2.
        dh = x2.size()[2] - x1.size()[2]
        dw = x2.size()[3] - x1.size()[3]
        pad_left = dw // 2
        pad_top = dh // 2
        x1 = F.pad(x1, [pad_left, dw - pad_left, pad_top, dh - pad_top])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        return self.conv(torch.cat([x2, x1], dim=1))


class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to output channels."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        out = self.conv(x)
        return out



#----------------------------------------------
#   Custom additions (author's own blocks)
#----------------------------------------------
# Basic convolution block used by all RSU modules.
class ReBNConv(nn.Module):
    """3x3 conv => BatchNorm => ReLU, with optional dilation (``dirate``)."""

    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(ReBNConv, self).__init__()
        # Padding equals the dilation rate, so the spatial size is preserved.
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=dirate, dilation=dirate)
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu_s1(self.bn_s1(self.conv_s1(x)))

# Upsampling helper shared by the RSU decoders.
def _upsample_like(src, tar, scale=2):
    """Bilinearly resize ``src`` to the spatial size (H, W) of ``tar``.

    The encoder pools use ``ceil_mode=True``, so on odd input sizes a fixed
    2x upsample (``scale_factor=2``) overshoots the skip tensor by one pixel
    and the subsequent ``torch.cat`` fails. Resizing directly to ``tar``'s
    H/W is always exact, and is identical to a 2x upsample whenever the
    sizes do line up.

    ``scale`` is kept for backward compatibility but is no longer used.
    """
    return F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=False)


# RSU-7 #
class RSU7(nn.Module):
    """Residual U-block with six pooled encoder levels and a dilated bottom.

    Output is decoder(top level) + input projection (residual connection).
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU7, self).__init__()
        # Input projection to out_ch; its output is also the residual branch.
        self.rebnconvin = ReBNConv(in_ch, out_ch, dirate=1)
        self.rebnconv1 = ReBNConv(out_ch, mid_ch, dirate=1)

        # Encoder: five pool+conv stages (ceil_mode keeps odd sizes valid).
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv2 = ReBNConv(mid_ch, mid_ch, dirate=1)

        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv3 = ReBNConv(mid_ch, mid_ch, dirate=1)

        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv4 = ReBNConv(mid_ch, mid_ch, dirate=1)

        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv5 = ReBNConv(mid_ch, mid_ch, dirate=1)

        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv6 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Bottom: dilated conv, no further downsampling.
        self.rebnconv7 = ReBNConv(mid_ch, mid_ch, dirate=2)

        # Decoder: each stage fuses an upsampled feature with its skip
        # (hence mid_ch * 2 input channels).
        self.rebnconv6d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv5d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = ReBNConv(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        feat_in = self.rebnconvin(x)

        # Encoder path: resolution halves after each pool.
        e1 = self.rebnconv1(feat_in)
        e2 = self.rebnconv2(self.pool1(e1))
        e3 = self.rebnconv3(self.pool2(e2))
        e4 = self.rebnconv4(self.pool3(e3))
        e5 = self.rebnconv5(self.pool4(e4))
        e6 = self.rebnconv6(self.pool5(e5))

        # Dilated bottom conv at the coarsest resolution (same H/W as e6).
        e7 = self.rebnconv7(e6)

        # Decoder path: concat with the skip, conv, upsample to next level.
        d6 = self.rebnconv6d(torch.cat([e7, e6], dim=1))
        d5 = self.rebnconv5d(torch.cat([_upsample_like(d6, e5), e5], dim=1))
        d4 = self.rebnconv4d(torch.cat([_upsample_like(d5, e4), e4], dim=1))
        d3 = self.rebnconv3d(torch.cat([_upsample_like(d4, e3), e3], dim=1))
        d2 = self.rebnconv2d(torch.cat([_upsample_like(d3, e2), e2], dim=1))
        d1 = self.rebnconv1d(torch.cat([_upsample_like(d2, e1), e1], dim=1))

        # Residual connection around the whole block.
        return d1 + feat_in


# RSU-6 #
class RSU6(nn.Module):
    """Residual U-block with five pooled encoder levels plus a bottom conv.

    Output is decoder(top level) + input projection (residual connection).
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU6, self).__init__()
        # Input projection; its output is also the residual branch.
        self.rebnconvin = ReBNConv(in_ch, out_ch, dirate=1)
        self.rebnconv1 = ReBNConv(out_ch, mid_ch, dirate=1)

        # down
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv2 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv3 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv4 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv5 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Bottom conv — no pooling here; runs at the same resolution as
        # rebnconv5 (the old "# down" comment above this line was wrong).
        self.rebnconv6 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Decoder: each stage fuses an upsampled feature with its skip.
        self.rebnconv5d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = ReBNConv(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)

        # Encoder
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)

        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)

        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)

        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)

        # BUGFIX: the bottom conv must consume hx5 (cf. RSU7 in this file,
        # where rebnconv7 consumes hx6); it previously re-read the pooled
        # `hx`, bypassing rebnconv5 entirely.
        hx6 = self.rebnconv6(hx5)

        # Decoder: concat with the skip, conv, then upsample to next level.
        hx5d = self.rebnconv5d(torch.cat([hx6, hx5], dim=1))
        hx5dup = _upsample_like(hx5d, hx4)

        hx4d = self.rebnconv4d(torch.cat([hx5dup, hx4], dim=1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.rebnconv3d(torch.cat([hx4dup, hx3], dim=1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat([hx3dup, hx2], dim=1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat([hx2dup, hx1], dim=1))

        # Residual connection around the whole block.
        return hx1d + hxin


# RSU-5 #
class RSU5(nn.Module):
    """Residual U-block with four pooled encoder levels plus a bottom conv.

    Output is decoder(top level) + input projection (residual connection).
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU5, self).__init__()
        # Input projection; its output is also the residual branch.
        self.rebnconvin = ReBNConv(in_ch, out_ch, dirate=1)
        self.rebnconv1 = ReBNConv(out_ch, mid_ch, dirate=1)

        # down
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv2 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv3 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv4 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Bottom conv — same resolution as rebnconv4, no pooling.
        self.rebnconv5 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Decoder: each stage fuses an upsampled feature with its skip.
        self.rebnconv4d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = ReBNConv(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)

        # Encoder
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)

        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)

        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)

        # BUGFIX: the bottom conv must consume hx4 (cf. RSU7's
        # rebnconv7(hx6)); it previously re-read the pooled `hx`,
        # bypassing rebnconv4 entirely.
        hx5 = self.rebnconv5(hx4)

        # Decoder: concat with the skip, conv, then upsample to next level.
        hx4d = self.rebnconv4d(torch.cat([hx5, hx4], dim=1))
        hx4dup = _upsample_like(hx4d, hx3)

        hx3d = self.rebnconv3d(torch.cat([hx4dup, hx3], dim=1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat([hx3dup, hx2], dim=1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat([hx2dup, hx1], dim=1))

        # Residual connection around the whole block.
        return hx1d + hxin


# RSU-4 #
class RSU4(nn.Module):
    """Residual U-block with three pooled encoder levels plus a bottom conv.

    Output is decoder(top level) + input projection (residual connection).
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4, self).__init__()
        # Input projection; its output is also the residual branch.
        self.rebnconvin = ReBNConv(in_ch, out_ch, dirate=1)
        self.rebnconv1 = ReBNConv(out_ch, mid_ch, dirate=1)

        # down
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv2 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # down
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.rebnconv3 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Bottom conv — same resolution as rebnconv3, no pooling.
        self.rebnconv4 = ReBNConv(mid_ch, mid_ch, dirate=1)

        # Decoder: each stage fuses an upsampled feature with its skip.
        self.rebnconv3d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = ReBNConv(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = ReBNConv(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)

        # Encoder
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)

        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)

        # BUGFIX: the bottom conv must consume hx3 (cf. RSU7's
        # rebnconv7(hx6)); it previously re-read the pooled `hx`,
        # bypassing rebnconv3 entirely.
        hx4 = self.rebnconv4(hx3)

        # Decoder: concat with the skip, conv, then upsample to next level.
        hx3d = self.rebnconv3d(torch.cat([hx4, hx3], dim=1))
        hx3dup = _upsample_like(hx3d, hx2)

        hx2d = self.rebnconv2d(torch.cat([hx3dup, hx2], dim=1))
        hx2dup = _upsample_like(hx2d, hx1)

        hx1d = self.rebnconv1d(torch.cat([hx2dup, hx1], dim=1))

        # Residual connection around the whole block.
        return hx1d + hxin


# RSU-4F #
class RSU4F(nn.Module):
    """Dilation-only residual U-block: no pooling or upsampling.

    All features stay at the input resolution; the receptive field grows
    through increasing dilation rates (1, 2, 4, 8) instead of downsampling.
    Output is decoder(top level) + input projection (residual connection).
    """

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4F, self).__init__()
        # Input projection; its output is also the residual branch.
        self.rebnconvin = ReBNConv(in_ch, out_ch, dirate=1)

        # Encoder: growing dilation instead of downsampling.
        self.rebnconv1 = ReBNConv(out_ch, mid_ch, dirate=1)
        self.rebnconv2 = ReBNConv(mid_ch, mid_ch, dirate=2)
        self.rebnconv3 = ReBNConv(mid_ch, mid_ch, dirate=4)

        self.rebnconv4 = ReBNConv(mid_ch, mid_ch, dirate=8)

        # Decoder with mirrored dilation rates.
        self.rebnconv3d = ReBNConv(mid_ch * 2, mid_ch, dirate=4)
        self.rebnconv2d = ReBNConv(mid_ch * 2, mid_ch, dirate=2)
        self.rebnconv1d = ReBNConv(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x

        hxin = self.rebnconvin(hx)

        hx1 = self.rebnconv1(hxin)
        hx2 = self.rebnconv2(hx1)
        # BUGFIX: stages 3 and 4 previously re-applied rebnconv2, which left
        # rebnconv3 (dirate=4) and rebnconv4 (dirate=8) as unused dead
        # parameters and broke the intended dilation pyramid.
        hx3 = self.rebnconv3(hx2)

        hx4 = self.rebnconv4(hx3)

        # Decoder: concat with the skip at the same resolution, then conv.
        hx3d = self.rebnconv3d(torch.cat([hx4, hx3], dim=1))
        hx2d = self.rebnconv2d(torch.cat([hx3d, hx2], dim=1))
        hx1d = self.rebnconv1d(torch.cat([hx2d, hx1], dim=1))

        # Residual connection around the whole block.
        return hx1d + hxin



