from __future__ import print_function, division
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch
import SwinTransformer.models.swin_transformer as swinTransformer

class block(nn.Module):
    """Conv3x3 -> BatchNorm2d -> ReLU unit used by the decoder paths."""

    def __init__(self, inChannel, outChannel):
        super(block, self).__init__()
        self.conv = nn.Conv2d(inChannel, outChannel, kernel_size=3, padding=1)
        self.bn = nn.BatchNorm2d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Three stages applied in order; ReLU acts in place on the BN output.
        return self.reLu(self.bn(self.conv(x)))


class decoder(nn.Module):
    """Multi-path decoder: each encoder level is upsampled back to the
    level-0 resolution along its own chain of ConvTranspose2d stages
    (concatenating lower encoder levels as skips along the way), and the
    five full-resolution maps are fused by a final conv stack.

    depths[i] is the channel width of encoder level i.
    """

    def __init__(self, depths=[64, 64, 64, 64, 64]):
        # NOTE(review): mutable default argument — only read here, but
        # callers should not mutate the shared list.
        super(decoder, self).__init__()

        # Path for level 1: one 2x upsample to level-0 resolution.
        self.ConvTranspose1_0 = nn.ConvTranspose2d(depths[1], depths[0], 2, 2)
        self.Conv1_0 = nn.Sequential(block(depths[0], depths[0]),
                                     block(depths[0], depths[0]))

        # Path for level 2: two upsamples; the first concatenates the
        # level-1 skip, hence the *2 input width.
        self.ConvTranspose2_1 = nn.ConvTranspose2d(depths[2], depths[1], 2, 2)
        self.Conv2_1 = nn.Sequential(block(depths[1] * 2, depths[1]),
                                     block(depths[1], depths[1]))
        self.ConvTranspose2_0 = nn.ConvTranspose2d(depths[1], depths[0], 2, 2)
        self.Conv2_0 = nn.Sequential(block(depths[0], depths[0]),
                                     block(depths[0], depths[0]))

        # Path for level 3: three upsamples with skips from levels 2 and 1.
        self.ConvTranspose3_2 = nn.ConvTranspose2d(depths[3], depths[2], 2, 2)
        self.Conv3_2 = nn.Sequential(block(depths[2] * 2, depths[2]),
                                     block(depths[2], depths[2]))
        self.ConvTranspose3_1 = nn.ConvTranspose2d(depths[2], depths[1], 2, 2)
        self.Conv3_1 = nn.Sequential(block(depths[1] * 2, depths[1]),
                                     block(depths[1], depths[1]))
        self.ConvTranspose3_0 = nn.ConvTranspose2d(depths[1], depths[0], 2, 2)
        self.Conv3_0 = nn.Sequential(block(depths[0], depths[0]),
                                     block(depths[0], depths[0]))

        # Path for level 4: four upsamples with skips from levels 3, 2, 1.
        self.ConvTranspose4_3 = nn.ConvTranspose2d(depths[4], depths[3], 2, 2)
        self.Conv4_3 = nn.Sequential(block(depths[3] * 2, depths[3]),
                                     block(depths[3], depths[3]))
        self.ConvTranspose4_2 = nn.ConvTranspose2d(depths[3], depths[2], 2, 2)
        self.Conv4_2 = nn.Sequential(block(depths[2] * 2, depths[2]),
                                     block(depths[2], depths[2]))
        self.ConvTranspose4_1 = nn.ConvTranspose2d(depths[2], depths[1], 2, 2)
        self.Conv4_1 = nn.Sequential(block(depths[1] * 2, depths[1]),
                                     block(depths[1], depths[1]))
        self.ConvTranspose4_0 = nn.ConvTranspose2d(depths[1], depths[0], 2, 2)
        self.Conv4_0 = nn.Sequential(block(depths[0], depths[0]),
                                     block(depths[0], depths[0]))

        # Fusion of level 0 plus the four decoded paths (5 * depths[0] in).
        self.ConvAll = nn.Sequential(block(depths[0] * 5, depths[0]),
                                     block(depths[0], depths[0]))

    def forward(self, levels):
        # levels: list of 5 feature maps, levels[i] at 1/2**i resolution
        # with depths[i] channels (assumption inferred from the module
        # shapes above — confirm at the call site).
        level0 = levels[0]
        # decode level 1
        level1 = self.ConvTranspose1_0(levels[1])
        level1 = self.Conv1_0(level1)

        # decode level 2
        level2 = self.ConvTranspose2_1(levels[2])
        level2 = torch.cat([level2, levels[1]], dim=1)
        level2 = self.Conv2_1(level2)

        level2 = self.ConvTranspose2_0(level2)
        level2 = self.Conv2_0(level2)

        # decode level 3
        level3 = self.ConvTranspose3_2(levels[3])
        level3 = torch.cat([level3, levels[2]], dim=1)
        level3 = self.Conv3_2(level3)

        level3 = self.ConvTranspose3_1(level3)
        level3 = torch.cat([level3, levels[1]], dim=1)
        level3 = self.Conv3_1(level3)

        level3 = self.ConvTranspose3_0(level3)
        level3 = self.Conv3_0(level3)

        # decode level 4
        level4 = self.ConvTranspose4_3(levels[4])
        level4 = torch.cat([level4, levels[3]], dim=1)
        level4 = self.Conv4_3(level4)

        level4 = self.ConvTranspose4_2(level4)
        level4 = torch.cat([level4, levels[2]], dim=1)
        level4 = self.Conv4_2(level4)

        level4 = self.ConvTranspose4_1(level4)
        level4 = torch.cat([level4, levels[1]], dim=1)
        level4 = self.Conv4_1(level4)

        level4 = self.ConvTranspose4_0(level4)
        level4 = self.Conv4_0(level4)

        # fuse all levels
        out = torch.cat([level0, level1, level2, level3, level4], dim=1)
        out = self.ConvAll(out)

        return out


class LearnedAttention(nn.Module):
    """Learned spatial 'attention': a single Linear layer (shared
    weights) mixes positions along each spatial axis in turn, then a 1x1
    conv + BN + ReLU remaps channels.

    NOTE(review): self.attn is nn.Linear(H, H) and is applied to the last
    tensor dimension both before and after a transpose, so the input must
    be square with H == W == the constructor's H — confirm at call sites.
    """

    def __init__(self, H, inChannel, outChannel, rel_pos=True):
        # H: spatial side length the Linear mixing layer is sized for.
        # rel_pos is accepted but not used by this implementation.
        super(LearnedAttention, self).__init__()
        self.attn = nn.Sequential(
            nn.Linear(H, H, bias=True),
            # nn.BatchNorm2d(H),
            nn.ReLU(inplace=True),
        )
        self.attnChannel = nn.Sequential(
            nn.Conv2d(inChannel, outChannel, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(outChannel),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # x: (B, C, H, W)
        B, C, H, W = x.shape
        # B, inner_dim, H, W
        # x = x.reshape(B, C, H * W)
        # Mix along the width axis (Linear acts on the last dim), then
        # transpose and mix along the height axis with the SAME weights.
        x = self.attn(x)
        x = x.transpose(-1, -2)
        x = self.attn(x)
        x = x.transpose(-1, -2)
        # x = x.reshape(B, C, H, W)
        # 1x1 conv channel remap: inChannel -> outChannel.
        x = self.attnChannel(x)

        return x


def window_partition(x, window_size):
    """
    Split a channel-first feature map into non-overlapping square windows.

    Args:
        x: (B, C, H, W); H and W must be divisible by window_size
        window_size (int): window size

    Returns:
        windows: (num_windows*B, C, window_size, window_size)
    """
    B, C, H, W = x.shape
    x = x.view(B, C, H // window_size, window_size, W // window_size, window_size)
    x = x.permute(0, 2, 4, 1, 3, 5)  # B, H // window_size, W // window_size, C, window_size, window_size

    windows = x.reshape(-1, C, window_size, window_size)  # -1,C,winH,winW
    return windows


def window_reverse(windows, H, W):
    """
    Merge windows back into a full feature map (inverse of window_partition).

    Args:
        windows: (num_windows*B, C, window_size, window_size); the window
            size is inferred from the tensor shape
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, C, H, W)
    """
    _, C, window_size, _ = windows.shape

    # Recover the batch size from the window count.
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, C, window_size, window_size)
    x = x.permute(0, 3, 1, 4, 2, 5)  # B, C, H // window_size, window_size, W // window_size, window_size
    x = x.reshape(B, C, H, W)
    return x


class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Downsamples a (B, C, H, W) map 2x by gathering each 2x2 spatial
    neighbourhood into the channel dimension (4*C) and linearly reducing
    it to 2*C channels.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm

    NOTE(review): self.norm is constructed but its application in
    forward() is commented out, so it currently has no effect.
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # 4*dim -> 2*dim: merges the four shifted sub-grids built below.
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: (B, C, H, W) — channel-first, unlike the Swin original which
        takes (B, H*W, C).
        """
        H, W = self.input_resolution  # immediately overwritten below

        B, C, H, W = x.shape

        x = x.permute(0, 2, 3, 1)  # to channel-last (B, H, W, C)
        # x = x.view(B, H, W, C)
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        # x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        # x = self.norm(x)
        x = self.reduction(x)

        x = x.permute(0, 3, 1, 2)  # back to channel-first (B, 2*C, H/2, W/2)
        # x=x.reshape(B,C,H,W)
        return x

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        # Cost of the (currently disabled) norm plus the reduction Linear.
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops


class WinLearnedAttention(nn.Module):
    """Learned window 'attention': one fully-connected layer mixes all
    winSize*winSize positions of a window, applied independently to each
    channel."""

    def __init__(self, winSize):
        super(WinLearnedAttention, self).__init__()
        flatSize = winSize * winSize
        self.attn = nn.Sequential(
            nn.Linear(flatSize, flatSize, bias=True),
        )

    def forward(self, x):
        batch, channels, height, width = x.shape
        # Flatten each window to a position vector, mix positions with
        # the learned Linear layer, then restore the spatial layout.
        flat = x.reshape(batch, channels, height * width)
        mixed = self.attn(flat)
        return mixed.reshape(batch, channels, height, width)


def shiftPooling(x, winSize):
    """Half-window-shifted max pooling that preserves spatial size.

    Pads x by winSize//2 on every side (replicating edge values), max
    pools with a winSize kernel and stride, upsamples back with nearest
    neighbour, then crops the padding off so the output matches the
    input's (H, W).
    """
    padSize = winSize // 2
    # Replicate-pad so pooling windows are shifted by half a window
    # relative to the unpadded grid.
    x = F.pad(x, (padSize, padSize, padSize, padSize), "replicate")
    x = F.max_pool2d(x, winSize, winSize)
    # F.upsample_nearest is deprecated; F.interpolate with mode="nearest"
    # is the supported equivalent and produces identical output.
    x = F.interpolate(x, scale_factor=winSize, mode="nearest")
    # Crop off the padded border.
    return x[:, :, padSize:-padSize, padSize:-padSize]


class LAttnBlock(nn.Module):
    """Window-based learned-attention block.

    Runs WinLearnedAttention inside non-overlapping windows followed by a
    1x1 conv channel stage; when the window fits at least twice into the
    feature map, a second pass is applied on windows shifted by half the
    window size (Swin-style shifted windows via torch.roll).

    NOTE(review): the shift condition tests H only, so square inputs
    (H == W) are assumed — confirm at the call sites.
    """

    def __init__(self, winSize, inDim, outDim):
        super(LAttnBlock, self).__init__()
        self.winAttn1 = WinLearnedAttention(winSize)
        self.winAttn2 = WinLearnedAttention(winSize)
        # self.winAttn3 = WinLearnedAttention(winSize)
        # self.winAttn4 = WinLearnedAttention(winSize)
        self.winSize = winSize

        # 1x1 conv + BN + ReLU: channel mixing after each window pass.
        self.attnChannel1 = nn.Sequential(
            nn.Conv2d(inDim, outDim, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(outDim),
            nn.ReLU(inplace=True),
        )
        self.attnChannel2 = nn.Sequential(
            nn.Conv2d(outDim, outDim, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(outDim),
            nn.ReLU(inplace=True),
        )
        # self.attnChannel3 = nn.Sequential(
        #     nn.Conv2d(outDim, outDim, kernel_size=1, stride=1, padding=0, bias=True),
        #     nn.BatchNorm2d(outDim),
        #     nn.ReLU(inplace=True),
        # )
        # self.attnChannel4 = nn.Sequential(
        #     nn.Conv2d(outDim, outDim, kernel_size=1, stride=1, padding=0, bias=True),
        #     nn.BatchNorm2d(outDim),
        #     nn.ReLU(inplace=True),
        # )

    def forward(self, x):
        B, C, H, W = x.shape

        x = window_partition(x, self.winSize)  # partition into windows
        x = self.winAttn1(x)  # windowed attention
        x = window_reverse(x, H, W)  # restore original size

        x = self.attnChannel1(x)  # channels: inDim -> outDim

        if self.winSize <= (H // 2):
            # use roll: shift by half a window so the second pass mixes
            # across the first pass's window borders
            shift_size = self.winSize // 2
            x = torch.roll(x, shifts=(-shift_size, -shift_size), dims=(2, 3))
            x = window_partition(x, self.winSize)  # partition into windows
            x = self.winAttn2(x)  # windowed attention
            x = window_reverse(x, H, W)  # restore original size
            x = torch.roll(x, shifts=(shift_size, shift_size), dims=(2, 3))

            x = self.attnChannel2(x)

        # x = window_partition(x, self.winSize)  # partition into windows
        # x = self.winAttn3(x)  # windowed attention
        # x = window_reverse(x, H, W)  # restore original size

        # x = self.attnChannel3(x)
        #
        # if self.winSize <= (H // 2):
        #     # use roll
        #     shift_size = self.winSize // 2
        #     x = torch.roll(x, shifts=(-shift_size, -shift_size), dims=(2, 3))
        #     x = window_partition(x, self.winSize)  # partition into windows
        #     x = self.winAttn4(x)  # windowed attention
        #     x = window_reverse(x, H, W)  # restore original size
        #     x = torch.roll(x, shifts=(shift_size, shift_size), dims=(2, 3))
        #     # # use padding
        #     # padSize = self.winSize // 2
        #     # x = F.pad(x, (padSize, padSize, padSize, padSize), "constant",0)  # pad half of winSize on all sides
        #     # padB,padC,padH,padW=x.shape
        #     # x = window_partition(x, self.winSize)  # partition into windows
        #     # x = self.winAttn2(x)  # windowed attention
        #     # x = window_reverse(x, padH, padW)  # restore original size
        #     # x = x[:, :, padSize:-padSize, padSize:-padSize]  # crop off the padded values
        #     # use shift pooling
        #     #     x=shiftPooling(x,self.winSize)# shift pooling
        #     # x = window_partition(x, self.winSize)  # partition into windows
        #     # x = self.winAttn1(x)  # windowed attention
        #     # x = window_reverse(x, H, W)  # restore original size
        #     x = self.attnChannel4(x)

        return x


class LA_UNet(nn.Module):
    """U-Net built from stacks of LearnedAttention blocks (six per
    encoder stage), decoded by the multi-path `decoder` and finished
    with a 2-channel 1x1 output conv.

    NOTE(review): the LearnedAttention H arguments (512 down to 32) pin
    the expected input to square 512x512 feature maps — confirm callers.
    rel_pos is accepted but not used.
    """

    def __init__(self, rel_pos=True):
        super(LA_UNet, self).__init__()

        # Channel widths per encoder stage.
        depth = [32, 64, 128, 256, 512]
        self.down1 = nn.Sequential(
            LearnedAttention(512, 3, depth[0]),
            LearnedAttention(512, depth[0], depth[0]),
            LearnedAttention(512, depth[0], depth[0]),
            LearnedAttention(512, depth[0], depth[0]),
            LearnedAttention(512, depth[0], depth[0]),
            LearnedAttention(512, depth[0], depth[0])
        )
        # self.down1=nn.Sequential(
        #     block(3,depth[0]),
        #     block(depth[0], depth[0])
        # )
        self.down2 = nn.Sequential(
            LearnedAttention(256, depth[0], depth[1]),
            LearnedAttention(256, depth[1], depth[1]),
            LearnedAttention(256, depth[1], depth[1]),
            LearnedAttention(256, depth[1], depth[1]),
            LearnedAttention(256, depth[1], depth[1]),
            LearnedAttention(256, depth[1], depth[1]),
        )
        # self.down2 = nn.Sequential(
        #     block(depth[0], depth[1]),
        #     block(depth[1], depth[1])
        # )
        self.down3 = nn.Sequential(
            LearnedAttention(128, depth[1], depth[2]),
            LearnedAttention(128, depth[2], depth[2]),
            LearnedAttention(128, depth[2], depth[2]),
            LearnedAttention(128, depth[2], depth[2]),
            LearnedAttention(128, depth[2], depth[2]),
            LearnedAttention(128, depth[2], depth[2])
        )
        # self.down3 = nn.Sequential(
        #     block(depth[1], depth[2]),
        #     block(depth[2], depth[2])
        # )
        self.down4 = nn.Sequential(
            LearnedAttention(64, depth[2], depth[3]),
            LearnedAttention(64, depth[3], depth[3]),
            LearnedAttention(64, depth[3], depth[3]),
            LearnedAttention(64, depth[3], depth[3]),
            LearnedAttention(64, depth[3], depth[3]),
            LearnedAttention(64, depth[3], depth[3]),
        )
        # self.down4 = nn.Sequential(
        #     block(depth[2], depth[3]),
        #     block(depth[3], depth[3])
        # )
        self.down5 = nn.Sequential(
            LearnedAttention(32, depth[3], depth[4]),
            LearnedAttention(32, depth[4], depth[4]),
            LearnedAttention(32, depth[4], depth[4]),
            LearnedAttention(32, depth[4], depth[4]),
            LearnedAttention(32, depth[4], depth[4]),
            LearnedAttention(32, depth[4], depth[4])
        )
        # self.down5 = nn.Sequential(
        #     block(depth[3], depth[4]),
        #     block(depth[4], depth[4])
        # )

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.decode = decoder(depths=depth)

        # Two output channels (binary segmentation logits).
        self.out = nn.Conv2d(depth[0], 2, kernel_size=1, padding=0)

    def forward(self, x):
        # Encoder: attention stack, then 2x downsample into the next stage.
        down1 = self.down1(x)
        t = self.Maxpool1(down1)

        down2 = self.down2(t)
        t = self.Maxpool2(down2)

        down3 = self.down3(t)
        t = self.Maxpool3(down3)

        down4 = self.down4(t)
        t = self.Maxpool4(down4)

        down5 = self.down5(t)

        # t = self.upSample4(down5)
        # t = torch.cat([t, down4], dim=1)
        # up4 = self.up4(t)
        #
        # t = self.upSample3(up4)
        # t = torch.cat([t, down3], dim=1)
        # up3 = self.up3(t)
        #
        # t = self.upSample2(up3)
        # t = torch.cat([t, down2], dim=1)
        # up2 = self.up2(t)
        #
        # t = self.upSample1(up2)
        # t = torch.cat([t, down1], dim=1)
        # up1 = self.up1(t)
        # Multi-path decoding of all five encoder levels.
        out = self.decode([down1, down2, down3, down4, down5])

        return self.out(out)


class conv_block(nn.Module):
    """
    Convolution Block: two stacked Conv3x3 -> BatchNorm2d -> ReLU stages.
    """

    def __init__(self, in_ch, out_ch):
        super(conv_block, self).__init__()

        stages = [
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)


class MSAconv_block(nn.Module):
    """
    Convolution block with multi-scale pooled context.

    Two Conv3x3-BN-ReLU stages, then two max-pooled context maps are
    added back: a global max (whole feature map) and a 2x2 max grid, both
    upsampled to the input resolution with nearest neighbour.

    NOTE(review): the pooling kernels use only H, so the input is assumed
    square (H == W) with H divisible by 2 — confirm at call sites.
    """

    def __init__(self, in_ch, out_ch):
        super(MSAconv_block, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True))

        self.conv2 = nn.Sequential(
            nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True))

    def forward(self, x):
        B, C, H, W = x.shape
        x = self.conv1(x)
        x = self.conv2(x)
        # Global context: one max per channel, broadcast back to HxW.
        # F.upsample_nearest is deprecated; F.interpolate(size=H,
        # mode="nearest") is the supported equivalent.
        p1 = F.max_pool2d(x, H)
        p1 = F.interpolate(p1, size=H, mode="nearest")
        # Quadrant context: 2x2 max grid, upsampled back to HxW.
        p2 = F.max_pool2d(x, H // 2)
        p2 = F.interpolate(p2, size=H, mode="nearest")

        return x + p1 + p2


class up_conv(nn.Module):
    """
    Up Convolution Block: 2x nearest-neighbour upsample followed by a
    Conv3x3 -> BatchNorm2d -> ReLU stage.
    """

    def __init__(self, in_ch, out_ch):
        super(up_conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.up(x)


class U_Net(nn.Module):
    """
    UNet - Basic Implementation
    Paper : https://arxiv.org/abs/1505.04597

    Five conv_block encoder stages with 2x max-pool downsampling and a
    symmetric decoder (upsample, concat skip, conv_block), finished by a
    1x1 output convolution.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super(U_Net, self).__init__()

        n1 = 32
        # Channel widths double per stage: 32, 64, 128, 256, 512.
        filters = [n1 * (2 ** i) for i in range(5)]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder: conv block, then 2x downsample into the next stage.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder: upsample, concat the skip connection, conv block.
        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)


class ConvBNRelu1D(nn.Module):
    """Conv1d -> BatchNorm1d -> ReLU unit used inside interWin."""

    def __init__(self, inChannel, outChannel, kernel_size=3, padding=1, bias=False):
        super(ConvBNRelu1D, self).__init__()
        self.conv = nn.Conv1d(inChannel, outChannel, kernel_size=kernel_size, padding=padding, bias=bias)
        self.bn = nn.BatchNorm1d(outChannel, momentum=0.1, affine=True)
        self.reLu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Conv, normalise, then activate in place.
        return self.reLu(self.bn(self.conv(x)))


class interWin(nn.Module):
    """
    Cross-window interaction block.

    Alternates fully-connected mixing over the flattened spatial axis
    (Linear on width*width vectors) with 1x1 Conv1d-BN-ReLU stages that
    mix channels, mapping (in_ch, inWidth^2) to (out_ch, outWidth^2).

    NOTE(review): forward reshapes the output back to the INPUT spatial
    size (w, h), which is only valid when outWidth*outWidth == w*h —
    confirm that callers always pass matching widths.
    """

    def __init__(self, in_ch, hide_ch, out_ch, inWidth, hideWidth, outWidth):
        super().__init__()

        self.layer = nn.Sequential(
            nn.Linear(inWidth * inWidth, hideWidth * hideWidth),
            ConvBNRelu1D(in_ch, hide_ch, kernel_size=1, padding=0, bias=True),

            nn.Linear(hideWidth * hideWidth, hideWidth * hideWidth),
            ConvBNRelu1D(hide_ch, hide_ch, kernel_size=1, padding=0, bias=True),

            nn.Linear(hideWidth * hideWidth, outWidth * outWidth),
            ConvBNRelu1D(hide_ch, out_ch, kernel_size=1, padding=0, bias=True)
        )
        self.outDim = out_ch

    def forward(self, x):
        # Flatten spatial dims, run the interaction stack, restore layout.
        b, c, w, h = x.shape
        x = x.view(b, c, w * h)
        x = self.layer(x)
        # The original called .view and then .reshape with the identical
        # target shape; a single view is sufficient.
        x = x.view(b, self.outDim, w, h)

        return x


class LGFO_Net(nn.Module):
    """
    U-Net with a parallel local-global feature branch.

    A conv_block U-Net encoder runs alongside a pyramid of interWin
    blocks (global spatial mixing); the two branches are summed
    stage-wise, then decoded with the usual skip-connection decoder.

    NOTE(review): the interWin spatial widths (256/128/64/32/16) assume a
    256x256 input — confirm at the call site.
    """

    def __init__(self, in_ch=3, out_ch=1):
        # BUG FIX: the original called super(NewU_Net, self).__init__(),
        # but no NewU_Net class exists in this module, so instantiating
        # LGFO_Net raised a NameError. Use this class's own name.
        super(LGFO_Net, self).__init__()

        n1 = 64
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

        # Global-interaction branch: one interWin per encoder stage.
        self.interWin1 = interWin(3, filters[0], filters[0], 256, 32, 256)
        self.interWin2 = interWin(filters[0], filters[1], filters[1], 128, 32, 128)
        self.interWin3 = interWin(filters[1], filters[2], filters[2], 64, 32, 64)
        self.interWin4 = interWin(filters[2], filters[3], filters[3], 32, 32, 32)
        self.interWin5 = interWin(filters[3], filters[4], filters[4], 16, 16, 16)

    def forward(self, input):
        # Global branch (Maxpool1 is stateless, so reusing it for every
        # downsample is safe).
        g1 = self.interWin1(input)
        g2 = self.interWin2(self.Maxpool1(g1))
        g3 = self.interWin3(self.Maxpool1(g2))
        g4 = self.interWin4(self.Maxpool1(g3))
        g5 = self.interWin5(self.Maxpool1(g4))

        # Local (convolutional) branch.
        e1 = self.Conv1(input)
        e2 = self.Conv2(self.Maxpool1(e1))
        e3 = self.Conv3(self.Maxpool2(e2))
        e4 = self.Conv4(self.Maxpool3(e3))
        e5 = self.Conv5(self.Maxpool4(e4))

        # Fuse the two branches stage-wise by addition.
        e1 = e1 + g1
        e2 = e2 + g2
        e3 = e3 + g3
        e4 = e4 + g4
        e5 = e5 + g5

        # Decoder with skip connections.
        d5 = self.Up5(e5)
        d5 = torch.cat((e4, d5), dim=1)
        d5 = self.Up_conv5(d5)

        d4 = self.Up4(d5)
        d4 = torch.cat((e3, d4), dim=1)
        d4 = self.Up_conv4(d4)

        d3 = self.Up3(d4)
        d3 = torch.cat((e2, d3), dim=1)
        d3 = self.Up_conv3(d3)

        d2 = self.Up2(d3)
        d2 = torch.cat((e1, d2), dim=1)
        d2 = self.Up_conv2(d2)

        return self.Conv(d2)


class NewU_Net1(nn.Module):
    """
    U-Net variant whose encoder stages are LAttnBlock windowed
    learned-attention blocks (window size 32); the decoder is the usual
    upsample / skip-concat / conv_block chain with a 1x1 output conv.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super(NewU_Net1, self).__init__()

        n1 = 32
        # Channel widths double per stage: 32, 64, 128, 256, 512.
        filters = [n1 * (2 ** i) for i in range(5)]
        # NOTE(review): the stem is hard-wired to 3 input channels, so
        # in_ch is effectively ignored here — confirm this is intended.
        self.head = nn.Conv2d(3, filters[0], 1)

        self.encode1 = LAttnBlock(32, filters[0], filters[0])
        self.encode2 = LAttnBlock(32, filters[0], filters[1])
        self.encode3 = LAttnBlock(32, filters[1], filters[2])
        self.encode4 = LAttnBlock(32, filters[2], filters[3])
        self.encode5 = LAttnBlock(32, filters[3], filters[4])

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Attention encoder with functional 2x max-pool downsampling.
        enc1 = self.encode1(self.head(x))
        enc2 = self.encode2(F.max_pool2d(enc1, 2, 2))
        enc3 = self.encode3(F.max_pool2d(enc2, 2, 2))
        enc4 = self.encode4(F.max_pool2d(enc3, 2, 2))
        enc5 = self.encode5(F.max_pool2d(enc4, 2, 2))

        # Decoder: upsample, concat the skip connection, conv block.
        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)


class mutiScaleAttU_Net(nn.Module):
    """
    U-Net whose encoder stages are MSAconv_block units (conv blocks with
    multi-scale pooled context); the decoder is the standard upsample /
    skip-concat / conv_block chain with a 1x1 output conv.
    """

    def __init__(self, in_ch=3, out_ch=2, imageH=512):
        # imageH is accepted for interface compatibility but not used.
        super(mutiScaleAttU_Net, self).__init__()

        n1 = 32
        # Channel widths double per stage: 32, 64, 128, 256, 512.
        filters = [n1 * (2 ** i) for i in range(5)]

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.Conv1 = MSAconv_block(in_ch, filters[0])
        self.Conv2 = MSAconv_block(filters[0], filters[1])
        self.Conv3 = MSAconv_block(filters[1], filters[2])
        self.Conv4 = MSAconv_block(filters[2], filters[3])
        self.Conv5 = MSAconv_block(filters[3], filters[4])

        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_conv2 = conv_block(filters[1], filters[0])

        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder: multi-scale conv block, then 2x downsample per stage.
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        # Decoder: upsample, concat the skip connection, conv block.
        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)


class Recurrent_block(nn.Module):
    """
    Recurrent Block for R2Unet_CNN.

    One shared-weight Conv-BN-ReLU applied recurrently: the state is
    initialised by a first pass over the input and then refined t times
    on (input + state), as in the R2U-Net paper (arXiv:1802.06955).
    """

    def __init__(self, out_ch, t=2):
        # t: number of recurrent refinement steps.
        super(Recurrent_block, self).__init__()

        self.t = t
        self.out_ch = out_ch
        self.conv = nn.Sequential(
            nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # BUG FIX: the original overwrote x with self.conv(x) and then
        # computed self.conv(x + x), i.e. it doubled the activation and
        # produced the same value on every iteration — no recurrence.
        # The reference recurrence is state = conv(input + state).
        for i in range(self.t):
            if i == 0:
                state = self.conv(x)
            state = self.conv(x + state)
        return state


class RRCNN_block(nn.Module):
    """
    Recurrent Residual Convolutional Neural Network Block: a 1x1 channel
    projection followed by two recurrent conv stages, combined with a
    residual shortcut around the recurrent stack.
    """

    def __init__(self, in_ch, out_ch, t=2):
        super(RRCNN_block, self).__init__()

        self.RCNN = nn.Sequential(
            Recurrent_block(out_ch, t=t),
            Recurrent_block(out_ch, t=t)
        )
        self.Conv = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        projected = self.Conv(x)
        # Residual connection around the recurrent stack.
        return projected + self.RCNN(projected)


class R2U_Net(nn.Module):
    """
    R2U-Net: a U-Net whose convolution stages are recurrent-residual blocks.
    Paper: https://arxiv.org/abs/1802.06955
    """

    def __init__(self, img_ch=3, output_ch=2, t=2):
        super(R2U_Net, self).__init__()

        n1 = 64
        filters = [n1 << i for i in range(5)]  # 64, 128, 256, 512, 1024

        # One pooling module per encoder transition.
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Registered but not used in forward(); kept for state-dict compatibility.
        self.Upsample = nn.Upsample(scale_factor=2)

        # Encoder: recurrent-residual blocks.
        self.RRCNN1 = RRCNN_block(img_ch, filters[0], t=t)
        self.RRCNN2 = RRCNN_block(filters[0], filters[1], t=t)
        self.RRCNN3 = RRCNN_block(filters[1], filters[2], t=t)
        self.RRCNN4 = RRCNN_block(filters[2], filters[3], t=t)
        self.RRCNN5 = RRCNN_block(filters[3], filters[4], t=t)

        # Decoder: upsampling followed by recurrent-residual fusion blocks.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Up_RRCNN5 = RRCNN_block(filters[4], filters[3], t=t)
        self.Up4 = up_conv(filters[3], filters[2])
        self.Up_RRCNN4 = RRCNN_block(filters[3], filters[2], t=t)
        self.Up3 = up_conv(filters[2], filters[1])
        self.Up_RRCNN3 = RRCNN_block(filters[2], filters[1], t=t)
        self.Up2 = up_conv(filters[1], filters[0])
        self.Up_RRCNN2 = RRCNN_block(filters[1], filters[0], t=t)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv = nn.Conv2d(filters[0], output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Encode with pooling + RRCNN blocks, decode with skip connections."""
        enc1 = self.RRCNN1(x)
        enc2 = self.RRCNN2(self.Maxpool(enc1))
        enc3 = self.RRCNN3(self.Maxpool1(enc2))
        enc4 = self.RRCNN4(self.Maxpool2(enc3))
        enc5 = self.RRCNN5(self.Maxpool3(enc4))

        dec = self.Up_RRCNN5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_RRCNN4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_RRCNN3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_RRCNN2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)


class Attention_block(nn.Module):
    """
    Attention gate (Attention U-Net, https://arxiv.org/abs/1804.03999).

    Projects the gating signal ``g`` and the skip-connection features ``x``
    to a common ``F_int``-channel space, combines them, and produces a
    single-channel attention map in [0, 1] that rescales ``x``.

    Args:
        F_g: channel count of the gating signal ``g``.
        F_l: channel count of the skip-connection features ``x``.
        F_int: channel count of the intermediate projection.
    """

    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()

        # Projection of the gating signal.  (Previously this conv was sized
        # with F_l input channels although it is applied to g; it only worked
        # because every caller in this file passes F_g == F_l.)
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )

        # Projection of the skip-connection features.
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int)
        )

        # Collapse to a single-channel attention coefficient in [0, 1].
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid()
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        psi = self.relu(g1 + x1)
        psi = self.psi(psi)
        # Gate the skip features; the 1-channel map broadcasts over channels.
        out = x * psi
        return out


class AttU_Net(nn.Module):
    """
    Attention U-Net: U-Net with attention-gated skip connections.
    Paper: https://arxiv.org/abs/1804.03999
    """

    def __init__(self, img_ch=3, output_ch=2):
        super(AttU_Net, self).__init__()

        n1 = 64
        filters = [n1 << i for i in range(5)]  # 64, 128, 256, 512, 1024

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: plain double-conv blocks.
        self.Conv1 = conv_block(img_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])

        # Decoder: upsample, attention-gate the matching skip, double-conv.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Att5 = Attention_block(F_g=filters[3], F_l=filters[3], F_int=filters[2])
        self.Up_conv5 = conv_block(filters[4], filters[3])

        self.Up4 = up_conv(filters[3], filters[2])
        self.Att4 = Attention_block(F_g=filters[2], F_l=filters[2], F_int=filters[1])
        self.Up_conv4 = conv_block(filters[3], filters[2])

        self.Up3 = up_conv(filters[2], filters[1])
        self.Att3 = Attention_block(F_g=filters[1], F_l=filters[1], F_int=filters[0])
        self.Up_conv3 = conv_block(filters[2], filters[1])

        self.Up2 = up_conv(filters[1], filters[0])
        self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
        self.Up_conv2 = conv_block(filters[1], filters[0])

        # Final 1x1 projection to the requested number of output channels.
        self.Conv = nn.Conv2d(filters[0], output_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Encoder-decoder pass with attention-gated skip connections."""
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        dec = self.Up5(enc5)
        gated = self.Att5(g=dec, x=enc4)
        dec = self.Up_conv5(torch.cat((gated, dec), dim=1))

        dec = self.Up4(dec)
        gated = self.Att4(g=dec, x=enc3)
        dec = self.Up_conv4(torch.cat((gated, dec), dim=1))

        dec = self.Up3(dec)
        gated = self.Att3(g=dec, x=enc2)
        dec = self.Up_conv3(torch.cat((gated, dec), dim=1))

        dec = self.Up2(dec)
        gated = self.Att2(g=dec, x=enc1)
        dec = self.Up_conv2(torch.cat((gated, dec), dim=1))

        return self.Conv(dec)


class R2AttU_Net(nn.Module):
    """
    Recurrent-residual U-Net with attention-gated skip connections.
    Implementation reference: https://github.com/LeeJunHyun/Image_Segmentation
    """

    def __init__(self, in_ch=3, out_ch=1, t=2):
        super(R2AttU_Net, self).__init__()

        n1 = 64
        filters = [n1 << i for i in range(5)]  # 64, 128, 256, 512, 1024

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder: recurrent-residual blocks.
        self.RRCNN1 = RRCNN_block(in_ch, filters[0], t=t)
        self.RRCNN2 = RRCNN_block(filters[0], filters[1], t=t)
        self.RRCNN3 = RRCNN_block(filters[1], filters[2], t=t)
        self.RRCNN4 = RRCNN_block(filters[2], filters[3], t=t)
        self.RRCNN5 = RRCNN_block(filters[3], filters[4], t=t)

        # Decoder: upsample, attention-gate the skip, fuse with an RRCNN block.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Att5 = Attention_block(F_g=filters[3], F_l=filters[3], F_int=filters[2])
        self.Up_RRCNN5 = RRCNN_block(filters[4], filters[3], t=t)

        self.Up4 = up_conv(filters[3], filters[2])
        self.Att4 = Attention_block(F_g=filters[2], F_l=filters[2], F_int=filters[1])
        self.Up_RRCNN4 = RRCNN_block(filters[3], filters[2], t=t)

        self.Up3 = up_conv(filters[2], filters[1])
        self.Att3 = Attention_block(F_g=filters[1], F_l=filters[1], F_int=filters[0])
        self.Up_RRCNN3 = RRCNN_block(filters[2], filters[1], t=t)

        self.Up2 = up_conv(filters[1], filters[0])
        self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
        self.Up_RRCNN2 = RRCNN_block(filters[1], filters[0], t=t)

        # Final 1x1 projection to the requested number of output channels.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Encode, then decode while attention-gating each skip connection."""
        enc1 = self.RRCNN1(x)
        enc2 = self.RRCNN2(self.Maxpool1(enc1))
        enc3 = self.RRCNN3(self.Maxpool2(enc2))
        enc4 = self.RRCNN4(self.Maxpool3(enc3))
        enc5 = self.RRCNN5(self.Maxpool4(enc4))

        dec = self.Up5(enc5)
        dec = self.Up_RRCNN5(torch.cat((self.Att5(g=dec, x=enc4), dec), dim=1))

        dec = self.Up4(dec)
        dec = self.Up_RRCNN4(torch.cat((self.Att4(g=dec, x=enc3), dec), dim=1))

        dec = self.Up3(dec)
        dec = self.Up_RRCNN3(torch.cat((self.Att3(g=dec, x=enc2), dec), dim=1))

        dec = self.Up2(dec)
        dec = self.Up_RRCNN2(torch.cat((self.Att2(g=dec, x=enc1), dec), dim=1))

        return self.Conv(dec)


# For nested 3 channels are required

class conv_block_nested(nn.Module):
    """Two 3x3 conv + BN + ReLU stages (in_ch -> mid_ch -> out_ch), the
    basic unit of the nested U-Net below."""

    def __init__(self, in_ch, mid_ch, out_ch):
        super(conv_block_nested, self).__init__()
        self.activation = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_ch, mid_ch, kernel_size=3, padding=1, bias=True)
        self.bn1 = nn.BatchNorm2d(mid_ch)
        self.conv2 = nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1, bias=True)
        self.bn2 = nn.BatchNorm2d(out_ch)

    def forward(self, x):
        # Stage 1: in_ch -> mid_ch.
        hidden = self.activation(self.bn1(self.conv1(x)))
        # Stage 2: mid_ch -> out_ch.
        return self.activation(self.bn2(self.conv2(hidden)))


# Nested Unet

class NestedUNet(nn.Module):
    """
    UNet++ (nested U-Net) with dense skip pathways.
    Implementation of this paper:
    https://arxiv.org/pdf/1807.10165.pdf

    Node x{i}_{j} sits at encoder depth i and skip column j.  Every node
    with j > 0 receives the concatenation of all earlier nodes in its row
    plus the bilinearly upsampled node one level deeper.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super(NestedUNet, self).__init__()

        # Channel widths per depth: 32, 64, 128, 256, 512.
        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        # Shared 2x pooling / bilinear upsampling used between all levels.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Backbone column j = 0 (plain encoder).
        self.conv0_0 = conv_block_nested(in_ch, filters[0], filters[0])
        self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1])
        self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2])
        self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3])
        self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4])

        # Column j = 1: one same-row input + one upsampled deeper input.
        self.conv0_1 = conv_block_nested(filters[0] + filters[1], filters[0], filters[0])
        self.conv1_1 = conv_block_nested(filters[1] + filters[2], filters[1], filters[1])
        self.conv2_1 = conv_block_nested(filters[2] + filters[3], filters[2], filters[2])
        self.conv3_1 = conv_block_nested(filters[3] + filters[4], filters[3], filters[3])

        # Column j = 2: two same-row inputs + one upsampled deeper input.
        self.conv0_2 = conv_block_nested(filters[0] * 2 + filters[1], filters[0], filters[0])
        self.conv1_2 = conv_block_nested(filters[1] * 2 + filters[2], filters[1], filters[1])
        self.conv2_2 = conv_block_nested(filters[2] * 2 + filters[3], filters[2], filters[2])

        # Column j = 3: three same-row inputs + one upsampled deeper input.
        self.conv0_3 = conv_block_nested(filters[0] * 3 + filters[1], filters[0], filters[0])
        self.conv1_3 = conv_block_nested(filters[1] * 3 + filters[2], filters[1], filters[1])

        # Column j = 4: the final top-row node.
        self.conv0_4 = conv_block_nested(filters[0] * 4 + filters[1], filters[0], filters[0])

        # 1x1 projection to the requested number of output channels.
        self.final = nn.Conv2d(filters[0], out_ch, kernel_size=1)

    def forward(self, x):
        # Nodes are evaluated in dependency order, one diagonal at a time.
        x0_0 = self.conv0_0(x)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x0_1 = self.conv0_1(torch.cat([x0_0, self.Up(x1_0)], 1))

        x2_0 = self.conv2_0(self.pool(x1_0))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.Up(x2_0)], 1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.Up(x1_1)], 1))

        x3_0 = self.conv3_0(self.pool(x2_0))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.Up(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.Up(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.Up(x1_2)], 1))

        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.Up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.Up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.Up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.Up(x1_3)], 1))

        output = self.final(x0_4)
        return output


# Dictioary Unet
# if required for getting the filters and model parameters for each step

class ConvolutionBlock(nn.Module):
    """Two 3x3 convolutions with optional batch-norm.  ReLU follows the
    first conv; the activation after the second conv is configurable
    (``last_active``, F.relu by default)."""

    def __init__(self, in_filters, out_filters, kernel_size=3, batchnorm=True, last_active=F.relu):
        super(ConvolutionBlock, self).__init__()

        self.bn = batchnorm                 # whether to apply b1/b2
        self.last_active = last_active      # activation after the 2nd conv
        self.c1 = nn.Conv2d(in_filters, out_filters, kernel_size, padding=1)
        self.b1 = nn.BatchNorm2d(out_filters)
        self.c2 = nn.Conv2d(out_filters, out_filters, kernel_size, padding=1)
        self.b2 = nn.BatchNorm2d(out_filters)

    def forward(self, x):
        out = self.c1(x)
        if self.bn:
            out = self.b1(out)
        out = F.relu(out)
        out = self.c2(out)
        if self.bn:
            out = self.b2(out)
        return self.last_active(out)


class ContractiveBlock(nn.Module):
    """Encoder step: convolution block, then max-pool + spatial dropout.

    forward() returns BOTH the pre-pool feature map (used later as the skip
    connection) and the pooled/dropped map that feeds the next, deeper level.
    """

    def __init__(self, in_filters, out_filters, conv_kern=3, pool_kern=2, dropout=0.5, batchnorm=True):
        super(ContractiveBlock, self).__init__()
        self.c1 = ConvolutionBlock(in_filters=in_filters, out_filters=out_filters,
                                   kernel_size=conv_kern, batchnorm=batchnorm)
        # ceil_mode keeps odd spatial sizes from losing their last row/column.
        self.p1 = nn.MaxPool2d(kernel_size=pool_kern, ceil_mode=True)
        self.d1 = nn.Dropout2d(dropout)

    def forward(self, x):
        features = self.c1(x)
        pooled = self.p1(features)
        return features, self.d1(pooled)


class ExpansiveBlock(nn.Module):
    """Decoder step: transposed-conv upsampling, skip concatenation,
    dropout, then a convolution block.

    Note: the ``stride`` parameter is unused — the transposed conv
    hard-codes stride 2 (output_padding=1 restores the exact 2x size
    for a kernel of 3).
    """

    def __init__(self, in_filters1, in_filters2, out_filters, tr_kern=3, conv_kern=3, stride=2, dropout=0.5):
        super(ExpansiveBlock, self).__init__()
        self.t1 = nn.ConvTranspose2d(in_filters1, out_filters, tr_kern, stride=2, padding=1, output_padding=1)
        self.d1 = nn.Dropout(dropout)
        self.c1 = ConvolutionBlock(out_filters + in_filters2, out_filters, conv_kern)

    def forward(self, x, contractive_x):
        upsampled = self.t1(x)
        merged = torch.cat([upsampled, contractive_x], 1)
        return self.c1(self.d1(merged))


class Unet_dict(nn.Module):
    """Unet which operates with filters dictionary values.

    Builds four contractive (encoder) levels, a bottleneck, and four
    expansive (decoder) levels, doubling/halving the filter count per step
    and recording each stage's (in, out) channel pair in ``self.filters_dict``.

    Args:
        n_labels: number of output classes (softmax over dim 1).
        n_filters: filter count of the first encoder level.
        p_dropout: unused here; the submodules fall back to their own
            dropout defaults.
        batchnorm: whether the convolution blocks apply batch-norm.
    """

    def __init__(self, n_labels, n_filters=32, p_dropout=0.5, batchnorm=True):
        super(Unet_dict, self).__init__()
        filters_dict = {}
        # filt_pair is mutated in place: [in_channels, out_channels] of the
        # level currently being built.
        filt_pair = [3, n_filters]

        for i in range(4):
            self.add_module('contractive_' + str(i), ContractiveBlock(filt_pair[0], filt_pair[1], batchnorm=batchnorm))
            filters_dict['contractive_' + str(i)] = (filt_pair[0], filt_pair[1])
            # Double the filter count at each deeper level.
            filt_pair[0] = filt_pair[1]
            filt_pair[1] = filt_pair[1] * 2

        self.bottleneck = ConvolutionBlock(filt_pair[0], filt_pair[1], batchnorm=batchnorm)
        filters_dict['bottleneck'] = (filt_pair[0], filt_pair[1])

        for i in reversed(range(4)):
            # Each expansive block consumes the previous decoder output plus
            # the matching encoder level's skip connection.
            self.add_module('expansive_' + str(i),
                            ExpansiveBlock(filt_pair[1], filters_dict['contractive_' + str(i)][1], filt_pair[0]))
            filters_dict['expansive_' + str(i)] = (filt_pair[1], filt_pair[0])
            # Halve the filter count on the way back up.
            filt_pair[1] = filt_pair[0]
            filt_pair[0] = filt_pair[0] // 2

        self.output = nn.Conv2d(filt_pair[1], n_labels, kernel_size=1)
        filters_dict['output'] = (filt_pair[1], n_labels)
        self.filters_dict = filters_dict

    # final_forward
    def forward(self, x):
        # Encoder: keep the pre-pool maps (cNN) for the skip connections.
        c00, c0 = self.contractive_0(x)
        c11, c1 = self.contractive_1(c0)
        c22, c2 = self.contractive_2(c1)
        c33, c3 = self.contractive_3(c2)
        bottle = self.bottleneck(c3)
        # Decoder: fuse each upsampled map with its encoder counterpart.
        u3 = F.relu(self.expansive_3(bottle, c33))
        u2 = F.relu(self.expansive_2(u3, c22))
        u1 = F.relu(self.expansive_1(u2, c11))
        u0 = F.relu(self.expansive_0(u1, c00))
        # Class probabilities over dim 1.
        return F.softmax(self.output(u0), dim=1)

# Need to check why this U-Net is not working properly
#
# class Convolution2(nn.Module):
#     """Convolution Block using 2 Conv2D
#     Args:
#         in_channels = Input Channels
#         out_channels = Output Channels
#         kernal_size = 3
#         activation = Relu
#         batchnorm = True
#
#     Output:
#         Sequential Relu output """
#
#     def __init__(self, in_channels, out_channels, kernal_size=3, activation='Relu', batchnorm=True):
#         super(Convolution2, self).__init__()
#
#         self.in_channels = in_channels
#         self.out_channels = out_channels
#         self.kernal_size = kernal_size
#         self.batchnorm1 = batchnorm
#
#         self.batchnorm2 = batchnorm
#         self.activation = activation
#
#         self.conv1 = nn.Conv2d(self.in_channels, self.out_channels, self.kernal_size,  padding=1, bias=True)
#         self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, self.kernal_size, padding=1, bias=True)
#
#         self.b1 = nn.BatchNorm2d(out_channels)
#         self.b2 = nn.BatchNorm2d(out_channels)
#
#         if self.activation == 'LRelu':
#             self.a1 = nn.LeakyReLU(inplace=True)
#         if self.activation == 'Relu':
#             self.a1 = nn.ReLU(inplace=True)
#
#         if self.activation == 'LRelu':
#             self.a2 = nn.LeakyReLU(inplace=True)
#         if self.activation == 'Relu':
#             self.a2 = nn.ReLU(inplace=True)
#
#     def forward(self, x):
#         x1 = self.conv1(x)
#
#         if self.batchnorm1:
#             x1 = self.b1(x1)
#
#         x1 = self.a1(x1)
#
#         x1 = self.conv2(x1)
#
#         if self.batchnorm2:
#             x1 = self.b1(x1)
#
#         x = self.a2(x1)
#
#         return x
#
#
# class UNet(nn.Module):
#     """Implementation of U-Net: Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015)
#         https://arxiv.org/abs/1505.04597
#         Args:
#             n_class = no. of classes"""
#
#     def __init__(self, n_class, dropout=0.4):
#         super(UNet, self).__init__()
#
#         in_ch = 3
#         n1 = 64
#         n2 = n1*2
#         n3 = n2*2
#         n4 = n3*2
#         n5 = n4*2
#
#         self.dconv_down1 = Convolution2(in_ch, n1)
#         self.dconv_down2 = Convolution2(n1, n2)
#         self.dconv_down3 = Convolution2(n2, n3)
#         self.dconv_down4 = Convolution2(n3, n4)
#         self.dconv_down5 = Convolution2(n4, n5)
#
#         self.maxpool1 = nn.MaxPool2d(2)
#         self.maxpool2 = nn.MaxPool2d(2)
#         self.maxpool3 = nn.MaxPool2d(2)
#         self.maxpool4 = nn.MaxPool2d(2)
#
#         self.upsample1 = nn.Upsample(scale_factor=2)#, mode='bilinear', align_corners=True)
#         self.upsample2 = nn.Upsample(scale_factor=2)#, mode='bilinear', align_corners=True)
#         self.upsample3 = nn.Upsample(scale_factor=2)#, mode='bilinear', align_corners=True)
#         self.upsample4 = nn.Upsample(scale_factor=2)#, mode='bilinear', align_corners=True)
#
#         self.dropout1 = nn.Dropout(dropout)
#         self.dropout2 = nn.Dropout(dropout)
#         self.dropout3 = nn.Dropout(dropout)
#         self.dropout4 = nn.Dropout(dropout)
#         self.dropout5 = nn.Dropout(dropout)
#         self.dropout6 = nn.Dropout(dropout)
#         self.dropout7 = nn.Dropout(dropout)
#         self.dropout8 = nn.Dropout(dropout)
#
#         self.dconv_up4 = Convolution2(n4 + n5, n4)
#         self.dconv_up3 = Convolution2(n3 + n4, n3)
#         self.dconv_up2 = Convolution2(n2 + n3, n2)
#         self.dconv_up1 = Convolution2(n1 + n2, n1)
#
#         self.conv_last = nn.Conv2d(n1, n_class, kernel_size=1, stride=1, padding=0)
#       #  self.active = torch.nn.Sigmoid()
#
#
#
#     def forward(self, x):
#         conv1 = self.dconv_down1(x)
#         x = self.maxpool1(conv1)
#        # x = self.dropout1(x)
#
#         conv2 = self.dconv_down2(x)
#         x = self.maxpool2(conv2)
#        # x = self.dropout2(x)
#
#         conv3 = self.dconv_down3(x)
#         x = self.maxpool3(conv3)
#        # x = self.dropout3(x)
#
#         conv4 = self.dconv_down4(x)
#         x = self.maxpool4(conv4)
#         #x = self.dropout4(x)
#
#         x = self.dconv_down5(x)
#
#         x = self.upsample4(x)
#         x = torch.cat((x, conv4), dim=1)
#         #x = self.dropout5(x)
#
#         x = self.dconv_up4(x)
#         x = self.upsample3(x)
#         x = torch.cat((x, conv3), dim=1)
#        # x = self.dropout6(x)
#
#         x = self.dconv_up3(x)
#         x = self.upsample2(x)
#         x = torch.cat((x, conv2), dim=1)
#         #x = self.dropout7(x)
#
#         x = self.dconv_up2(x)
#         x = self.upsample1(x)
#         x = torch.cat((x, conv1), dim=1)
#         #x = self.dropout8(x)
#
#         x = self.dconv_up1(x)
#
#         x = self.conv_last(x)
#      #   out = self.active(x)
#
#         return x
class DepthShareCBN(nn.Module):
    """
    Convolution block whose single 3x3 kernel is shared across every input
    channel (applied depthwise by folding channels into the batch), followed
    by a 1x1 pointwise convolution, batch-norm and ReLU.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()

        # One 1-channel 3x3 conv, reused for every channel of every sample.
        # stride must be 1: forward() reshapes the result back to
        # (B, C, H, W), which requires the spatial size to be preserved.
        # (The previous stride=3 shrank H and W, so the reshape raised a
        # size-mismatch error for any input whose spatial size is not 3x3.)
        self.Conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=True)
        self.PointConv = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=True)
        self.BN = nn.BatchNorm2d(out_ch)
        self.Relu = nn.ReLU(inplace=True)

    def forward(self, x):
        B, C, H, W = x.size()
        # Fold channels into the batch so the single-channel conv is shared:
        # (B, C, H, W) -> (B*C, 1, H, W).
        x = x.reshape(-1, 1, H, W)
        x = self.Conv(x)
        # Restore the original layout before mixing channels pointwise.
        x = x.reshape(B, C, H, W)
        x = self.PointConv(x)
        x = self.BN(x)
        x = self.Relu(x)
        return x
class InDepthShareCBN(nn.Module):
    """
    "In-depth share" convolution block.

    Every input channel is duplicated ``out_ch`` times and filtered by a
    grouped 3x3 convolution (groups=out_ch gives one independent kernel per
    duplicate, shared across all input channels); a 1x1 pointwise conv then
    mixes the resulting ``in_ch * out_ch`` maps down to ``out_ch`` channels,
    followed by batch-norm and ReLU.

    NOTE(review): forward() assumes the runtime channel count C equals
    ``in_ch`` — the pointwise conv is sized for in_ch * out_ch inputs.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()

        # Grouped 3x3 conv: one kernel per duplicate; stride 1 + padding 1
        # preserves H and W so the reshapes in forward() line up.
        self.Conv =nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True,groups=out_ch)
        # self.Conv =nn.DepthwiseConv2d(out_ch, out_ch, kernel_size=3, stride=3, padding=1, bias=True)
        # 1x1 conv mixing the in_ch * out_ch depthwise responses to out_ch maps.
        self.PointConv =nn.Conv2d(in_ch*out_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=True)
        self.BN = nn.BatchNorm2d(out_ch)
        self.Relu= nn.ReLU(inplace=True)
        self.out_ch=out_ch


    def forward(self, x):
        B,C,H,W=x.size()
        # Fold channels into the batch: (B, C, H, W) -> (B*C, 1, H, W).
        x=x.reshape(-1,1,H,W)
        # Duplicate each single-channel map out_ch times so every grouped
        # kernel sees its own copy: (B*C, out_ch, H, W).
        x=x.repeat(1,self.out_ch,1,1)
        x = self.Conv(x)
        # Gather all C*out_ch responses per sample back into the channel
        # dimension: (B, C*out_ch, H, W).
        x=x.reshape(B,C*(self.out_ch),H,W)
        x=self.PointConv(x)
        x=self.BN(x)
        x=self.Relu(x)
        return x

class DepthShareUp_conv(nn.Module):
    """Upsampling step: 2x upsample followed by a depth-shared conv block."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            DepthShareCBN(in_ch, out_ch)
        )

    def forward(self, x):
        return self.up(x)



class DepthShareconv_block(nn.Module):
    """Two parallel paths over the same input — a standard double-conv block
    and a depth-shared conv block — fused by element-wise addition."""

    def __init__(self, in_ch, out_ch):
        super().__init__()

        self.layer1 = DepthShareCBN(in_ch, out_ch)
        self.conv = conv_block(in_ch, out_ch)

    def forward(self, input):
        # Standard path first, then the depth-shared path, summed.
        return self.conv(input) + self.layer1(input)

class DepthShareNestedUNet(nn.Module):
    """
    UNet++ (nested U-Net) built from DepthShareconv_block stages.
    Implementation of this paper:
    https://arxiv.org/pdf/1807.10165.pdf

    Same dense-skip topology as NestedUNet: node x{i}_{j} at depth i and
    column j concatenates all earlier nodes of its row with the upsampled
    node one level deeper.
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        # Channel widths per depth: 32, 64, 128, 256, 512.
        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        # Shared 2x pooling / bilinear upsampling between levels.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Backbone column j = 0 (plain encoder).
        self.conv0_0 = DepthShareconv_block(in_ch,  filters[0])
        self.conv1_0 = DepthShareconv_block(filters[0],  filters[1])
        self.conv2_0 = DepthShareconv_block(filters[1],  filters[2])
        self.conv3_0 = DepthShareconv_block(filters[2], filters[3])
        self.conv4_0 = DepthShareconv_block(filters[3],  filters[4])

        # Column j = 1: one same-row input + one upsampled deeper input.
        self.conv0_1 = DepthShareconv_block(filters[0] + filters[1],  filters[0])
        self.conv1_1 = DepthShareconv_block(filters[1] + filters[2], filters[1])
        self.conv2_1 = DepthShareconv_block(filters[2] + filters[3], filters[2])
        self.conv3_1 = DepthShareconv_block(filters[3] + filters[4], filters[3])

        # Column j = 2: two same-row inputs + one upsampled deeper input.
        self.conv0_2 = DepthShareconv_block(filters[0] * 2 + filters[1],  filters[0])
        self.conv1_2 = DepthShareconv_block(filters[1] * 2 + filters[2], filters[1])
        self.conv2_2 = DepthShareconv_block(filters[2] * 2 + filters[3],  filters[2])

        # Column j = 3.
        self.conv0_3 = DepthShareconv_block(filters[0] * 3 + filters[1], filters[0])
        self.conv1_3 = DepthShareconv_block(filters[1] * 3 + filters[2], filters[1])

        # Column j = 4 (final top-row node).
        self.conv0_4 = DepthShareconv_block(filters[0] * 4 + filters[1],  filters[0])

        # 1x1 projection to the requested number of output channels.
        self.final = nn.Conv2d(filters[0], out_ch, kernel_size=1)

    def forward(self, x):
        # Nodes are evaluated in dependency order, one diagonal at a time.
        x0_0 = self.conv0_0(x)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x0_1 = self.conv0_1(torch.cat([x0_0, self.Up(x1_0)], 1))

        x2_0 = self.conv2_0(self.pool(x1_0))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.Up(x2_0)], 1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.Up(x1_1)], 1))

        x3_0 = self.conv3_0(self.pool(x2_0))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.Up(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.Up(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.Up(x1_2)], 1))

        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.Up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.Up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.Up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.Up(x1_3)], 1))

        output = self.final(x0_4)
        return output
class DepthShareU_Net(nn.Module):
    """
    U-Net whose convolution stages are DepthShareconv_block modules.
    Base architecture: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 64
        filters = [n1 << i for i in range(5)]  # 64, 128, 256, 512, 1024

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder.
        self.Conv1 = DepthShareconv_block(in_ch, filters[0])
        self.Conv2 = DepthShareconv_block(filters[0], filters[1])
        self.Conv3 = DepthShareconv_block(filters[1], filters[2])
        self.Conv4 = DepthShareconv_block(filters[2], filters[3])
        self.Conv5 = DepthShareconv_block(filters[3], filters[4])

        # Decoder.
        self.Up5 = DepthShareUp_conv(filters[4], filters[3])
        self.Up_conv5 = DepthShareconv_block(filters[4], filters[3])
        self.Up4 = DepthShareUp_conv(filters[3], filters[2])
        self.Up_conv4 = DepthShareconv_block(filters[3], filters[2])
        self.Up3 = DepthShareUp_conv(filters[2], filters[1])
        self.Up_conv3 = DepthShareconv_block(filters[2], filters[1])
        self.Up2 = DepthShareUp_conv(filters[1], filters[0])
        self.Up_conv2 = DepthShareconv_block(filters[1], filters[0])

        # Final 1x1 projection to the requested number of output channels.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Standard U-Net pass: encode, then decode with skip connections."""
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)
class InDepthShareconv_block(nn.Module):
    """Two stacked InDepthShareCBN stages: in_ch -> out_ch -> out_ch."""

    def __init__(self, in_ch, out_ch):
        super().__init__()

        self.layer1 = nn.Sequential(
            InDepthShareCBN(in_ch, out_ch),
            InDepthShareCBN(out_ch, out_ch)
        )

    def forward(self, input):
        return self.layer1(input)
class InDepthShareUp_conv(nn.Module):
    """Upsampling step: 2x upsample followed by an in-depth-share conv block."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            InDepthShareCBN(in_ch, out_ch)
        )

    def forward(self, x):
        return self.up(x)
class InDepthShareU_Net(nn.Module):
    """
    U-Net whose convolution stages are InDepthShareconv_block modules.
    Base architecture: https://arxiv.org/abs/1505.04597
    """

    def __init__(self, in_ch=3, out_ch=1):
        super().__init__()

        n1 = 32
        filters = [n1 << i for i in range(5)]  # 32, 64, 128, 256, 512

        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Encoder.
        self.Conv1 = InDepthShareconv_block(in_ch, filters[0])
        self.Conv2 = InDepthShareconv_block(filters[0], filters[1])
        self.Conv3 = InDepthShareconv_block(filters[1], filters[2])
        self.Conv4 = InDepthShareconv_block(filters[2], filters[3])
        self.Conv5 = InDepthShareconv_block(filters[3], filters[4])

        # Decoder.
        self.Up5 = InDepthShareUp_conv(filters[4], filters[3])
        self.Up_conv5 = InDepthShareconv_block(filters[4], filters[3])
        self.Up4 = InDepthShareUp_conv(filters[3], filters[2])
        self.Up_conv4 = InDepthShareconv_block(filters[3], filters[2])
        self.Up3 = InDepthShareUp_conv(filters[2], filters[1])
        self.Up_conv3 = InDepthShareconv_block(filters[2], filters[1])
        self.Up2 = InDepthShareUp_conv(filters[1], filters[0])
        self.Up_conv2 = InDepthShareconv_block(filters[1], filters[0])

        # Final 1x1 projection to the requested number of output channels.
        self.Conv = nn.Conv2d(filters[0], out_ch, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """Standard U-Net pass: encode, then decode with skip connections."""
        enc1 = self.Conv1(x)
        enc2 = self.Conv2(self.Maxpool1(enc1))
        enc3 = self.Conv3(self.Maxpool2(enc2))
        enc4 = self.Conv4(self.Maxpool3(enc3))
        enc5 = self.Conv5(self.Maxpool4(enc4))

        dec = self.Up_conv5(torch.cat((enc4, self.Up5(enc5)), dim=1))
        dec = self.Up_conv4(torch.cat((enc3, self.Up4(dec)), dim=1))
        dec = self.Up_conv3(torch.cat((enc2, self.Up3(dec)), dim=1))
        dec = self.Up_conv2(torch.cat((enc1, self.Up2(dec)), dim=1))

        return self.Conv(dec)