import torch
import torch.nn as nn
import torch.nn.functional as F

from models.sar_sam import _residual_channel_attention_block_

class MultiEncoder(nn.Module):
    """Residual noise-estimation network.

    Lifts the input into a wide feature space with a 3x3 conv, refines it
    through one dilated multi-scale block (RDMSB) and three cascaded
    encoder blocks (REncoder), then projects back to the input channel
    count to predict a residual ("noise") map.

    forward(x) returns the tuple ``(x + noise, -noise)``.
    """

    def __init__(self, in_channels=1, out_channels=128):
        super().__init__()

        bias = True

        # Expand input channels into the feature space.
        self.extend = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

        self.RDMSB = RDMSB(out_channels)

        self.block1 = REncoder(out_channels)
        self.block2 = REncoder(out_channels)
        self.block3 = REncoder(out_channels)

        # Collapse features back to the input channel count.
        self.conv_out = nn.Conv2d(
            out_channels, in_channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

    def forward(self, x):
        """Return ``(x + predicted_noise, -predicted_noise)``."""
        features = self.extend(x)

        features = self.RDMSB(features)
        for encoder in (self.block1, self.block2, self.block3):
            features = encoder(features)

        noise = self.conv_out(features)

        # Residual formulation: the network predicts the noise component.
        return x + noise, -noise


class RDMSB(nn.Module):
    """Residual dilated multi-scale block.

    Pipeline: max-pool by 2 -> 3x3 conv -> four parallel residual
    channel-attention branches with dilation rates 1..4 -> channel-wise
    concat (4*channels) -> PixelShuffle(2) back to `channels` at the
    original resolution -> conv+LeakyReLU fusion -> concat with the
    skip input -> 3x3 reduce conv -> add back onto the input.
    """

    def __init__(self, channels):
        super().__init__()

        bias = True

        self.down = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_in = nn.Conv2d(
            channels, channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

        # Four branches, identical except for the dilation rate (1..4),
        # giving growing receptive fields over the pooled features.
        self.dilated1 = _residual_channel_attention_block_(channels, 3, 1, 1, bias)
        self.dilated2 = _residual_channel_attention_block_(channels, 3, 1, 2, bias)
        self.dilated3 = _residual_channel_attention_block_(channels, 3, 1, 3, bias)
        self.dilated4 = _residual_channel_attention_block_(channels, 3, 1, 4, bias)

        # PixelShuffle(2) turns the 4*channels concat back into
        # `channels` maps at the pre-pooling resolution.
        self.up = nn.PixelShuffle(2)
        self.fusion = nn.Sequential(
            nn.Conv2d(
                channels, channels,
                kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
            ),
            nn.LeakyReLU(0.2, True),
        )
        self.reduce = nn.Conv2d(
            2 * channels, channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

    def forward(self, x):
        pooled = self.conv_in(self.down(x))

        branches = [
            branch(pooled)
            for branch in (self.dilated1, self.dilated2, self.dilated3, self.dilated4)
        ]
        multi_scale = torch.cat(branches, dim=1)

        restored = self.fusion(self.up(multi_scale))
        fused = self.reduce(torch.cat([restored, x], dim=1))

        # Residual connection around the whole block.
        return x + fused


class REncoder(nn.Module):
    """Residual-attention encoder block.

    Pipeline: max-pool by 2 -> three cascaded residual channel-attention
    blocks plus one plain 3x3 conv -> concat of all four intermediate
    outputs (4*channels) -> PixelShuffle(2) back to `channels` at the
    original resolution -> conv+LeakyReLU fusion -> concat with the skip
    input -> 3x3 reduce conv. Unlike RDMSB, the output is NOT added
    back onto the input.
    """

    def __init__(self, channels):
        super().__init__()

        bias = True

        self.down = nn.MaxPool2d(kernel_size=2, stride=2)

        # Three attention blocks in series, then a plain conv.
        self.b1 = _residual_channel_attention_block_(channels, 3, 1, 1, bias)
        self.b2 = _residual_channel_attention_block_(channels, 3, 1, 1, bias)
        self.b3 = _residual_channel_attention_block_(channels, 3, 1, 1, bias)
        self.b4 = nn.Conv2d(
            channels, channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

        # PixelShuffle(2) maps the 4*channels concat back to `channels`
        # at the pre-pooling resolution.
        self.up = nn.PixelShuffle(2)
        self.fusion = nn.Sequential(
            nn.Conv2d(
                channels, channels,
                kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
            ),
            nn.LeakyReLU(0.2, True),
        )

        self.reduce = nn.Conv2d(
            2 * channels, channels,
            kernel_size=3, stride=1, padding=1, dilation=1, bias=bias,
        )

    def forward(self, x):
        stage = self.down(x)

        # Run the cascade, keeping every intermediate output for the concat.
        cascade = []
        for block in (self.b1, self.b2, self.b3, self.b4):
            stage = block(stage)
            cascade.append(stage)

        restored = self.fusion(self.up(torch.cat(cascade, dim=1)))

        return self.reduce(torch.cat([restored, x], dim=1))


if __name__ == "__main__":

    # Smoke test: a 4-sample batch of single-channel 256x256 inputs.
    in_tensor = torch.ones(4, 1, 256, 256)

    ME = MultiEncoder()
    # Bug fix: forward returns a tuple (denoised, -noise); the previous
    # code did `out = ME(in_tensor)` and then `out.shape`, which raises
    # AttributeError on the tuple. Unpack it instead.
    out, neg_noise = ME(in_tensor)

    print(out.shape)
