import math
from numpy import block

import torch
import torch.nn as nn

from models.cbam import CBAM
from models.sar_sam import _upsample_, _ResBlock_CBAM_


class RConv2(nn.Module):
    """Residual 3x3 conv block: conv-BN-LeakyReLU-conv plus a (projected) skip.

    When ``in_channels`` differs from ``out_channels`` the skip branch is
    projected with a 1x1 convolution so the two paths can be summed;
    otherwise the skip is a plain identity.
    """

    def __init__(self, in_channels=128, out_channels=128, **kwargs):
        super().__init__()

        # Main branch: two 3x3 convs with BN + LeakyReLU between them.
        self.net = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
        )

        # Skip branch: 1x1 projection only when channel counts differ.
        if in_channels == out_channels:
            self.connect = nn.Sequential()
        else:
            self.connect = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)

    def forward(self, x):
        skip = self.connect(x)
        return self.net(x) + skip
        

class DAB(nn.Module):
    """Dense aggregation block built from two residual conv stages.

    The input and both stage outputs are densely concatenated, squeezed back
    to ``channels`` with a 1x1 conv, and added to the input (residual).
    """

    def __init__(self, channels=128, **kwargs):
        super().__init__()

        self.block1 = RConv2(channels, channels)
        # Second stage consumes the concatenation of input + first stage.
        self.block2 = RConv2(2 * channels, channels)
        # Squeezes the dense concatenation back down to ``channels``.
        self.reduce = nn.Conv2d(3 * channels, channels, kernel_size=1, bias=False)

    def forward(self, x):
        first = self.block1(x)
        second = self.block2(torch.cat([x, first], dim=1))
        dense = torch.cat([x, first, second], dim=1)
        return x + self.reduce(dense)


class DAB_CBAM(DAB):
    """DAB variant that refines the dense features with CBAM attention.

    Runs the parent DAB, passes its output through a 3x3 conv followed by a
    CBAM attention stage, and adds the refined features back (residual).
    """

    def __init__(self, channels=128, **kwargs):
        super().__init__(channels, **kwargs)

        # 3x3 conv ahead of the attention stage.
        self.conv = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3,
                      padding=1, bias=False)
        )

        # Channel + spatial attention; wrapped in Sequential to match the
        # module style used elsewhere in this file.
        self.Atten = nn.Sequential(CBAM(channels))

    def forward(self, x):
        base = super().forward(x)
        refined = self.Atten(self.conv(base))
        return base + refined


class DMSB(nn.Module):
    """Dilated multi-scale block.

    Expands the input to ``out_channels``, runs three parallel 3x3 convs with
    dilation rates 1/2/4, fuses the concatenated scales through a 1x1 conv,
    and adds the fusion back to the expanded features (residual).
    """

    def __init__(self, in_channels=1, out_channels=128, **kwargs):
        super().__init__()

        # Stem: lift the image to the working channel width.
        self.extend = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.ReLU(True),
        )

        reduce_factor = 3
        extend_factor = 1
        # NOTE: math.ceil is a no-op on the integer floor division; kept
        # as-is so the channel arithmetic (and any checkpoints) stay intact.
        dilated_out = math.ceil(out_channels // reduce_factor) * extend_factor

        # One branch per dilation rate; padding == dilation preserves the
        # spatial size for each branch.
        self.dilated1 = nn.Conv2d(out_channels, dilated_out, kernel_size=3,
                                  dilation=1, padding=1, bias=False)
        self.dilated2 = nn.Conv2d(out_channels, dilated_out, kernel_size=3,
                                  dilation=2, padding=2, bias=False)
        self.dilated4 = nn.Conv2d(out_channels, dilated_out, kernel_size=3,
                                  dilation=4, padding=4, bias=False)

        # 1x1 conv fusing the concatenated branches back to out_channels.
        self.fusion = nn.Sequential(
            nn.Conv2d(dilated_out * reduce_factor, out_channels, kernel_size=1,
                      bias=False)
        )

    def forward(self, x):
        extended = self.extend(x)

        scales = torch.cat(
            [self.dilated1(extended), self.dilated2(extended), self.dilated4(extended)],
            dim=1,
        )

        return extended + self.fusion(scales)


class RDANet(nn.Module):
    """Residual denoising network built from three stacked aggregation blocks.

    Predicts an additive noise map from the input and returns
    ``input + noise`` (residual learning). The aggregation block type
    defaults to DAB and can be overridden via ``block=...`` in kwargs.
    """

    def __init__(self, in_channels=1, out_channels=128, **kwargs):
        super().__init__()

        # Allow swapping the aggregation block; absent -> DAB. Equivalent to
        # the original key-presence check since passing DAB resolves to DAB.
        self.basic_block = kwargs.get("block", DAB)

        # Stem conv lifting the image to the working channel width.
        self.extend = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                                padding=1, bias=False)
        self.block1 = self.basic_block(out_channels)
        self.block2 = self.basic_block(out_channels)
        self.block3 = self.basic_block(out_channels)
        # Fuse the three stage outputs and project to a 1-channel noise map.
        self.reduce = nn.Sequential(
            nn.Conv2d(3 * out_channels, out_channels, kernel_size=1, bias=False),
            nn.Conv2d(out_channels, 1, kernel_size=3, padding=1, bias=False),
        )

    def forward(self, x):
        base = self.extend(x)
        # Each stage is itself residual on top of the previous features.
        f1 = self.block1(base) + base
        f2 = self.block2(f1) + f1
        f3 = self.block3(f2) + f2
        noise = self.reduce(torch.cat([f1, f2, f3], dim=1))
        return x + noise


class RDANet_DAB_CBAM(RDANet):
    """RDANet configured to use the attention-augmented DAB_CBAM blocks."""

    def __init__(self, in_channels=1, out_channels=128, **kwargs):
        # Force the attention variant; any caller-supplied ``block`` kwarg is
        # intentionally dropped, matching this fixed configuration.
        super().__init__(in_channels, out_channels, block=DAB_CBAM)


class RDANetDMSB(RDANet):
    """RDANet whose stem conv is replaced by a dilated multi-scale block.

    Only the ``extend`` stem changes; the aggregation blocks, the reduction
    head, and the residual forward pass are inherited unchanged from RDANet.
    """

    def __init__(self, in_channels=1, out_channels=128, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)

        # Swap the plain 3x3 stem built by RDANet.__init__ for the
        # multi-scale extractor (the original stem is discarded).
        self.extend = DMSB(in_channels=in_channels, out_channels=out_channels)

    # Fix: removed the redundant ``forward`` override that only delegated to
    # super().forward(x); RDANet.forward is now inherited directly.


class RDANetDecoder(RDANet):
    """RDANet variant with a downsample/upsample (encoder/decoder) layout.

    Features are pooled 2x before the aggregation blocks, refined with a
    residual CBAM stage, upsampled back to input resolution, and fused into
    a noise map that is added to the input image (residual output).
    """


    def __init__(self, in_channels=1, out_channels=128, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)

        # 2x spatial downsampling applied before the aggregation blocks.
        self.down = nn.MaxPool2d(kernel_size=2, stride=2)
        # Learned 2x upsampling back to the stem resolution; exact contract
        # of _upsample_ (including its second forward argument) is defined
        # in models.sar_sam.
        self.up = _upsample_(
            scale=2, in_channels=out_channels, kernel_size=3, 
            stride=1, dilation=1, bias=False
        )

        # Residual CBAM attention over the summed block outputs.
        self.atten = _ResBlock_CBAM_(
            out_channels, 3, 1, 1, False
        )
        # Projects the upsampled features down to the image channel count.
        self.fusion = nn.Sequential(
            nn.Conv2d(out_channels, in_channels, kernel_size=3, 
            padding=1, bias=False)
        )

    
    def forward(self, x):
        # Predict a noise map at full resolution and add it to the input.

        img = x

        feature = self.extend(x)
        down_ft = self.down(feature)
        # Three residual aggregation stages at half resolution.
        down_ft1 = self.block1(down_ft) + down_ft
        down_ft2 = self.block2(down_ft1) + down_ft1
        down_ft3 = self.block3(down_ft2) + down_ft2

        down_ft4 = self.atten(down_ft1+down_ft2+down_ft3)
        # NOTE(review): this concat yields 4*out_channels channels, but
        # self.up was constructed with in_channels=out_channels and
        # self.fusion expects out_channels input — confirm _upsample_
        # actually accepts/reduces 4*out_channels here, otherwise this is
        # a channel mismatch at runtime.
        ft = torch.cat([down_ft1, down_ft2, down_ft3, down_ft4], dim=1)
        ft = self.up(ft, feature)

        noise = self.fusion(ft)

        return noise + img


if __name__ == "__main__":

    # Smoke test: push a dummy batch through RDANet and print the output shape.
    # Fix: fall back to CPU when CUDA is unavailable so the script runs on
    # machines without a GPU instead of crashing on torch.device("cuda:0").
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    a = torch.ones(4, 1, 256, 256).to(device)
    net = RDANet().to(device)

    b = net(a)
    print(b.shape)
