import torch
import torch.nn as nn
from FFC import FFC_BN_ACT, FFCResnetBlock

'''
generator:
  kind: ffc_resnet
  input_nc: 4
  output_nc: 3
  ngf: 64
  n_downsampling: 3
  n_blocks: 18
  add_out_act: sigmoid
  init_conv_kwargs:
    ratio_gin: 0
    ratio_gout: 0
    enable_lfu: false
  downsample_conv_kwargs:
    ratio_gin: ${generator.init_conv_kwargs.ratio_gout}
    ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin}
    enable_lfu: false
  resnet_conv_kwargs:
    ratio_gin: 0.75
    ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin}
    enable_lfu: false
'''

# class ResNetBlock(nn.Module):
#     def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1):
#         super(ResNetBlock, self).__init__()
#         self.net = nn.Sequential(
#             nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False),
#             nn.InstanceNorm2d(out_channels, affine=True),
#             nn.SiLU(inplace=True),
#             nn.Conv2d(out_channels, out_channels, 3, 1, 1),
#             nn.InstanceNorm2d(out_channels, affine=True)
#         )
#         self.identity = nn.Sequential(
#             nn.Conv2d(in_channels+out_channels, out_channels, 1, 1, 0, bias=False),
#             nn.SiLU(inplace=True)
#         )
#     def forward(self, x):
#         out = self.net(x)
#         out = self.identity(torch.cat([out, x], dim=1))
#         return out

class MyModel(nn.Module):
    """FFC-ResNet inpainting generator.

    Encodes a (B, 3, H, W) image concatenated with a (B, 1, H, W) mask
    through Fast Fourier Convolution blocks, decodes back to a 3-channel
    image in [0, 1] (sigmoid), and composites the result with the input
    as ``out * mask + x`` — the mask selects where the network's output
    replaces the original pixels.
    """

    def __init__(self):
        super(MyModel, self).__init__()

        # 7x7 stem: 3 image channels + 1 mask channel -> 64 features.
        self.init_layer = FFC_BN_ACT(4, 64, 7, stride=1, padding=3, enable_lfu=False)
        # Three stride-2 stages: 64 -> 128 -> 256 -> 512, spatial size /8.
        self.downsample = nn.Sequential(
            FFC_BN_ACT(64, 128, 3, stride=2, padding=1, enable_lfu=False),
            FFC_BN_ACT(128, 256, 3, stride=2, padding=1, enable_lfu=False),
            FFC_BN_ACT(256, 512, 3, stride=2, padding=1, enable_lfu=False),
        )
        # 18 FFC residual blocks at the bottleneck, 75% global branch
        # (matches the config comment at the top of the file).
        # NOTE(review): the downsample stages rely on FFC_BN_ACT's default
        # ratio_gin/ratio_gout while the bottleneck asks for 0.75 — confirm
        # the defaults make the channel split compatible.
        self.bottleneck = nn.Sequential(
            *[FFCResnetBlock(512, ratio_gin=0.75, ratio_gout=0.75, enable_lfu=False) for _ in range(18)]
        )

        # Decoder: bilinear x2 upsample followed by an FFC conv, three times,
        # mirroring the encoder back to 64 channels at full resolution.
        self.upsample = nn.Sequential(
            nn.Upsample(scale_factor=2, mode="bilinear"),
            FFC_BN_ACT(512, 256, 3, stride=1, padding=1, enable_lfu=False),
            nn.Upsample(scale_factor=2, mode="bilinear"),
            FFC_BN_ACT(256, 128, 3, stride=1, padding=1, enable_lfu=False),
            nn.Upsample(scale_factor=2, mode="bilinear"),
            FFC_BN_ACT(128, 64, 3, stride=1, padding=1, enable_lfu=False),
        )

        # Reflection-padded 7x7 projection to RGB, squashed to [0, 1].
        self.final_layer = nn.Sequential(
            nn.ReflectionPad2d(3),
            nn.Conv2d(64, 3, 7, 1, 0),
            nn.Sigmoid()
        )

        # Kaiming init for all plain Conv2d layers (fan_out mode).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
                if m.bias is not None:
                    m.bias.data.zero_()

    def Padding(self, small_tensor: torch.Tensor, big_tensor: torch.Tensor) -> torch.Tensor:
        """Zero-pad `small_tensor` (bottom/right) to `big_tensor`'s spatial size.

        Returns a tensor shaped like `big_tensor` spatially, with
        `small_tensor`'s content in the top-left corner. Matches
        `small_tensor`'s dtype (previously the default dtype was used,
        silently casting non-float32 inputs) and `big_tensor`'s device.
        """
        b, c, ph, pw = small_tensor.shape
        h, w = big_tensor.shape[2:]
        padding_tensor = torch.zeros(b, c, h, w,
                                     dtype=small_tensor.dtype,
                                     device=big_tensor.device)
        # Slice assignment copies, so no .clone() is needed.
        padding_tensor[:, :, :ph, :pw] = small_tensor
        return padding_tensor

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Inpaint `x` in the regions selected by `mask`.

        x:    (B, 3, H, W) input image.
        mask: (B, 1, H, W) region selector, broadcast over channels.
        Returns ``out * mask + x`` at the network's output resolution
        (inputs are zero-padded bottom/right if the decoder output is larger).
        """
        cat_x = torch.cat([x, mask], dim=1)

        ini = self.init_layer(cat_x)
        down_x = self.downsample(ini)
        emb = self.bottleneck(down_x)
        up_x = self.upsample(emb)
        out = self.final_layer(up_x)

        # Align the originals to the decoder's spatial size before compositing.
        x = self.Padding(x, out)
        mask = self.Padding(mask, out)
        return out*mask + x

class GAN(nn.Module):
    """Two-branch discriminator: a global branch sees the full image, a
    local branch sees the image masked to the inpainted region. Each branch
    ends in adaptive average pooling + a 1024-d FC; the concatenated
    features are mapped to a single (unbounded) realness logit.
    """

    def __init__(self):
        super(GAN, self).__init__()
        # Global branch: six stride-2 conv stages, then pool to 1x1.
        self.global_net = nn.Sequential(
            self.conv_block(3, 64, 5, 2, 1),
            self.conv_block(64, 128, 5, 2, 1),
            self.conv_block(128, 256, 5, 2, 1),
            self.conv_block(256, 512, 5, 2, 1),
            self.conv_block(512, 512, 5, 2, 1),
            self.conv_block(512, 512, 5, 2, 1),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.global_fc = nn.Sequential(
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2, inplace=True)
        )

        # Local branch: identical architecture, applied to the masked input.
        self.local_net = nn.Sequential(
            self.conv_block(3, 64, 5, 2, 1),
            self.conv_block(64, 128, 5, 2, 1),
            self.conv_block(128, 256, 5, 2, 1),
            self.conv_block(256, 512, 5, 2, 1),
            self.conv_block(512, 512, 5, 2, 1),
            self.conv_block(512, 512, 5, 2, 1),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.local_fc = nn.Sequential(
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2, inplace=True)
        )

        # Raw logit output; sigmoid left to the loss (e.g. BCEWithLogits).
        self.final_layer = nn.Sequential(
            nn.Linear(2048, 1),
            # nn.Sigmoid()
        )

    def conv_block(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1):
        """Conv2d (no bias) followed by LeakyReLU(0.2); norm intentionally off."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False),
            # nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.2, inplace=True)
        )

    def forward(self, global_X, mask):
        """Score `global_X`; `mask` (broadcastable to it) crops the local view.

        Returns a (B, 1) tensor of realness logits.
        """
        local_X = global_X * mask
        global_Y = self.global_net(global_X)
        # flatten(1) keeps the batch dimension. The previous torch.squeeze()
        # also dropped a batch dimension of size 1, which broke the
        # torch.cat(..., dim=1) below for single-sample batches.
        global_Y = self.global_fc(global_Y.flatten(1))
        local_Y = self.local_net(local_X)
        local_Y = self.local_fc(local_Y.flatten(1))
        return self.final_layer(torch.cat([global_Y, local_Y], dim=1))

if __name__ == '__main__':
    # Smoke test: run the generator on a random batch with an all-zero mask
    # (mask == 0 everywhere means the composite returns the padded input).
    model = MyModel()
    # gan = GAN()
    inp = torch.rand(2, 3, 256, 256)
    # Fixed duplicated assignment typo: was `mask = mask = torch.zeros(...)`.
    mask = torch.zeros(2, 1, inp.shape[2], inp.shape[3])
    out = model(inp, mask)
    # out2 = gan(out, out[:, :, :320, :320])
    print(out.shape)
    # print(out2.shape)