import functools

import torch
import torch.nn as nn


# Defines the U-Net generator.
# |num_downs|: number of downsamplings in the U-Net. For example,
# if |num_downs| == 7, an image of size 128x128 will become 1x1
# at the bottleneck.


class UnetGenerator(nn.Module):
    """U-Net generator assembled from nested ``UnetSkipConnectionBlock``s.

    The network is built inside-out: the bottleneck block is created first,
    then wrapped by intermediate blocks, and finally by the outermost block
    that maps ``input_nc`` channels to ``output_nc``.

    Args:
        input_nc: number of channels of the input image.
        output_nc: number of channels of the generated image.
        num_downs: number of downsampling steps; e.g. with ``num_downs == 7``
            a 128x128 image shrinks to 1x1 at the bottleneck.
        ngf: number of filters in the outermost convolution.
        norm_layer: normalization layer class (or ``None`` for no norm).
        use_dropout: whether the extra deep blocks use dropout.
        output_function: final activation class (``nn.Sigmoid`` or ``nn.Tanh``).
    """

    def __init__(self, input_nc, output_nc, num_downs=5, ngf=64, norm_layer=None, use_dropout=False, output_function=nn.Sigmoid):
        super(UnetGenerator, self).__init__()

        # Bottleneck (innermost) block.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # Extra ngf*8 <-> ngf*8 blocks when num_downs > 5.
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(
                ngf * 8,
                ngf * 8,
                input_nc=None,
                submodule=block,
                norm_layer=norm_layer,
                use_dropout=use_dropout,
            )
        # Progressively widen back towards the image resolution.
        for mult in (4, 2, 1):
            block = UnetSkipConnectionBlock(ngf * mult, ngf * mult * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        # Outermost block maps input_nc -> output_nc and applies the output activation.
        self.model = UnetSkipConnectionBlock(
            output_nc,
            ngf,
            input_nc=input_nc,
            submodule=block,
            outermost=True,
            norm_layer=norm_layer,
            output_function=output_function,
        )

        # With a Tanh head the output is rescaled by 10/255 (small-residual
        # regime, HiDDeN-style); with Sigmoid it is left untouched.
        self.tanh = output_function == nn.Tanh
        self.factor = 10 / 255 if self.tanh else 1.0

    def forward(self, input):
        """Run the U-Net and apply the activation-dependent output scaling."""
        return self.factor * self.model(input)


# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
#   |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=None, use_dropout=False, output_function=nn.Sigmoid):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if norm_layer is None:
            use_bias = True
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nn.ReLU(True)
        if norm_layer != None:
            downnorm = norm_layer(inner_nc)
            upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            if output_function == nn.Tanh:
                up = [uprelu, upconv, nn.Tanh()]
            else:
                up = [uprelu, upconv, nn.Sigmoid()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv] if norm_layer is None else [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            if norm_layer is None:
                down = [downrelu, downconv]
                up = [uprelu, upconv]
            else:
                down = [downrelu, downconv, downnorm]
                up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        return self.model(x) if self.outermost else torch.cat([x, self.model(x)], 1)


if __name__ == "__main__":
    from torchinfo import summary

    model = UnetGenerator(12, 3, 5, 64, norm_layer=nn.BatchNorm2d)

    batch_size = 8
    # print(model)
    summary(model, input_size=(batch_size, 12, 128, 128))
    for name in model.state_dict():
        print(name)
    # for name, parameters in model.named_parameters():
    #     print(name, ':', parameters)

"""
==================================================================================================================================
Layer (type:depth-idx)                                                           Output Shape              Param #
==================================================================================================================================
UnetGenerator                                                                    --                        --
├─UnetSkipConnectionBlock: 1-1                                                   [8, 3, 128, 128]          --
│    └─Sequential: 2-1                                                           [8, 3, 128, 128]          --
│    │    └─Conv2d: 3-1                                                          [8, 64, 64, 64]           12,288
│    │    └─UnetSkipConnectionBlock: 3-2                                         [8, 128, 64, 64]          16,649,856
│    │    └─ReLU: 3-3                                                            [8, 128, 64, 64]          --
│    │    └─ConvTranspose2d: 3-4                                                 [8, 3, 128, 128]          6,147
│    │    └─Sigmoid: 3-5                                                         [8, 3, 128, 128]          --
==================================================================================================================================
Total params: 16,668,291
Trainable params: 16,668,291
Non-trainable params: 0
Total mult-adds (G): 32.88
==================================================================================================================================
Input size (MB): 6.29
Forward/backward pass size (MB): 112.72
Params size (MB): 66.67
Estimated Total Size (MB): 185.69
==================================================================================================================================
"""
