from collections import OrderedDict

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
from torch.nn.utils.spectral_norm import SpectralNorm

# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This architecture seemed like a standard architecture for unconditional or
# class-conditional GAN architecture using residual block.
# The code was inspired from https://github.com/LMescheder/GAN_stability.
from models.component.normlization import SPADE

class ConvBn2d(nn.Sequential):
    """2D convolution immediately followed by batch normalization.

    The submodules are registered under the names ``conv`` and ``bn`` so
    they can be addressed as attributes (e.g. ``m.conv.weight``).
    """

    def __init__(self,
                 ic: int,
                 oc: int,
                 kernel_size: int,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = False,
                 ) -> None:
        # bias defaults to False because BatchNorm's beta makes it redundant.
        conv = nn.Conv2d(
            ic,
            oc,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        bn = nn.BatchNorm2d(oc)
        super().__init__(OrderedDict([("conv", conv), ("bn", bn)]))

class ConvBlock(nn.Module):
    """Conv3x3 -> BatchNorm -> LeakyReLU, with an optional upsample+skip path.

    When ``up=True``, ``forward(x1, x2)`` bilinearly upsamples ``x1`` by 2,
    pads it to match ``x2``'s spatial size, concatenates ``[x2, x1]`` on the
    channel axis (so ``dim_in`` must equal the sum of both channel counts)
    and runs the conv stack on the result.
    """

    def __init__(self, dim_in, dim_out, spec_norm=False, LR=0.01, stride=1, up=False):
        super(ConvBlock, self).__init__()

        self.up = up
        # NOTE: the attribute keeps the original (misspelled) name "up_smaple"
        # so any external code referencing it keeps working.
        if self.up:
            self.up_smaple = nn.UpsamplingBilinear2d(scale_factor=2)
        else:
            self.up_smaple = None

        conv = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=False)
        if spec_norm:
            # Bug fix: the original instantiated the SpectralNorm *class*
            # (a hook object, not an nn.Module), which raises a TypeError
            # inside nn.Sequential.  Use the torch.nn.utils.spectral_norm
            # function, which wraps the module in place.
            conv = spectral_norm(conv)

        self.main = nn.Sequential(
            conv,
            nn.BatchNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.LeakyReLU(LR, inplace=False),
        )

    def forward(self, x1, x2=None):
        """Run the block; ``x2`` is required (and used) only when ``up=True``."""
        if self.up_smaple is not None:
            x1 = self.up_smaple(x1)
            # input is CHW — pad x1 so its spatial size matches x2 exactly.
            diffY = x2.size()[2] - x1.size()[2]
            diffX = x2.size()[3] - x1.size()[3]

            x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                            diffY // 2, diffY - diffY // 2])
            # if you have padding issues, see
            # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
            # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
            x = torch.cat([x2, x1], dim=1)
            return self.main(x)
        else:
            return self.main(x1)

class ResBlock(nn.Module):
    """Residual block built from ConvBn2d units:
    conv-bn -> ReLU -> conv-bn, added onto a (possibly projected) shortcut.
    """

    def __init__(self, in_planes, planes, cardinality=1, stride=1, dilation=1):
        super(ResBlock, self).__init__()
        self.inplanes = in_planes
        self.planes = planes
        self.cardinality = cardinality
        # NOTE(review): `stride` is stored but never forwarded to the convs,
        # so it has no effect on the output resolution — confirm intent.
        self.stride = stride
        self.dilation = dilation
        # Project the shortcut only when channel counts differ.
        self.learned_shortcut = (in_planes != planes)
        conv = ConvBn2d
        c_mid = min(in_planes, planes)
        self.conv0 = conv(in_planes, c_mid, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation,
                          bias=False)
        self.conv1 = conv(c_mid, planes, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation,
                          bias=False)
        # self.atv = nn.LeakyReLU(negative_slope=1e-2, inplace=True)
        self.atv = nn.ReLU(inplace=True)
        # Fix: build the 1x1 projection only when it is actually used.
        # Previously it was always registered, adding unused (but trained and
        # checkpointed) parameters whenever in_planes == planes.
        if self.learned_shortcut:
            self.convs = conv(in_planes, planes, kernel_size=1, bias=False)

    def shortcut(self, x):
        """Return the shortcut branch: 1x1 conv-bn projection or identity."""
        if self.learned_shortcut:
            x_s = self.convs(x)
        else:
            x_s = x
        return x_s

    def forward(self, x):
        h = self.shortcut(x)
        dx = self.atv(self.conv0(x))
        dx = self.conv1(dx)
        # No activation is applied after the residual sum.
        return dx + h

class SPADEResnetBlock(nn.Module):
    """ResNet block using SPADE normalization: (SPADE -> LeakyReLU -> conv)
    twice, plus a SPADE-normalized 1x1 learned shortcut when fin != fout.

    Fixes vs. the original:
    - spectral norm is now applied to every convolution; the original only
      applied it when the shortcut was NOT learned (a misplaced `else`), and
      its inner `if self.learned_shortcut` branch was unreachable.
    - `use_se` is now defined; it was read in forward() but never set,
      raising AttributeError on the zero-padding path.
    """

    def __init__(self, fin, fout, label_nc, norm_type='IN', dilation=1):
        super(SPADEResnetBlock, self).__init__()
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # 'nozero' selects reflection padding; only 'zero' uses built-in conv padding.
        self.pad_type = 'nozero'
        # No squeeze-and-excitation layer is built in this class, so keep it off.
        self.use_se = False

        if self.pad_type != 'zero':
            self.pad = nn.ReflectionPad2d(dilation)
            self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=(3, 3), padding=0, dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=(3, 3), padding=0, dilation=dilation)
        else:
            self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=(3, 3), padding=dilation, dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=(3, 3), padding=dilation, dilation=dilation)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=(1, 1), bias=False)

        # Apply spectral norm to all convolutions unconditionally.
        self.conv_0 = spectral_norm(self.conv_0)
        self.conv_1 = spectral_norm(self.conv_1)
        if self.learned_shortcut:
            self.conv_s = spectral_norm(self.conv_s)

        # SPADE layers are conditioned on the segmentation map (label_nc channels);
        # each is sized for the tensor it normalizes (x has fin, dx has fmiddle).
        self.norm_0 = SPADE(fin, label_nc, normtype=norm_type)
        self.norm_1 = SPADE(fmiddle, label_nc, normtype=norm_type)
        if self.learned_shortcut:
            self.norm_s = SPADE(fin, label_nc, normtype=norm_type)

    def shortcut(self, x, seg):
        """Identity, or SPADE + 1x1 conv projection when channel counts differ."""
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        # LeakyReLU with negative slope 0.2, applied after each SPADE norm.
        return F.leaky_relu(x, 2e-1)

    def forward(self, x, seg):
        x_s = self.shortcut(x, seg)
        if self.pad_type != 'zero':
            dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, seg))))
            dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, seg))))
        else:
            dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
            dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
            if self.use_se:
                # NOTE(review): `se_layar` is never constructed anywhere in
                # this class; this branch stays dead while use_se is False.
                dx = self.se_layar(dx)
        out = x_s + dx
        return out

class ResidualBlock(nn.Module):
    """Reflection-padded residual block:
    pad -> conv -> InstanceNorm -> PReLU -> pad -> conv -> InstanceNorm,
    added back onto the input, then PReLU.

    NOTE(review): the residual addition requires the output to match the
    input shape, so in practice in_channels == out_channels and stride == 1
    are expected — confirm against callers.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1):
        super(ResidualBlock, self).__init__()
        self.padding1 = nn.ReflectionPad2d(padding)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
        self.bn1 = nn.InstanceNorm2d(out_channels)
        self.prelu = nn.PReLU()
        self.padding2 = nn.ReflectionPad2d(padding)
        # Bug fix: conv2 consumes conv1's output, which has `out_channels`
        # channels; the original passed `in_channels`, crashing whenever
        # in_channels != out_channels.
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
        self.bn2 = nn.InstanceNorm2d(out_channels)

    def forward(self, x):
        residual = x
        out = self.padding1(x)
        out = self.conv1(out)
        out = self.bn1(out)
        out = self.prelu(out)
        out = self.padding2(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.prelu(out)
        return out

# ResNet block used in pix2pixHD
class ResnetBlock(nn.Module):
    """Residual block in the pix2pixHD style.

    Two reflection-padded convolutions, each wrapped by ``norm_layer``, with
    ``activation`` in between; the result is added back onto the input.
    """

    def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
        super().__init__()

        # Reflection padding keeps the spatial size equal to the input's.
        pad = (kernel_size - 1) // 2
        layers = [
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
            activation,
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection: input plus the conv branch.
        return x + self.conv_block(x)