from collections import OrderedDict

import torch.nn as nn
import torch.nn.functional as F
# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This architecture seemed like a standard architecture for unconditional or
# class-conditional GAN architecture using residual block.
# The code was inspired from https://github.com/LMescheder/GAN_stability.
from torch.nn import LeakyReLU, ReLU, Conv2d
from torch.nn.utils import spectral_norm
from torch.nn.utils.spectral_norm import SpectralNorm
import torch
from models.component.normlization import SPADE
from utils.util import conv2d_get_padding
from torch.nn.modules.utils import _pair

class ConvBn2d(nn.Sequential):
    """A 2d convolution immediately followed by batch normalization.

    Submodules are registered under the names ``conv`` and ``bn`` so the
    state-dict layout matches the usual conv+bn pairing.
    """

    def __init__(self,
                 ic: int,
                 oc: int,
                 kernel_size: int,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = False,
                 ) -> None:
        # Build the two stages up front, then hand them to nn.Sequential
        # as an ordered mapping so they keep their names.
        conv = nn.Conv2d(ic, oc, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, groups=groups,
                         bias=bias)
        bn = nn.BatchNorm2d(oc)
        super(ConvBn2d, self).__init__(OrderedDict(conv=conv, bn=bn))

class ConvBlock(nn.Module):
    """3x3 Conv -> BatchNorm -> LeakyReLU, optionally preceded by 2x bilinear
    upsampling plus a U-Net style skip concatenation.

    Args:
        dim_in: input channels of the conv (after concatenation when ``up``).
        dim_out: output channels.
        spec_norm: if True, wrap the conv in spectral normalization.
        LR: negative slope of the LeakyReLU.
        stride: conv stride.
        up: if True, ``forward(x1, x2)`` upsamples ``x1`` by 2, pads it to
            ``x2``'s spatial size, concatenates ``[x2, x1]`` on channels and
            feeds that to the conv — so ``dim_in`` must equal
            ``x1`` channels + ``x2`` channels.
    """

    def __init__(self, dim_in, dim_out, spec_norm=False, LR=0.01, stride=1, up=False):
        super(ConvBlock, self).__init__()

        self.up = up
        # NOTE: attribute name "up_smaple" (sic) kept for backward compatibility.
        if self.up:
            self.up_smaple = nn.UpsamplingBilinear2d(scale_factor=2)
        else:
            self.up_smaple = None

        conv = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=stride, padding=1, bias=False)
        if spec_norm:
            # BUGFIX: the original wrapped the conv in the SpectralNorm *hook
            # class* (whose __init__ takes a parameter name, not a module, and
            # which is not an nn.Module), so nn.Sequential raised a TypeError.
            # Use the functional torch.nn.utils.spectral_norm instead.
            conv = spectral_norm(conv)

        self.main = nn.Sequential(
            conv,
            nn.BatchNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.LeakyReLU(LR, inplace=False),
        )

    def forward(self, x1, x2=None):
        """Apply the block; ``x2`` (the skip tensor) is required when ``up``."""
        if self.up_smaple is not None:
            x1 = self.up_smaple(x1)
            # Pad x1 so its spatial size matches x2 (dims are N,C,H,W).
            diffY = x2.size()[2] - x1.size()[2]
            diffX = x2.size()[3] - x1.size()[3]

            x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                            diffY // 2, diffY - diffY // 2])
            # if you have padding issues, see
            # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
            # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
            x = torch.cat([x2, x1], dim=1)
            return self.main(x)
        else:
            return self.main(x1)

class ResBlock(nn.Module):
    """Residual block built from two 3x3 ConvBn2d stages with a ReLU between
    them; the skip is a 1x1 ConvBn2d only when the channel count changes.

    Args:
        in_planes: input channels.
        planes: output channels; a learned 1x1 shortcut is used iff it
            differs from ``in_planes``.
        cardinality: groups for the 3x3 convolutions.
        stride: stored but NOT passed to any convolution — presumably a
            leftover; confirm before relying on it.  # NOTE(review)
        dilation: dilation (with matching padding) of the 3x3 convolutions.
    """

    def __init__(self, in_planes, planes, cardinality=1, stride=1, dilation=1):
        super(ResBlock, self).__init__()
        self.inplanes = in_planes
        self.planes = planes
        self.cardinality = cardinality
        self.stride = stride
        self.dilation = dilation
        self.learned_shortcut = (in_planes != planes)
        conv = ConvBn2d
        # Bottleneck the middle stage to the smaller of the two widths.
        c_mid = min(in_planes, planes)
        self.conv0 = conv(in_planes, c_mid, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation,
                          bias=False)
        self.conv1 = conv(c_mid, planes, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation,
                          bias=False)
        self.atv = nn.ReLU(inplace=True)
        # BUGFIX: the 1x1 shortcut conv was allocated unconditionally, adding
        # parameters (and weight-decay terms) that are never used when
        # in_planes == planes. Create it only for a learned shortcut, matching
        # the SPADEResnetBlock convention elsewhere in this file. (Checkpoints
        # saved with the unused 'convs.*' keys need strict=False to load.)
        if self.learned_shortcut:
            self.convs = conv(in_planes, planes, kernel_size=1, bias=False)

    def shortcut(self, x):
        """Identity, or the 1x1 projection when channel counts differ."""
        if self.learned_shortcut:
            x_s = self.convs(x)
        else:
            x_s = x
        return x_s

    def forward(self, x):
        h = self.shortcut(x)
        dx = self.atv(self.conv0(x))
        dx = self.conv1(dx)
        return dx + h

class SPADEResnetBlock(nn.Module):
    """SPADE residual block: (SPADE norm -> leaky ReLU -> conv) twice, plus a
    shortcut that is learned (SPADE + 1x1 conv) when channel counts differ.

    Args:
        fin: input channels.
        fout: output channels.
        label_nc: channel count of the segmentation map given to SPADE.
        norm_type: parameter-free norm variant used inside SPADE.
        dilation: dilation (and reflection-pad amount) of the 3x3 convs.
    """

    def __init__(self, fin, fout, label_nc, norm_type='IN', dilation=1):
        super(SPADEResnetBlock, self).__init__()
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # Hard-coded: always use reflection padding outside the convs.
        self.pad_type = 'nozero'

        if self.pad_type != 'zero':
            # Padding is done by ReflectionPad2d, so the convs stay unpadded.
            self.pad = nn.ReflectionPad2d(dilation)
            self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=(3, 3), padding=0, dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=(3, 3), padding=0, dilation=dilation)
        else:
            self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=(3, 3), padding=dilation, dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=(3, 3), padding=dilation, dilation=dilation)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=(1, 1), bias=False)

        # BUGFIX: spectral norm was previously attached in an `else` branch of
        # `if self.learned_shortcut:`, so it ran only when the shortcut was
        # NOT learned and was never applied to conv_s. Apply it to all convs,
        # as in the reference SPADE implementation.
        self.conv_0 = spectral_norm(self.conv_0)
        self.conv_1 = spectral_norm(self.conv_1)
        if self.learned_shortcut:
            self.conv_s = spectral_norm(self.conv_s)

        self.norm_0 = SPADE(fin, label_nc, normtype=norm_type)
        self.norm_1 = SPADE(fmiddle, label_nc, normtype=norm_type)
        if self.learned_shortcut:
            self.norm_s = SPADE(fin, label_nc, normtype=norm_type)

    def shortcut(self, x, seg):
        """Identity, or SPADE + 1x1 conv when fin != fout."""
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        """Leaky ReLU with slope 0.2 (SPADE convention)."""
        return F.leaky_relu(x, 2e-1)

    def forward(self, x, seg):
        x_s = self.shortcut(x, seg)
        if self.pad_type != 'zero':
            dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, seg))))
            dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, seg))))
        else:
            dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
            dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
            # BUGFIX: removed a dead `self.use_se` / `self.se_layar` branch —
            # neither attribute was ever defined, so it could only raise.
        out = x_s + dx
        return out

class ResidualBlock(nn.Module):
    """Reflection-padded residual block: (pad -> conv -> InstanceNorm -> PReLU)
    twice, identity skip, then a final PReLU after the addition.

    The identity skip means the addition only works when
    in_channels == out_channels and stride == 1; other values will fail at
    ``out += residual``.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1):
        super(ResidualBlock, self).__init__()
        self.padding1 = nn.ReflectionPad2d(padding)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
        self.bn1 = nn.InstanceNorm2d(out_channels)
        self.prelu = nn.PReLU()
        self.padding2 = nn.ReflectionPad2d(padding)
        # BUGFIX: conv2 consumes conv1's output, so its input channel count
        # must be out_channels (the original used in_channels, which broke
        # construction-time shapes whenever in_channels != out_channels).
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=0, stride=stride)
        self.bn2 = nn.InstanceNorm2d(out_channels)

    def forward(self, x):
        residual = x
        out = self.padding1(x)
        out = self.conv1(out)
        out = self.bn1(out)
        out = self.prelu(out)
        out = self.padding2(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.prelu(out)
        return out

class rSoftMax(nn.Module):
    """Attention normalization for split-attention convolutions.

    With radix > 1 the logits are softmax-normalized across the radix splits
    (per cardinal group); with radix == 1 a plain sigmoid gate is used.
    """

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        if self.radix <= 1:
            # Single split: independent sigmoid gating.
            return torch.sigmoid(x)
        batch = x.size(0)
        # (B, C) -> (B, cardinality, radix, -1) -> softmax over the radix axis.
        grouped = x.view(batch, self.cardinality, self.radix, -1)
        normed = F.softmax(grouped.transpose(1, 2), dim=1)
        return normed.reshape(batch, -1)

class SpFieldConv2d(nn.Module):
    """Split-attention convolution over several receptive-field sizes.

    Runs one conv per kernel size (padding computed so spatial size is
    preserved), sums the branch outputs, and re-weights the radix splits with
    a squeeze-and-excite style attention normalized by rSoftMax.

    Args:
        in_channels: input channels.
        channels: output channels per radix split.
        kernel_sizes: kernel size of each parallel branch.
        stride: per-branch strides (parallel to ``kernel_sizes``).
        dilation: dilation shared by all branches.
        groups: cardinality of the grouped convolutions.
        radix: number of attention splits (1 => plain sigmoid gating).
        bias: whether branch convs have a bias.
        reduction_factor: channel reduction of the attention bottleneck.
        norm_layer: normalization constructor, or None to skip normalization.
        IMAGE_SIZE: spatial size used to derive the "same" padding — assumes
            square inputs of this size; confirm against callers.  # NOTE(review)
    """

    def __init__(self, in_channels, channels, kernel_sizes=[3, 5], stride=[1, 1], dilation=1,
                 groups=1, radix=1, bias=True, reduction_factor=4, norm_layer=None, IMAGE_SIZE=256, **kwargs):
        super(SpFieldConv2d, self).__init__()
        # Attention bottleneck width, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        padding = [
            conv2d_get_padding(IMAGE_SIZE, IMAGE_SIZE, kernel_size=kernel_sizes[i], stride=stride[i], dilation=dilation)
            for i in range(len(kernel_sizes))]
        self.convs = nn.ModuleList([Conv2d(in_channels, channels * radix, _pair(kernel_sizes[i]), _pair(stride[i]),
                                           padding=padding[i], dilation=_pair(dilation), groups=groups * radix,
                                           bias=bias, padding_mode='replicate', **kwargs) for i in range(len(kernel_sizes))])

        self.use_norm = norm_layer is not None
        # BUGFIX: the norm layers were instantiated unconditionally, which
        # raised TypeError whenever norm_layer is None (the documented default);
        # build them only when they will actually be used.
        if self.use_norm:
            self.bn0 = norm_layer(channels * radix)
            self.bn1 = norm_layer(inter_channels)
        self.activate = LeakyReLU(inplace=True)
        self.relu = ReLU(inplace=True)
        self.fc1 = Conv2d(channels, inter_channels, _pair(1), groups=self.cardinality)
        self.fc2 = Conv2d(inter_channels, channels * radix, _pair(1), groups=self.cardinality)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, x):
        # Per-branch conv (+ optional norm) and activation.
        feature_map_list = []
        for c in self.convs:
            f = c(x)
            if self.use_norm:
                f = self.bn0(f)
            f = self.activate(f)
            feature_map_list.append(f)
        # Fuse branches by summation.
        U = sum(feature_map_list)
        batch, rchannel = U.shape[:2]
        if self.radix > 1:
            # BUGFIX: removed a lexicographic `torch.__version__ < '1.5'`
            # comparison (wrong for versions like '1.10'); both branches
            # computed the same integer split size anyway.
            splited = torch.split(U, rchannel // self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = U
        # Squeeze: global average pool, then the bottleneck FC (1x1 conv).
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        if self.use_norm:
            gap = self.bn1(gap)
        gap = self.relu(gap)

        # Excite: per-split attention weights.
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            attens = torch.split(atten, rchannel // self.radix, dim=1)
            out = sum(att * split for (att, split) in zip(attens, splited))
        else:
            out = atten * U
        return out.contiguous()

# ResNet block used in pix2pixHD
class ResnetBlock(nn.Module):
    """Two reflection-padded convolutions with a residual skip.

    ``norm_layer`` is a callable that wraps each nn.Conv2d (pix2pixHD style,
    e.g. a spectral-norm wrapper or the identity).
    """

    def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
        super().__init__()

        # "Same" padding for the given kernel size, applied by reflection.
        pad = (kernel_size - 1) // 2
        layers = [
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
            activation,
            nn.ReflectionPad2d(pad),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        """Return ``x + conv_block(x)``."""
        return x + self.conv_block(x)

class ResNetSFtBottleneck(nn.Module):
    """Bottleneck residual unit whose middle stage is either a split-field
    attention conv (radix >= 1) or a plain grouped 3x3 conv (radix == 0).

    Layout: 1x1 conv -> norm -> act, middle stage, 1x1 conv -> norm, with a
    1x1 projection shortcut whenever the channel count changes.
    """

    expansion = 4

    def __init__(self, inplanes: int, planes: int, stride=1, radix=1, cardinality=1,
                 bottleneck_width=64, dilation=1, norm_layer=None):
        super(ResNetSFtBottleneck, self).__init__()
        # Width of the bottleneck stage, scaled by bottleneck_width/cardinality.
        width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.radix = radix
        self.dilation = dilation
        self.stride = stride

        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=_pair(1), bias=False)
        self.bn1 = norm_layer(width)

        if radix >= 1:
            # Split-field attention conv normalizes internally via norm_layer.
            self.conv2 = SpFieldConv2d(
                width, width, groups=cardinality, bias=True, dilation=dilation,
                kernel_sizes=[3, 5], stride=[1, 1],
                radix=radix, norm_layer=norm_layer)
        else:
            self.conv2 = nn.Conv2d(
                width, width, kernel_size=_pair(3), stride=_pair(stride),
                padding=dilation, dilation=_pair(dilation),
                groups=cardinality, bias=True)
            self.bn2 = norm_layer(width)

        self.conv3 = nn.Conv2d(width, planes, kernel_size=_pair(1), bias=True)
        self.bn3 = norm_layer(planes)
        self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        if planes == inplanes:
            self.downsample = None
        else:
            # Channel-matching projection for the residual branch.
            self.downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes, bias=True, kernel_size=_pair(1), stride=_pair(1)),
                nn.BatchNorm2d(planes),
            )

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.conv2(out)
        if self.radix == 0:
            # The plain-conv path applies its own norm + activation here.
            out = self.relu(self.bn2(out))
        out = self.bn3(self.conv3(out))

        out += identity
        return self.relu(out)

if __name__ == '__main__':
    # Smoke test: push a random batch through ResNetSFtBottleneck.
    bs = 2
    # BUGFIX: pick the device at runtime instead of hard-coding .cuda()
    # (which crashed on CPU-only machines), and avoid shadowing the
    # builtin `input`.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.randn((bs, 4, 256, 256), device=device)
    # scon = SplAtConv2d(4,128,3,padding=3,groups=2,radix=2,dilation=3,norm_layer=nn.InstanceNorm2d)
    scon = ResNetSFtBottleneck(4, 128, 1, 4, 4, 64, 1, nn.BatchNorm2d).to(device)
    print(scon(x).shape)