from collections import OrderedDict

import torch
import torch.nn.utils.spectral_norm as SpectralNorm
import torch.nn as nn
import torch.nn.functional as F
# from torch.nn.utils.parametrizations import spectral_norm


class ConvBn2d(nn.Sequential):
    """Conv2d followed by BatchNorm2d, packaged as a named nn.Sequential.

    The submodules are registered under the keys ``conv`` and ``bn`` so they
    are reachable as ``block.conv`` / ``block.bn`` and appear under those
    names in the state_dict. ``bias`` defaults to False because the BatchNorm
    shift makes a convolution bias redundant.
    """

    def __init__(self,
                 ic: int,
                 oc: int,
                 kernel_size: int,
                 stride: int = 1,
                 padding: int = 0,
                 dilation: int = 1,
                 groups: int = 1,
                 bias: bool = False,
                 ) -> None:
        layers = OrderedDict()
        layers["conv"] = nn.Conv2d(
            ic, oc,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )
        layers["bn"] = nn.BatchNorm2d(oc)
        super(ConvBn2d, self).__init__(layers)

class ResBlock(nn.Module):
    """Residual block: two 3x3 ConvBn2d layers with a ReLU between them.

    The shortcut is the identity when ``in_planes == planes``; otherwise a
    1x1 ConvBn2d projection (``self.convs``) matches the channel count.

    NOTE(review): ``stride`` is stored but never applied to any conv, so the
    block never downsamples — kept as-is to preserve the interface.
    """

    def __init__(self, in_planes, planes, cardinality=1, stride=1, dilation=1):
        super(ResBlock, self).__init__()
        self.inplanes = in_planes
        self.planes = planes
        self.cardinality = cardinality
        self.stride = stride
        self.dilation = dilation
        self.learned_shortcut = (in_planes != planes)
        conv = ConvBn2d
        c_mid = min(in_planes, planes)
        self.conv0 = conv(in_planes, c_mid, kernel_size=3, padding=dilation,
                          groups=cardinality, dilation=dilation, bias=False)
        self.conv1 = conv(c_mid, planes, kernel_size=3, padding=dilation,
                          groups=cardinality, dilation=dilation, bias=False)
        # self.atv = nn.LeakyReLU(negative_slope=1e-2, inplace=True)
        self.atv = nn.ReLU(inplace=True)
        # FIX: only allocate the 1x1 projection when it is actually used.
        # The original created it unconditionally, registering parameters
        # (and optimizer state) that forward() never touches whenever the
        # shortcut is the identity — inconsistent with ResNetBlockWithSN
        # and SPADEResnetBlock, which create it conditionally.
        if self.learned_shortcut:
            self.convs = conv(in_planes, planes, kernel_size=1, bias=False)

    def shortcut(self, x):
        # Identity when channel counts already match, else 1x1 projection.
        if self.learned_shortcut:
            x_s = self.convs(x)
        else:
            x_s = x
        return x_s

    def forward(self, x):
        h = self.shortcut(x)
        dx = self.atv(self.conv0(x))
        dx = self.conv1(dx)
        return dx + h


class ResNetBlockWithSN(nn.Module):
    """Residual block of two ConvBlock layers, optionally spectral-normalized.

    The shortcut is the identity when ``dim_in == dim_out``, otherwise a
    1x1 convolution projects the input to ``dim_out`` channels.
    """

    def __init__(self, dim_in, dim_out, spec_norm=True, LR=0.01, stride=1, up=False):
        # BUG FIX: the original never called nn.Module.__init__, so assigning
        # submodules below raised AttributeError at construction time.
        super(ResNetBlockWithSN, self).__init__()
        # BUG FIX: the original passed `stride` and `up`/`False` positionally,
        # where they landed in ConvBlock's `ks` and `pd` slots
        # (signature: dim_in, dim_out, spec_norm, LR, ks, pd, stride, up).
        # Keyword arguments route them to the intended parameters.
        self.conv0 = ConvBlock(dim_in, dim_out, spec_norm, LR, stride=stride, up=False)
        self.conv1 = ConvBlock(dim_out, dim_out, spec_norm, LR, stride=stride, up=up)
        self.learned_shortcut = (dim_in != dim_out)
        if self.learned_shortcut:
            self.convs = nn.Conv2d(dim_in, dim_out, kernel_size=(1, 1), bias=False)

    def shortcut(self, x):
        # BUG FIX: the original called self.convs() without the input tensor.
        if self.learned_shortcut:
            x_s = self.convs(x)
        else:
            x_s = x
        return x_s

    def forward(self, x):
        h = self.shortcut(x)
        dx = self.conv0(x)
        dx = self.conv1(dx)
        return dx + h


class ConvBlock(nn.Module):
    """Conv -> (BatchNorm) -> LeakyReLU, with optional U-Net-style upsampling.

    With ``spec_norm`` True the convolution is spectrally normalized and the
    BatchNorm layer is omitted. With ``up`` True, ``forward`` expects a skip
    tensor ``x2``: ``x1`` is bilinearly upsampled 2x, padded to ``x2``'s
    spatial size, and concatenated with ``x2`` along channels before the
    convolution — so ``dim_in`` must then equal x1.channels + x2.channels.
    """

    def __init__(self, dim_in, dim_out, spec_norm=False, LR=0.01, ks=3, pd=1, stride=1, up=False):
        super(ConvBlock, self).__init__()

        self.up = up
        # NOTE: "up_smaple" (sic) kept — renaming attributes would break callers.
        self.up_smaple = nn.UpsamplingBilinear2d(scale_factor=2) if up else None

        conv = nn.Conv2d(dim_in, dim_out, kernel_size=ks, stride=stride, padding=pd, bias=False)
        act = nn.LeakyReLU(LR, inplace=False)
        if spec_norm:
            # Spectral norm replaces BatchNorm in this configuration.
            self.main = nn.Sequential(SpectralNorm(conv), act)
        else:
            self.main = nn.Sequential(
                conv,
                nn.BatchNorm2d(dim_out, affine=True, track_running_stats=True),
                act,
            )

    def forward(self, x1, x2=None):
        # Plain conv path: no upsampling configured.
        if self.up_smaple is None:
            return self.main(x1)

        x1 = self.up_smaple(x1)
        # Inputs are NCHW: pad x1 so its spatial size matches x2 before concat.
        diff_h = x2.size()[2] - x1.size()[2]
        diff_w = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2,
                        diff_h // 2, diff_h - diff_h // 2])
        # See the U-Net padding discussion:
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        return self.main(torch.cat([x2, x1], dim=1))

class Self_Attn(nn.Module):
    """SAGAN-style self-attention over spatial positions.

    ``gamma`` is initialized to zero, so the layer starts as the identity and
    the attention contribution is blended in as gamma is learned. Query/key
    channels are reduced to ``in_dim // 8``.
    """

    def __init__(self, in_dim, activation):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim
        self.activation = activation

        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
            inputs :
                x : input feature maps( B X C X W X H)
            returns :
                out : self attention value + input feature
                attention: B X N X N (N is Width*Height)
        """
        batch, channels, width, height = x.size()
        n = width * height
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)   # B x N x C'
        keys = self.key_conv(x).view(batch, -1, n)                         # B x C' x N
        attention = self.softmax(torch.bmm(queries, keys))                 # B x N x N, rows sum to 1
        values = self.value_conv(x).view(batch, -1, n)                     # B x C x N

        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, width, height)

        # Residual blend: identity at init because gamma == 0.
        out = self.gamma * attended + x
        return out, attention

# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
# content_features has norm_nc channels (output shape); label_nc is the segmap's channel count
class SPADE(nn.Module):
    """Spatially-adaptive denormalization (SPADE).

    Instance-normalizes the content features (no affine parameters), then
    modulates them element-wise with a scale (gamma) and shift (beta) that a
    small shared conv net predicts from the condition map, which is first
    resized (nearest-neighbor) to the content's spatial size.
    """

    def __init__(self, norm_nc, label_nc):
        super(SPADE, self).__init__()
        hidden_nc = 128
        kernel = 3
        pad = kernel // 2
        # NOTE: "nomorlized" (sic) kept — it is a registered submodule name.
        self.nomorlized = nn.InstanceNorm2d(norm_nc, affine=False)
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(in_channels=label_nc, out_channels=hidden_nc, kernel_size=kernel, padding=pad),
            nn.ReLU())
        self.mlp_gamma = nn.Conv2d(in_channels=hidden_nc, out_channels=norm_nc, kernel_size=kernel, padding=pad)
        self.mlp_beta = nn.Conv2d(in_channels=hidden_nc, out_channels=norm_nc, kernel_size=kernel, padding=pad)

    def forward(self, content_features, condition_features):
        normed = self.nomorlized(content_features)
        # Resize the condition map to match the content spatially.
        segmap = F.interpolate(condition_features, size=content_features.size()[2:], mode='nearest')
        shared = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(shared)
        beta = self.mlp_beta(shared)
        return normed * (1 + gamma) + beta

class SPADEResnetBlock(nn.Module):
    """Residual block where each conv is preceded by SPADE + LeakyReLU.

    ``seg`` is the semantic/condition map fed to every SPADE layer.
    NOTE: c_in must equal the channel count of the input x.
    """

    def __init__(self, c_in: int, c_out: int, label_nc: int, dilation: int, cardinality: int):
        super(SPADEResnetBlock, self).__init__()
        self.learned_shortcut = (c_in != c_out)
        c_mid = min(c_in, c_out)

        # conv layers
        self.conv0 = nn.Conv2d(c_in, c_mid, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation)
        self.conv1 = nn.Conv2d(c_mid, c_out, kernel_size=3, padding=dilation, groups=cardinality, dilation=dilation)
        # h(x) = W'x — 1x1 projection so the shortcut matches c_out channels.
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(c_in, c_out, kernel_size=1, bias=False)
        # Spectral normalization.
        # BUG FIX: the original called `spectral_norm`, but that import is
        # commented out at the top of the file, so constructing this block
        # raised NameError. Use the `SpectralNorm` alias that IS imported.
        self.conv0 = SpectralNorm(self.conv0)
        self.conv1 = SpectralNorm(self.conv1)
        if self.learned_shortcut:
            self.conv_s = SpectralNorm(self.conv_s)

        # SPADE normalization before each conv (channel counts follow the
        # tensor each one normalizes: x has c_in, the mid activation c_mid).
        self.norm0 = SPADE(c_in, label_nc)
        self.norm1 = SPADE(c_mid, label_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(c_in, label_nc)

    def shortcut(self, x, seg):
        # Identity when channels already match, else SPADE + 1x1 projection.
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)

    def forward(self, x, seg):
        hx = self.shortcut(x, seg)
        dx = self.conv0(self.actvn(self.norm0(x, seg)))
        dx = self.conv1(self.actvn(self.norm1(dx, seg)))
        out = dx + hx
        return out

if __name__ == '__main__':
    # Smoke test: ResBlock with a learned 1x1 shortcut (3 -> 128 channels).
    batch_size = 2
    # Renamed from `input`, which shadowed the builtin of the same name.
    x = torch.randn((batch_size, 3, 256, 256))
    block = ResBlock(3, 128)
    out = block(x)
    print(out.shape)




