import math

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.activation import ReLU

from einops import rearrange, repeat

from .norm_module import *

# adopted from https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L280
class NoiseInjection(nn.Module):
    """Add fixed-strength Gaussian noise to a feature map.

    The strength (0.01) is stored as a buffer rather than a parameter,
    so it travels with the state dict but is never trained.
    (Adapted from rosinality/stylegan2-pytorch.)
    """

    def __init__(self, full=False):
        super().__init__()
        self.register_buffer('noise_weight', torch.tensor(0.01))
        # full=True draws independent noise per channel; otherwise a single
        # noise map is broadcast across all channels.
        self.full = full

    def forward(self, image, noise=None):
        if noise is None:
            batch, channels, height, width = image.shape
            noise_channels = channels if self.full else 1
            noise = image.new_empty(batch, noise_channels, height, width).normal_()
        return image + self.noise_weight * noise

class ChannelwiseNoiseInjection(nn.Module):
    """Add Gaussian noise with a learnable per-channel strength.

    The raw strength ``noise_weight_seed`` starts at zero and is passed
    through softplus, so the effective scale begins at ln(2) ~= 0.693 and
    stays strictly positive during training.

    Cleanup: removed dead commented-out noise-caching code and the unused
    ``self.noise`` attribute it referenced.
    """

    def __init__(self, num_channels, full=False):
        super().__init__()
        # One learnable scale per channel, broadcast over batch and space.
        self.noise_weight_seed = nn.Parameter(torch.zeros((1, num_channels, 1, 1)))
        self.num_channels = num_channels
        # full=True draws independent noise per channel; otherwise a single
        # noise map is broadcast across all channels.
        self.full = full

    def forward(self, image, noise=None):
        if noise is None:
            batch, channel, height, width = image.shape
            noise = image.new_empty(batch, channel if self.full else 1, height, width).normal_()
        return image + F.softplus(self.noise_weight_seed) * noise

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_channels) + ')'

# Every use site of this block has upsample enabled, uses a pre-activation
# structure, and all convolutions are spectral-normalized (SN-conv).
# num_w is the concatenation of the global z and the label embedding
# (the external 128 + 180 vector built by the caller).
""" Adds rezero, noise injection, and switches to LeakyReLU """
class ResBlockG(nn.Module):
    """Pre-activation generator residual block, gated ReZero-style.

    Main path: adaptive norm -> LeakyReLU -> (optional 2x nearest upsample)
    -> SN-conv -> channelwise noise injection, applied twice; summed with a
    shortcut that is a 1x1 SN-conv when channels or resolution change.

    ``num_w`` is the width of the conditioning vector ``w`` handed to the
    adaptive norms (the caller concatenates a global latent with a label
    embedding).

    Cleanup: removed dead commented-out code (the plain NoiseInjection
    alternative and an old bilinear-interpolate shortcut).
    """

    def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, upsample=False, num_w=128):
        super().__init__()
        self.upsample = upsample
        self.h_ch = h_ch if h_ch else out_ch
        self.conv1 = conv2d(in_ch, self.h_ch, ksize, pad=pad, bias=False)
        self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=pad, bias=False)
        self.b1 = SpatialAdaptiveSynBatchGroupNorm2d(in_ch, num_w=num_w)
        self.b2 = SpatialAdaptiveSynBatchGroupNorm2d(self.h_ch, num_w=num_w)
        # A learned 1x1 shortcut is required whenever the channel count or
        # the spatial resolution changes between input and output.
        self.learnable_sc = in_ch != out_ch or upsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.activation = nn.LeakyReLU(0.01)

        # ReZero gate: the residual branch starts switched off (alpha = 0).
        # NOTE(review): unlike ResBlockD.forward, alpha is not clamped here.
        self.alpha = nn.Parameter(torch.tensor(0.0))
        self.out_ch = out_ch
        self.noise1 = ChannelwiseNoiseInjection(self.h_ch)
        self.noise2 = ChannelwiseNoiseInjection(out_ch)

    def residual(self, in_feat, w, bbox):
        """Norm/act/conv/noise main branch; upsamples before conv1 if set."""
        x = in_feat
        x = self.b1(x, w, bbox)
        x = self.activation(x)
        if self.upsample:
            x = F.interpolate(x, scale_factor=2, mode='nearest')
        x = self.conv1(x)
        x = self.noise1(x)
        x = self.b2(x, w, bbox)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.noise2(x)
        return x

    def shortcut(self, x):
        """Identity, or upsample + 1x1 conv when shapes differ."""
        if self.learnable_sc:
            if self.upsample:
                x = F.interpolate(x, scale_factor=2, mode='nearest')
            x = self.c_sc(x)
        return x

    def forward(self, in_feat, w, bbox):
        return self.alpha * self.residual(in_feat, w, bbox) + self.shortcut(in_feat)





class SELayer(nn.Module):
    """Squeeze-and-Excitation gate.

    Global-average-pools the input, squeezes to ``inp // reduction``
    channels, expands to ``oup`` sigmoid gates, and multiplies them back
    onto the input.
    """

    def __init__(self, inp, oup, reduction=4):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            conv2d(inp, inp // reduction, 1, pad=0),
            nn.LeakyReLU(0.01),
            conv2d(inp // reduction, oup, 1, pad=0),
            nn.Sigmoid(),
        )

    def forward(self, x):
        pooled = self.avg_pool(x)
        gate = self.fc(pooled)
        return x.mul(gate)

class ResBlockD(nn.Module):
    """Pre-activation discriminator residual block with a gated,
    variance-rescaled merge.

    The residual path is act -> conv -> act -> conv (-> optional SE gate,
    optional 2x average-pool downsample); the shortcut is a 1x1 conv when
    channels or resolution change.
    """

    def __init__(self, in_ch, out_ch, ksize=3, pad=1, downsample=False, SE=False):
        super().__init__()
        self.conv1 = conv2d(in_ch, out_ch, ksize, 1, pad)
        self.conv2 = conv2d(out_ch, out_ch, ksize, 1, pad)
        self.activation = nn.LeakyReLU(0.01)
        self.se = SELayer(out_ch, out_ch) if SE else nn.Identity()
        self.downsample = downsample
        self.learnable_sc = (in_ch != out_ch) or downsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        # Residual gate: starts at zero so the block begins near-identity.
        self.alpha = nn.Parameter(torch.tensor(0.0))

    def residual(self, in_feat):
        h = self.conv1(self.activation(in_feat))
        h = self.se(self.conv2(self.activation(h)))
        if self.downsample:
            h = F.avg_pool2d(h, 2)
        return h

    def shortcut(self, x):
        if not self.learnable_sc:
            return x
        y = self.c_sc(x)
        return F.avg_pool2d(y, 2) if self.downsample else y

    def forward(self, in_feat):
        gate = self.alpha.clamp(-1, 1)
        mixed = gate * self.residual(in_feat) + self.shortcut(in_feat)
        # Divide by sqrt(1 + gate^2) to keep output variance roughly
        # matched to the shortcut branch as the gate opens.
        return mixed * gate.pow(2).add(1).rsqrt()

""" 默认 bias = True """
def conv2d(in_feat, out_feat, kernel_size=3, stride=1, pad=1, groups=1, bias=True, spectral_norm=True):
    """Build a replicate-padded ``nn.Conv2d``, spectral-normalized by default.

    Returns the spectral-norm-wrapped module when ``spectral_norm`` is
    True, otherwise the bare convolution.
    """
    layer = nn.Conv2d(
        in_feat,
        out_feat,
        kernel_size,
        stride,
        padding=pad,
        groups=groups,
        bias=bias,
        padding_mode="replicate",
    )
    if not spectral_norm:
        return layer
    return nn.utils.spectral_norm(layer, eps=1e-4)



class MaskRegressBlock(nn.Module):
    """BatchNorm -> LeakyReLU -> conv residual block, gated by an alpha
    parameter initialised to zero (ReZero-style, starts as identity)."""

    def __init__(self, channels, kernel_size=3, bias=False):
        super().__init__()
        self.conv = nn.Sequential(
            nn.BatchNorm2d(channels),
            nn.LeakyReLU(0.01),
            conv2d(channels, channels, kernel_size, bias=bias),
        )
        self.alpha = nn.Parameter(torch.tensor(0.0))

    def forward(self, x):
        return x + self.alpha * self.conv(x)


# BGN+SPADE 
class SpatialAdaptiveSynBatchGroupNorm2d(nn.Module):
    """Blended batch/group norm with spatially adaptive, bbox-weighted
    per-object affine modulation (BGN + SPADE-style conditioning)."""

    def __init__(self, num_features, num_w=512):
        super().__init__()
        self.num_features = num_features
        # Project each per-object latent to a per-channel scale and shift.
        self.weight_proj = nn.utils.spectral_norm( nn.Linear(num_w, num_features))
        self.bias_proj = nn.utils.spectral_norm(nn.Linear(num_w, num_features))
        self.batch_norm2d = nn.BatchNorm2d(num_features, eps=1e-5, affine=False,
                            momentum=0.1, track_running_stats=True)

        self.group_norm = nn.GroupNorm(4, num_features, eps=1e-5, affine=False)
        self.rho = nn.Parameter(torch.tensor(0.1)) # the ratio of GN in the BN/GN blend

        self.alpha = nn.Parameter(torch.tensor(0.0)) # ReZero-style gate on the affine modulation

    def forward(self, x, vector, bbox):
        """
        Normalize x with a BN/GN blend, then modulate it with per-pixel
        scale/shift maps formed as the bbox-mask-weighted average of the
        per-object projections of `vector`.

        :param x: input feature map (b, c, h, w)
        :param vector: latent vector (b*o, dim_w)
        :param bbox: bbox map (b, o, h, w)
        :return: modulated feature map, same shape as x
        """
        self.batch_norm2d._check_input_dim(x)
        # use BGN: rho (clamped to [0, 1]) is the fraction of group norm.
        output_b = self.batch_norm2d(x)
        output_g = self.group_norm(x)
        output = output_b + self.rho.clamp(0,1) * (output_g - output_b)

        b, o, _, _ = bbox.size()
        _, _, h, w = x.size()
        bbox = F.interpolate(bbox, size=(h, w), mode='bilinear', align_corners=False) # b o h w
        weight, bias = self.weight_proj(vector), self.bias_proj(vector) # b*o d

        # The per-pixel weighted average is expressed as a batched matmul.
        # NOTE: the ops below are intentionally in-place (div_, transpose_)
        # on local tensors/views; statement order matters here.
        bbox_non_spatial = bbox.view(b, o, -1) # b o h*w
        # Per-pixel normaliser: sum of all object masks (epsilon avoids /0).
        bbox_non_spatial_margin = bbox_non_spatial.sum(dim=1, keepdim=True) + torch.tensor(1e-4) # b 1 h*w
        bbox_non_spatial.div_(bbox_non_spatial_margin)
        weight, bias = weight.view(b, o, -1), bias.view(b, o, -1) # b o d
        weight.transpose_(1, 2), bias.transpose_(1, 2) # b d o (in-place transpose)
        weight, bias = torch.bmm(weight, bbox_non_spatial), torch.bmm(bias, bbox_non_spatial) # b d h*w
        # weight.div_(bbox_non_spatial_margin), bias.div_(bbox_non_spatial_margin) # b d h*w
        weight, bias = weight.view(b, -1, h, w), bias.view(b, -1, h, w)

        # weight = torch.sum(bbox * weight, dim=1, keepdim=False) / \
        #     (torch.sum(bbox, dim=1, keepdim=False) + 1e-6) # b d h w
        # bias = torch.sum(bbox * bias, dim=1, keepdim=False) / \
        #     (torch.sum(bbox, dim=1, keepdim=False) + 1e-6) # b d h w
        affined = weight * output + bias
        # Gate the modulation: alpha starts at 0 and is clamped to [-1, 1].
        return output + self.alpha.clamp(-1, 1) * affined

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'


# https://github.com/lucidrains/stylegan2-pytorch/blob/eceb8aacb669f19b79cc74c7160a85252b1086d6/stylegan2_pytorch/stylegan2_pytorch.py

def DepthWiseConv2d(dim_in, dim_out, kernel_size, padding=0, stride=1, bias=True):
    """Depthwise-separable conv: a per-channel (grouped) spatial conv
    followed by a 1x1 pointwise conv, both built via ``conv2d``."""
    depthwise = conv2d(dim_in, dim_in, kernel_size=kernel_size, pad=padding,
                       groups=dim_in, stride=stride, bias=bias)
    pointwise = conv2d(dim_in, dim_out, kernel_size=1, bias=bias)
    return nn.Sequential(depthwise, pointwise)

class LinearAttention(nn.Module):
    """Linear self-attention over a 2-D feature map (from lucidrains'
    stylegan2-pytorch). Softmax is applied to q and k separately and
    k^T v is aggregated first, so cost is linear in the pixel count."""

    def __init__(self, dim, dim_head = 64, heads = 8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.nonlin = nn.GELU()
        # Queries via 1x1 conv; keys/values via a depthwise-separable 3x3.
        self.to_q = conv2d(dim, inner_dim, 1, pad = 0, bias = False)
        self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding = 1, bias = False)
        self.to_out = conv2d(inner_dim, dim, 1, pad = 0)
        
    def forward(self, fmap):
        # fmap: (b, dim, x, y)
        h, x, y = self.heads, *fmap.shape[-2:]
        q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim = 1))
        # Fold heads into the batch dim and flatten the spatial grid.
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))

        # Softmax over the feature dim for q and the spatial dim for k —
        # the "efficient attention" factorization.
        q = q.softmax(dim = -1)
        k = k.softmax(dim = -2)

        q = q * self.scale

        # context: (b*h, c, c) — k-weighted sum of v over pixels,
        # then applied back to every query position.
        context = torch.einsum('b n d, b n e -> b d e', k, v)
        out = torch.einsum('b n d, b d e -> b n e', q, context)
        out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)

        out = self.nonlin(out)
        return self.to_out(out)

# one layer of self-attention and feedforward, for images

class Residual(nn.Module):
    """Wrap a callable ``fn`` and add its input to its output."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x) + x

class ChanNorm(nn.Module):
    """Channel-wise normalization with a norm-rescaled gain.

    Normalizes over the channel dimension (biased std), then applies the
    gain ``g`` divided by its own L2 norm — a weight-normalized affine —
    plus the bias ``b``.
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        mu = torch.mean(x, dim=1, keepdim=True)
        sigma = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt()
        normed = (x - mu) / (sigma + self.eps)
        return normed * self.g / (self.g.norm() + self.eps) + self.b

class PreNorm(nn.Module):
    """Apply ChanNorm to the input before running the wrapped module
    (pre-norm sublayer style)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = ChanNorm(dim)

    def forward(self, x):
        normalized = self.norm(x)
        return self.fn(normalized)

def attn_and_ff(chan):
    """One self-attention + feedforward stage for feature maps.

    Returns an nn.Sequential of two residual, pre-normed sublayers:
    linear attention, then a 1x1-conv feedforward with 2x expansion and
    LeakyReLU(0.2).

    (Was a lambda assigned to a name — PEP 8 E731 — with a redundant
    ``*[...]`` unpacking; converted to a plain function.)
    """
    return nn.Sequential(
        Residual(PreNorm(chan, LinearAttention(chan))),
        Residual(PreNorm(chan, nn.Sequential(
            conv2d(chan, chan * 2, 1, pad=0),
            nn.LeakyReLU(0.2),
            conv2d(chan * 2, chan, 1, pad=0),
        ))),
    )

class ReZero(nn.Module):
    """Gated residual merge inspired by ReZero (UAI 2021).

    Computes ``(main + clamp(alpha, -1, 1) * residual) / 2`` where alpha
    is a learned scalar initialised to ``init`` — note the result is the
    average of the two branches, not a plain sum.

    Examples:
        >>> rezero = ReZero()
        >>> rezero(main(x), residual(x))
    """

    def __init__(self, init: float = 0.) -> None:
        super().__init__()
        self.rezero_alpha = nn.Parameter(torch.tensor(init))

    def forward(self, main: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        gate = self.rezero_alpha.clamp(-1., 1.)
        return (main + residual * gate) / 2