import torch
import torchvision
from torch import nn
from math import pi, log2, sqrt
from utils import truncted_random


class HardReZero(nn.Module):
    """ReZero-style residual gate with a hard linear warm-up.

    During training the residual branch is scaled by ``count / max`` (capped
    at 1), so the block starts close to the identity and blends the residual
    in over the first ``max`` training steps.  At eval time the residual is
    always applied in full.
    """

    def __init__(self, *, max: float = 2_000) -> None:
        super().__init__()
        self.max = float(max)
        # Buffer so the warm-up progress is saved/restored with the state_dict.
        self.register_buffer("count", torch.tensor(0.))

    def forward(self, main: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        """Return ``main + alpha * residual`` with warm-up-scheduled alpha."""
        if self.training:
            # Fix: advance the warm-up counter only in training mode; the
            # previous version also incremented it during eval forward
            # passes, silently shortening the effective warm-up.
            if self.count < self.max:
                self.count += 1
            rezero_alpha = min(1., self.count / self.max)
        else:
            rezero_alpha = 1.
        return main + residual * rezero_alpha

class ResnetDiscriminator(nn.Module):
    """Projection discriminator with a shared trunk (reduced to 16x16 for
    every supported resolution), an image path and an RoIAlign object path.

    forward() returns:
        out_im      -- per-image real/fake logits
        out_obj_TF  -- per-object real/fake logits (all b*o objects)
        out_obj_cls -- projection class logits, only for objects with label > 0
        extras      -- [object features (b, o, d), bbox, labels]
    """

    def __init__(self, img_size=64, num_classes=0, input_dim=3, base_ch=64):
        super().__init__()
        self.num_classes = num_classes
        ch = base_ch

        # The shared trunk downscales every resolution to 16x16:
        # 64 -> 1/4, 128 -> 1/8, 256 -> 1/16, i.e. by 2**(log2(img_size)-4).
        # The image path then adds 2+1 blocks and adaptive avg-pooling; the
        # object path works on 8x8 RoI crops with 2+1 blocks; channel counts
        # never exceed 1024.
        channels = {
            64: [1, 2, 2, 4, 4],         # 2 + 1 blocks
            128: [1, 2, 2, 4, 8, 8],     # 3 + 1 blocks
            256: [1, 2, 2, 4, 4, 4, 8],  # 4 + 1 blocks
        }[img_size]

        downsamples = {
            64: [True, False, True, False],
            128: [True, False, True, True, False],
            256: [True, False, True, True, False, True],
        }[img_size]

        # shared trunk
        self.first = OptimizedBlock(input_dim, ch * channels[0], downsample=False)
        res = []
        for channel_in, channel_out, down in zip(channels[:-1], channels[1:], downsamples):
            res.append(
                ResBlockD(base_ch * channel_in, base_ch * channel_out, downsample=down)  # norm-leaky first, conv-noise last
            )
        self.shared = nn.Sequential(*res)  # -> (b, ch*channels[-1], 16, 16)

        # Paths diverge here.
        # Fix: the RoI spatial scale of the 16x16 trunk output relative to the
        # input image is 16/img_size.  The previous expression
        # 1.0/(log2(img_size)-4) evaluated to 1/2, 1/3 and 1/4 instead of the
        # intended 1/4, 1/8 and 1/16.
        self.roi_align = torchvision.ops.RoIAlign((8, 8), 16.0 / img_size, 0, aligned=True)

        # image path: 16x16 -> 4x4
        res = []
        for channel_in, channel_out, down in zip([channels[-1], 8, 8, 8], [8, ] * 4, [True, False, True, False]):
            res.append(
                ResBlockD(base_ch * channel_in, base_ch * channel_out, downsample=down)
            )
        self.img_path = nn.Sequential(*res)
        self.l_im = nn.utils.spectral_norm(nn.Linear(ch * 8, 1, bias=False))

        self.activation = nn.LeakyReLU(0.01)
        self.avg_pool = nn.AdaptiveAvgPool2d([1, 1])

        # object path: 8x8 RoI crops -> 2x2
        res = []
        for channel_in, channel_out, down in zip([channels[-1], 8, 8, 8], [8, ] * 4, [False, True, False, True]):
            res.append(
                ResBlockD(base_ch * channel_in, base_ch * channel_out, downsample=down)
            )
        self.obj_path = nn.Sequential(*res)

        self.obj_fc = nn.utils.spectral_norm(nn.Linear(ch * 8, ch * 8, bias=False))
        self.l_obj = nn.utils.spectral_norm(nn.Linear(ch * 8, 1, bias=False))
        self.l_y = nn.utils.spectral_norm(nn.Embedding(num_classes, ch * 8))

        self.dim_obj_feat = ch * 8

        self.init_parameter()

    def forward(self, x, labels, bbox):
        """x: (b, c, s, s) images; labels: (b, o); bbox: (b, o, 4) in
        normalized (x, y, w, h) -- assumed from the box_convert below,
        TODO confirm against the caller."""
        b, o = bbox.size(0), bbox.size(1)
        bbox_refmt = torchvision.ops.box_convert(bbox, 'xywh', 'xyxy')
        bbox_refmt *= x.size(2)  # to pixel coordinates

        x1 = self.first(x)
        x2 = self.shared(x1)

        # img_path
        x_img = self.img_path(x2)
        # NOTE(review): squeeze() also drops the batch dim when b == 1.
        x_img = self.avg_pool(self.activation(x_img)).squeeze()
        out_im = self.l_im(x_img)

        # obj_path: one (o, 4) box tensor per image for RoIAlign
        x_obj = self.roi_align(x2, [boxes for boxes in bbox_refmt])  # (b*o, ch*channels[-1], 8, 8)
        x_obj = self.obj_path(x_obj)  # (b*o, ch*8, 2, 2)
        obj_feat = self.avg_pool(self.activation(x_obj)).squeeze()  # (b*o, ch*8)
        obj_feat = self.obj_fc(obj_feat)

        out_obj_TF = self.l_obj(obj_feat)  # (b*o, 1)
        y = labels.view(b * o)
        # Projection term is computed only for real objects (label > 0), so
        # out_obj_cls can have fewer rows than out_obj_TF.
        obj_feat_view = obj_feat.view(b * o, -1)
        y_active = y[y > 0]
        obj_feat_view_active = obj_feat_view[y > 0, :]
        out_obj_cls = torch.sum(self.l_y(y_active) * obj_feat_view_active, dim=1, keepdim=True)

        return out_im, out_obj_TF, out_obj_cls, [obj_feat.view(b, o, -1), bbox, labels]

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for bias vectors."""
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1])
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)


class ResnetDiscriminator64(nn.Module):
    """64x64 projection discriminator: five-block image path plus an
    RoIAlign object path branching off the 32x32 feature map."""

    def __init__(self, img_size=64, num_classes=0, input_dim=3, base_ch=64):
        super().__init__()
        self.num_classes = num_classes
        ch = base_ch

        self.block1 = OptimizedBlock(input_dim, ch, downsample=False)
        self.block2 = ResBlockD(ch, ch*2, downsample=False)
        self.block3 = ResBlockD(ch*2, ch*4, downsample=True)
        self.block4 = ResBlockD(ch*4, ch*8, downsample=True)
        self.block5 = ResBlockD(ch*8, ch*16, downsample=True)
        self.l_im = nn.utils.spectral_norm(nn.Linear(ch*16, 1, bias=False))
        self.activation = nn.LeakyReLU(0.01)

        # object path: crops taken from the 32x32 map after block3, hence
        # spatial_scale 1/2; output is (b*o, ch*4, 8, 8).
        self.roi_align = torchvision.ops.RoIAlign( (8, 8), 1.0/2.0, 0 )
        self.block_obj4 = ResBlockD(ch*4, ch*8, downsample=True)
        self.l_obj = nn.utils.spectral_norm(nn.Linear(ch * 8, 1, bias=False))
        self.l_y = nn.utils.spectral_norm(nn.Embedding(num_classes, ch*8))

        self.dim_obj_feat = ch * 8

        self.init_parameter()

        print(f"ResnetDiscriminator64 initialized")

    def forward(self, x, labels=None, bbox=None):
        """Expects bbox of shape (b, o, 4) in normalized (x, y, w, h).

        NOTE(review): CombineDiscriminator64 passes flattened (n, 5) RoI rows
        that are already converted and pixel-scaled, which would fail the
        3-way unpack and double-transform below -- confirm the intended
        caller and reconcile.
        """
        y = labels
        b, o, _ = bbox.size()

        bbox_refmt  = torchvision.ops.box_convert(bbox, 'xywh', 'xyxy')
        bbox_refmt *= x.size(2)
        # 64x64 in -> 64x64
        x = self.block1(x)
        # 64x64 (block2 does not downsample)
        x = self.block2(x)
        # -> 32x32; kept for the object path
        x1 = self.block3(x)
        # -> 16x16
        x = self.block4(x1)
        # -> 8x8
        x = self.block5(x)
        x = self.activation(x)
        # global spatial average (despite the name "sum" elsewhere, this is a mean)
        x = torch.mean(x, dim=(2, 3))
        out_im = self.l_im(x)

        # obj path: one (o, 4) box tensor per image
        obj_feat = self.roi_align(x1, [b for b in bbox_refmt]) # (b*o, ch*4, 8, 8)
        obj_feat = self.block_obj4(obj_feat)
        obj_feat = self.activation(obj_feat)
        obj_feat = torch.sum(obj_feat, dim=(2, 3))
        out_obj_TF = self.l_obj(obj_feat)
        # NOTE(review): view(b, -1) folds all o objects into a single
        # projection row per image; a per-object projection would use
        # view(b*o, -1) -- confirm which is intended.
        out_obj_cls = torch.sum(self.l_y(y).view(b, -1) * obj_feat.view(b, -1), dim=1, keepdim=True)

        return out_im, out_obj_TF, out_obj_cls, [obj_feat.view(b,o,-1), bbox, labels]

    def init_parameter(self):
        # Orthogonal init for weight matrices, zeros for bias vectors.
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1] )
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)

class CombineDiscriminator64(nn.Module):
    """Wrapper around ResnetDiscriminator64 that converts normalized
    (x, y, w, h) boxes into flattened RoI rows ``(batch_idx, x1, y1, x2, y2)``
    in pixel units and drops padding objects (label == 0)."""

    def __init__(self, img_size=64, num_classes=0, input_dim=3, base_ch=64):
        super().__init__()
        self.obD = ResnetDiscriminator64(num_classes=num_classes, input_dim=3)
        self.dim_obj_feat = self.obD.dim_obj_feat

    def forward(self, images, bbox, labels, mask=None):
        """images: (b, c, s, s); bbox: (b, o, 4) normalized (x, y, w, h);
        labels: (b, o); mask is accepted but unused."""
        # Fix: operate on a copy -- the original converted the boxes in place
        # and therefore mutated the caller's bbox tensor on every call.
        bbox = bbox.clone()
        # One batch index per object, prepended as the RoI "image id" column.
        idx = torch.arange(start=0, end=images.size(0)).view(images.size(0),
                            1, 1).expand(-1, bbox.size(1), -1).float().to(images.device)
        label = labels
        # (x, y, w, h) -> (x1, y1, x2, y2), then scale to pixel coordinates.
        bbox[:, :, 2] = bbox[:, :, 2] + bbox[:, :, 0]
        bbox[:, :, 3] = bbox[:, :, 3] + bbox[:, :, 1]
        bbox = bbox * images.size(2)
        bbox = torch.cat((idx, bbox.float()), dim=2)
        bbox = bbox.view(-1, 5)
        label = label.view(-1)

        # Keep only real objects; label 0 marks padding.
        idx = (label != 0).nonzero().view(-1)
        bbox = bbox[idx]
        label = label[idx]
        return self.obD(images, label, bbox)



from torch import nn
from torch.nn import functional as F
# from .norm_module import *

# adopted from https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L280
# adopted from https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py#L280
class NoiseInjection(nn.Module):
    """Adds a small fixed dose (1e-3) of Gaussian noise to a feature map.

    With ``full=True`` (default) independent noise is drawn for every
    channel; otherwise a single noise map is broadcast across channels.
    A pre-drawn ``noise`` tensor may be supplied instead.
    """

    def __init__(self, full=True):
        super().__init__()
        self.full = full

    def forward(self, image, noise=None):
        if noise is None:
            batch, channel, height, width = image.shape
            noise_channels = channel if self.full else 1
            noise = image.new_empty(batch, noise_channels, height, width).normal_()
        return image + 1e-3 * noise


class ChannelwiseNoiseInjection(nn.Module):
    """Noise injection with a learnable per-channel strength.

    The strength is ``softplus(noise_weight_seed)``; the seed is
    zero-initialized, giving an initial strength of log(2) per channel.
    ``full=True`` draws independent noise per channel; the default shares
    one noise map across channels.
    """

    def __init__(self, num_channels, full=False):
        super().__init__()
        self.noise_weight_seed = nn.Parameter(torch.zeros((1, num_channels, 1, 1)))
        self.num_channels = num_channels
        self.full = full
        self.noise = None  # kept for attribute compatibility; unused here

    def forward(self, image, noise=None):
        if noise is None:
            batch, channel, height, width = image.shape
            noise_channels = channel if self.full else 1
            noise = image.new_empty(batch, noise_channels, height, width).normal_()
        return image + F.softplus(self.noise_weight_seed) * noise

    def __repr__(self):
        return f"{self.__class__.__name__}({self.num_channels})"

# Every use site upsamples; pre-activation structure; all convs are
# spectral-normalized. num_w is the concat of the global z and the label
# embedding (the 128 + 180 vector built by the caller).
""" Adds ReZero, noise injection, and switches activations to LeakyReLU """
class ResBlockG(nn.Module):
    def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, upsample=False, num_w=128):
        super().__init__()
        self.upsample = upsample
        self.h_ch = h_ch if h_ch else out_ch
        self.conv1 = conv2d(in_ch, self.h_ch, ksize, pad=pad, bias=False)
        self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=pad, bias=False)
        self.b1 = SpatialAdaptiveSynBatchGroupNorm2d(in_ch, num_w=num_w)
        self.b2 = SpatialAdaptiveSynBatchGroupNorm2d(self.h_ch, num_w=num_w)
        self.learnable_sc = (in_ch != out_ch) or upsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.activation = nn.LeakyReLU(0.01)

        # self.alpha = nn.Parameter(torch.tensor(0.0))
        self.rezero = HardReZero()
        self.out_ch = out_ch
        self.noise1 = ChannelwiseNoiseInjection(self.h_ch)
        self.noise2 = ChannelwiseNoiseInjection(out_ch)
        
    def residual(self, in_feat, w, bbox):
        x = in_feat
        x = self.b1(x, w, bbox)
        x = self.activation(x)
        if self.upsample:
            x = F.interpolate(x, scale_factor=2, mode='nearest')
        x = self.conv1(x)
        x = self.noise1(x)
        x = self.b2(x, w, bbox)
        x = self.activation(x)
        x = self.conv2(x)
        x = self.noise2(x)
        return x

    def shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.upsample:
                x = F.interpolate(x, scale_factor=2, mode='nearest')
        return x

    def forward(self, in_feat, w, bbox):
        return self.rezero( self.shortcut(in_feat), self.residual(in_feat, w, bbox) )
        # return self.alpha * self.residual(in_feat, w, bbox) + self.shortcut(in_feat)


""" Rezero套上hardtanh避免无度扩大差距，换LeakyReLU """
class ResBlockD(nn.Module):
    """Pre-activation discriminator residual block.

    LeakyReLU -> conv, twice, with optional 2x average-pool downsampling; the
    residual branch is merged through a HardReZero gate.
    """

    def __init__(self, in_ch, out_ch, ksize=3, pad=1, downsample=False):
        super().__init__()
        self.conv1 = conv2d(in_ch, out_ch, ksize, 1, pad)
        self.conv2 = conv2d(out_ch, out_ch, ksize, 1, pad)
        self.activation = nn.LeakyReLU(0.01)
        self.downsample = downsample
        # A learned 1x1 shortcut is required whenever channels or spatial
        # size change.
        self.learnable_sc = (in_ch != out_ch) or downsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.rezero = HardReZero()

    def residual(self, in_feat):
        h = self.conv1(self.activation(in_feat))
        h = self.conv2(self.activation(h))
        return F.avg_pool2d(h, 2) if self.downsample else h

    def shortcut(self, x):
        if not self.learnable_sc:
            return x
        x = self.c_sc(x)
        return F.avg_pool2d(x, 2) if self.downsample else x

    def forward(self, in_feat):
        return self.rezero(self.shortcut(in_feat), self.residual(in_feat))

""" 默认 bias = True """
# def conv2d(in_feat, out_feat, kernel_size=3, stride=1, pad=1, spectral_norm=True, bias=True):
#     conv = nn.Conv2d(in_feat, out_feat, kernel_size, stride, pad, bias=bias)
#     if spectral_norm:
#         return nn.utils.spectral_norm(conv, eps=1e-4)
#     else:
#         return conv

#! conv2d with replication padding (the old comment said "reflection", but
#! the code uses nn.ReplicationPad2d)
def conv2d(in_feat: int, out_feat: int, kernel_size: int = 3, stride: int = 1, pad: int = 1,
           bias: bool = True, groups: int = 1, spectral_norm: bool = True) -> nn.Module:
    """Build an (optionally spectrally-normalized) Conv2d.

    Padding is applied with ``nn.ReplicationPad2d`` in front of an unpadded
    convolution, so with ``pad > 0`` the result is an ``nn.Sequential``;
    with ``pad == 0`` the bare convolution is returned.

    Args:
        in_feat (int): input channels.
        out_feat (int): output channels.
        kernel_size (int, optional): convolution kernel size. Defaults to 3.
        stride (int, optional): convolution stride. Defaults to 1.
        pad (int, optional): replication padding width. Defaults to 1.
        bias (bool, optional): add a bias term. Defaults to True.
        groups (int, optional): grouped-convolution groups. Defaults to 1.
        spectral_norm (bool, optional): wrap the conv in spectral
            normalization (eps=1e-4). Defaults to True.

    Returns:
        nn.Module: the convolution, possibly preceded by padding.
    """
    conv = nn.Conv2d(in_feat, out_feat, kernel_size, stride, 0, bias=bias, groups=groups)
    # Fix: removed a stray no-op `nn.init` expression left over from editing.
    if spectral_norm:
        conv = nn.utils.spectral_norm(conv, eps=1e-4)
    if pad == 0:
        return conv
    return nn.Sequential(nn.ReplicationPad2d(pad), conv)

# # https://github.com/heykeetae/Self-Attention-GAN/blob/master/sagan_models.py#L8
# # 后续可以改成multihead的，加上了scaled的部分
# class SelfAttn(nn.Module):
#     """ Self attention Layer"""

#     def __init__(self, in_dim, out_dim = None):
#         super().__init__()
#         self.channel_in = in_dim
#         self.channel_out = in_dim if out_dim is None else out_dim

#         self.query_conv = nn.Conv1d(
#             in_channels=in_dim, out_channels=in_dim//4, kernel_size=1)
#         self.key_conv = nn.Conv1d(
#             in_channels=in_dim, out_channels=in_dim//4, kernel_size=1)
#         self.value_conv = nn.utils.spectral_norm(nn.Conv1d(
#             in_channels=in_dim, out_channels=self.channel_out, kernel_size=1))
#         self.gamma = nn.Parameter(torch.tensor(0.0))

#         self.softmax = nn.Softmax(dim=-1)

#     def forward(self, x):
#         """
#             inputs :
#                 x   : input latent vectors( B X O X L)
#             returns :
#                 out : self-attention features + input feature 
#                 attention: B X O X O (O is the number of objects)
#         """
#         m_batchsize, O, L = x.size()
#         assert L == self.channel_in
#         x_ = x.permute(0, 2, 1) # B L O

#         proj_query = self.query_conv(x_).permute(0, 2, 1)  # B X O    X L//4
#         proj_key = self.key_conv(x_)                       # B X L//4 x O
#         energy = torch.bmm(proj_query, proj_key)           # B X O x O
#         energy.div_(self.channel_in).mul_(2)               # scaled dot-product
#         attention = self.softmax(energy)                   # B X O X O
#         proj_value = self.value_conv(x_)                   # B X L X O

#         out = torch.bmm(proj_value, attention.permute(0, 2, 1)) # B X L X O
#         out = out.permute(0, 2, 1)  # B X O X L

#         out = self.gamma * out + x
#         return out, attention


class MaskRegressBlock(nn.Module):
    """Residual block for the mask-regression head: BN -> LeakyReLU -> conv,
    merged into the input through a HardReZero gate."""

    def __init__(self, channels, kernel_size=3, bias=False):
        super().__init__()
        self.conv = nn.Sequential(
            nn.BatchNorm2d(channels),
            nn.LeakyReLU(0.01),
            conv2d(channels, channels, kernel_size, bias=bias),
        )
        self.rezero = HardReZero()

    def forward(self, x):
        return self.rezero(x, self.conv(x))

# BGN + SPADE: blended batch/group norm with spatially-adaptive affine params
class SpatialAdaptiveSynBatchGroupNorm2d(nn.Module):
    """Normalizes with a learnable blend of BatchNorm2d and GroupNorm, then
    applies per-pixel affine parameters that are predicted per object from a
    latent vector and splatted onto the feature map through interpolated
    bbox masks (SPADE-style).  ``alpha`` is zero-initialized, so the module
    starts out as a plain normalization.
    """
    def __init__(self, num_features, num_w=512):
        super().__init__()
        self.num_features = num_features
        # Per-object projections from the latent vector to per-channel
        # scale / shift.
        self.weight_proj = nn.utils.spectral_norm(
            nn.Linear(num_w, num_features))
        self.bias_proj = nn.utils.spectral_norm(nn.Linear(num_w, num_features))
        self.batch_norm2d = nn.BatchNorm2d(num_features, eps=1e-5, affine=False,
                            momentum=0.1, track_running_stats=True)

        self.group_norm = nn.GroupNorm(4, num_features, eps=1e-5, affine=False)
        self.rho = nn.Parameter(torch.tensor(0.5)) # the ratio of GN in the blend

        # Global (object-independent) affine offsets added onto the splatted
        # per-object parameters.
        self.fixed_weight = nn.Parameter(torch.zeros([1, num_features, 1, 1]))
        self.fixed_bias = nn.Parameter(torch.zeros([1, num_features, 1, 1]))
        self.alpha = nn.Parameter(torch.tensor(0.0)) # the scale of the affined term
        # self.rezero = HardReZero()

    def forward(self, x, vector, bbox):
        """
        :param x: input feature map (b, c, h, w)
        :param vector: latent vector (b*o, dim_w)
        :param bbox: bbox map (b, o, h, w)
        :return: normalized + adaptively affined feature map (b, c, h, w)
        """
        self.batch_norm2d._check_input_dim(x)
        # Blend BN and GN outputs; rho (clamped to [0, 1]) is the GN share.
        output_b = self.batch_norm2d(x)
        output_g = self.group_norm(x)
        output = output_b + self.rho.clamp(0, 1) * (output_g - output_b)

        b, o, _, _ = bbox.size()
        _, _, h, w = x.size()
        bbox = F.interpolate(bbox, size=(h, w), mode='bilinear', align_corners=True) # b o h w
        weight, bias = self.weight_proj(vector), self.bias_proj(vector) # b*o d

        # Splat per-object params onto the map with a single bmm; the masks
        # are normalized so overlapping objects average instead of summing
        # (1e-4 keeps empty pixels finite).
        bbox_non_spatial = bbox.view(b, o, -1) # b o h*w
        bbox_non_spatial_margin = bbox_non_spatial.sum(dim=1, keepdim=True) + torch.tensor(1e-4) # b 1 h*w
        bbox_non_spatial = bbox_non_spatial.div(bbox_non_spatial_margin)
        weight, bias = weight.view(b, o, -1), bias.view(b, o, -1) # b o d
        weight.transpose_(1, 2), bias.transpose_(1, 2) # b d o  (in-place transpose)
        weight, bias = torch.bmm(weight, bbox_non_spatial), torch.bmm(bias, bbox_non_spatial) # b d h*w
        # weight.div_(bbox_non_spatial_margin), bias.div_(bbox_non_spatial_margin) # b d h*w
        weight, bias = weight.view(b, -1, h, w), bias.view(b, -1, h, w)
        weight = weight + self.fixed_weight
        bias = bias + self.fixed_bias

        # weight = torch.sum(bbox * weight, dim=1, keepdim=False) / \
        #     (torch.sum(bbox, dim=1, keepdim=False) + 1e-6) # b d h w
        # bias = torch.sum(bbox * bias, dim=1, keepdim=False) / \
        #     (torch.sum(bbox, dim=1, keepdim=False) + 1e-6) # b d h w
        affined = weight * output + bias
        # alpha is clamped to [-1, 1]; zero init makes this an identity at start.
        return output + self.alpha.clamp(-1, 1) * affined
        # return self.rezero(output, affined)

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'



""" 加上ReZero 和 leaky"""
class OptimizedBlock(nn.Module):
    """First discriminator block (conv-first, no pre-activation on the
    input), with a learned 1x1 shortcut and a HardReZero residual gate."""

    def __init__(self, in_ch, out_ch, ksize=3, pad=1, downsample=False):
        super().__init__()
        self.conv1 = conv2d(in_ch, out_ch, ksize, 1, pad)
        self.conv2 = conv2d(out_ch, out_ch, ksize, 1, pad)
        self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.activation = nn.LeakyReLU(0.01)
        self.downsample = downsample
        self.rezero = HardReZero()

    def forward(self, in_feat):
        h = self.conv2(self.activation(self.conv1(in_feat)))
        if self.downsample:
            h = F.avg_pool2d(h, 2)
        return self.rezero(self.shortcut(in_feat), h)

    def shortcut(self, x):
        # Downsample before the 1x1 projection (cheaper on fewer pixels).
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        return self.c_sc(x)


import torch
import torch.nn as nn
import torch.nn.functional as F
# from .norm_module import *
# from .mask_regression import *
# from model import ResBlockG

# Differences between the 64 and 128 variants: whether res1 exists, and how
# y / the label embedding are handled.
# All resblocks are pre-activation; the BN is a sync-capable SPADE; convs are
# spectral-normalized with bias; (nearest) upsampling happens after the first
# relu.
# num_w -- the concat of z_dim and the category embedding -- is needed to
# initialize the SPADE layers, and every intermediate block receives the mask.
# The shortcut grows channels / upsamples along with the block and has no
# normalization.
# Weights are initialized with orthogonal_, not xavier.
# final adds an SBN and ends with a tanh output.
# z_dim here is the style of the whole image, while z in forward() is the
# per-object style; z_im is the image style.
# bbox is the bounding box; y is each object's category (long, not one-hot).
# The first fc could be replaced by a ConvTranspose2d.
# The ResBlocks can be reused by feeding each one its w and bbox.
# Possible follow-ups: GELU, and letting all boxes of an image jointly decide
# the mask.


# from models import RawMaskGenerator
from components import CausalSampleLayer, CLAMA
from copy import deepcopy


# self, img_size:int = 256, num_classes=10, output_dim=3, base_ch=64, num_o=8
class ResnetGenerator(nn.Module):
    """Layout-to-image generator with causal object-style mixing.

    Per-object embeddings (label embedding + bbox Fourier features + mapped
    noise) are mixed through (I - W)^-1, where W is produced by the causal
    graph module ``cg`` passed to forward().  Masks come from MaskRegressNet
    and are adapted by CLAMA using the causal relations.
    """

    def __init__(self, img_size:int = 256, num_classes=10, output_dim=3, base_ch=64, num_o=8, device=torch.device("cpu")):
        super().__init__()
        pac = {"device":device, "dtype":torch.float}
        ch = base_ch
        self.num_classes = num_classes

        self.label_embedding = nn.Embedding(num_classes, 128 + img_size//4 + 10 )
        # Conditioning width = embedding width + 6 bbox Fourier features;
        # this also serves as the per-object noise dimension.
        self.emb_len = self.label_embedding.embedding_dim + 6
        self.z_dim = self.emb_len
        z_dim = self.z_dim


        # csl = CausalSampleLayer(self.emb_len, **pac) # LN last
        # self.causal_sampler = nn.Sequential(*[ByPassFilter(deepcopy(csl)) for _ in range(num_o)]) # LN last

        # self.raw_mask = RawMaskGenerator(self.emb_len, map_size=img_size) # fc first, fc-sigmoid last
        self.raw_mask = MaskRegressNet(self.emb_len, map_size=img_size)
        self.clama = CLAMA(self.emb_len) # fc's first, tanh last 

        # channels = {256:[8, 8, 4, 4, 4, 2, 2, 1, 1], 128:[8, 8, 4, 4, 2, 2, 1, 1], 64:[8, 4, 2, 2, 2, 1, 1]}[img_size]
        channels = {256:[8, 8, 4, 4, 4, 2, 2, 1, 1], 
            128:[8, 8, 8, 4, 4, 2, 2, 2, 2, 1], 
            64: [8, 8, 4, 4, 2, 2, 2, 2, 1]}[img_size]
        self.channels = channels
        upsamples = {256:[True, True, False, True, True, True, True, False], 
                    128:[False, True, True, False, True, False, True, False, True], 
                    # 64:[True, True, False, True, True, False]}[img_size]
                    64 : [False, True, False, True, False, True, False, True]}[img_size]
        
        self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*channels[0]*ch))
        res = []
        for channel_in, channel_out, up in zip(channels[:-1], channels[1:], upsamples):
            res.append(ByPassFilter(
                ResBlockG(base_ch*channel_in, base_ch*channel_out, upsample=up, num_w=self.emb_len))) # norm-swish first, conv-noise last
        self.res = nn.Sequential(*res)

        self.final = nn.Sequential(nn.BatchNorm2d(ch),
                                   nn.LeakyReLU(0.01),
                                   conv2d(ch, output_dim, 1, 1, 0),
                                   nn.Tanh())

        
        # self.self_attn = SelfAttn(num_w + 4)
        self.style_mapping = nn.Sequential(
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim))
        )
        self.init_parameter()



    def forward(self, bbox, labels, cg, *, random_trunc=False, return_mask=False, input_mask=None):
        """bbox: (b, o, 4) normalized boxes; labels: (b, o) long;
        cg: callable returning a (b, o, o) causal weight matrix W.
        Returns the image, or [image, mask, raw_mask, adjust] if return_mask.
        """
        b, o = bbox.size(0), bbox.size(1)
        
        label_embedding = self.label_embedding(labels)

        # z = z.view(b * o, -1)
        # label_embedding = label_embedding.view(b * o, -1)

        # # object style concatenated with the label embedding (a dim=2 cat would do)
        # latent_vector = torch.cat((z, label_embedding), dim=1).view(b, o, -1) # b*o*num_w
        
        # # mapping is an empty Sequential, so w is just a per-(b*o) reshape
        # w = self.mapping(latent_vector.view(b * o, -1))
        # # mask regressed from category, style and bbox size
        # mask = self.mask_regress(w, bbox)

        # latent vector self-attention
        # mask_latent_vector = self.self_attn(torch.cat([label_embedding, z, bbox], dim=2)) # b*o*(num_w+4)

        # Fourier features of the box size: (w, h, sin 2*pi*wh, sin 4*pi*wh).
        wh = bbox[:,:,2:]
        bbox_fft = torch.cat([wh, torch.sin(wh.mul(2*pi)), torch.sin(wh.mul(4*pi))], dim = 2)
        # mask_latent_vector = torch.cat([label_embedding, z, bbpx_fft], dim=2) # b*o*(num_w+2)
        
        z = torch.randn([b*o, self.z_dim], device=bbox.device)
        if random_trunc:
            z = truncted_random(z)
        z = self.style_mapping(z) # non-Gaussian noise
        w = torch.cat( [label_embedding , bbox_fft], dim=2)   # b*o*num_w
        emb = w + z.view(b,o,-1) # non-causal feature: category, size, z
        W_selected = cg(bbox, labels) # b o o
        C = torch.inverse(torch.eye(o, device=bbox.device)[None, :, :] - W_selected) # (I-W)^{-1} emb
        
        # style of objects, mixed through the causal relations
        emb_style = torch.bmm(C, emb )

        if input_mask is None:
            raw_mask = self.raw_mask(emb, bbox) # sampled from non-causal feature
            mask, adjust = self.clama(emb, raw_mask, C) # shape, adapted with causal relations
        else:
            raw_mask = input_mask
            mask = input_mask
            adjust = None
        # style of the whole image
        
        z_im = torch.randn((b, self.z_dim), device=bbox.device)
        if random_trunc:
            z_im = truncted_random(z_im)

        # 4x4
        x = self.fc(z_im).view(b, -1, 4, 4)
        
        x, _, _ = self.res([x, emb_style, mask]) # b channels[-1] w h

        x = self.final(x)
        return x if not return_mask else [x, mask, raw_mask, adjust]

    def init_parameter(self):
        # Orthogonal init for weight matrices, zeros for bias vectors.
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1] )
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)

# NOTE: num_w is a trap -- it is actually z_dim plus the embedding width above.


class ResnetGenerator_no_causal(ResnetGenerator):
    """Ablation of ResnetGenerator without causal mixing: object embeddings
    are used directly as styles and raw masks are used unadapted (no ``cg``
    argument)."""

    def forward(self, bbox, labels, *, random_trunc=False, return_mask=False, input_mask=None):
        b, o = bbox.size(0), bbox.size(1)
        label_embedding = self.label_embedding(labels)
        # Fourier features of the box size: (w, h, sin 2*pi*wh, sin 4*pi*wh).
        wh = bbox[:,:,2:]
        bbox_fft = torch.cat([wh, torch.sin(wh.mul(2*pi)), torch.sin(wh.mul(4*pi))], dim = 2)
        # mask_latent_vector = torch.cat([label_embedding, z, bbpx_fft], dim=2) # b*o*(num_w+2)
        
        z = torch.randn([b*o, self.z_dim], device=bbox.device)
        if random_trunc:
            z = truncted_random(z)
        z = self.style_mapping(z) # non-Gaussian noise
        w = torch.cat( [label_embedding , bbox_fft], dim=2)   # b*o*num_w
        emb = w + z.view(b,o,-1) # non-causal feature, category, size, z
        # No causal mixing: the object style is the raw embedding.
        emb_style = emb
        
        if input_mask is None:
            raw_mask = self.raw_mask(emb, bbox) # sampled from non-causal feature
            mask = raw_mask
            adjust = torch.zeros_like(mask)
        else:
            raw_mask = input_mask
            mask = input_mask
            adjust = None
        
        # style of the whole image
        z_im = torch.randn((b, self.z_dim), device=bbox.device)
        if random_trunc:
            z_im = truncted_random(z_im)

        # 4x4
        x = self.fc(z_im).view(b, -1, 4, 4)
        
        x, _, _ = self.res([x, emb_style, mask]) # b channels[-1] w h

        x = self.final(x)
        return x if not return_mask else [x, mask, raw_mask, adjust]


class ResnetGenerator128(nn.Module):
    """128x128 layout-to-image generator with a fixed five-block upsampling
    ladder; masks are regressed from (label embedding, z, box size)."""

    def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
        super(ResnetGenerator128, self).__init__()
        self.num_classes = num_classes

        self.label_embedding = nn.Embedding(num_classes, 160)
        self.z_dim = z_dim
        num_w = z_dim + self.label_embedding.embedding_dim
        self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*8*ch))

        self.res1 = ResBlockG(ch*8, ch*8, upsample=True, num_w=num_w)
        self.res2 = ResBlockG(ch*8, ch*4, upsample=True, num_w=num_w)
        self.res3 = ResBlockG(ch*4, ch*4, upsample=True, num_w=num_w)
        self.res4 = ResBlockG(ch*4, ch*2, upsample=True, num_w=num_w)
        self.res5 = ResBlockG(ch*2, ch*1, upsample=True, num_w=num_w)
        self.final = nn.Sequential(nn.BatchNorm2d(ch),
                                   nn.LeakyReLU(0.01),
                                   conv2d(ch, output_dim, 3, 1, 1),
                                   nn.Tanh())

        # mapping function (kept empty: acts as an identity)
        mapping = list()
        self.mapping = nn.Sequential(*mapping)

        self.mask_regress = MaskRegressNet(num_w+2, map_size=128)

        self.style_mapping = nn.Sequential(
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim))
        )
        self.init_parameter()
        print(f"ResnetGenerator128 initialized")

    def forward(self, z, bbox, z_im=None, y=None, return_mask=False, input_mask=None, mask_to_be_adjusted=None, tau=0.):
        """z: (b, o, z_dim) per-object noise; bbox: (b, o, 4); y: (b, o) labels."""
        b, o = z.size(0), z.size(1)
        # NOTE(review): hard .cuda() forces GPU execution; ResnetGenerator256
        # uses type_as() instead -- consider aligning the two.
        z, bbox = z.cuda(), bbox.cuda()

        label_embedding = self.label_embedding(y)

        # Fix: raw_mask was only bound inside the return_mask branch below,
        # so calling with return_mask=True together with input_mask or
        # mask_to_be_adjusted raised NameError at the return statement.
        raw_mask = None
        if mask_to_be_adjusted is not None:
            mask_latent_vector = torch.cat([label_embedding, z, bbox[:,:,2:]], dim=2) # b*o*(num_w+2)
            mask = self.mask_regress(mask_latent_vector, bbox, mask_to_be_adjusted=mask_to_be_adjusted)
        elif input_mask is None:
            mask_latent_vector = torch.cat([label_embedding, z, bbox[:,:,2:]], dim=2) # b*o*(num_w+2)
            if return_mask:
                mask, raw_mask = self.mask_regress(mask_latent_vector, bbox, return_raw=True, tau=tau)
            else:
                mask = self.mask_regress(mask_latent_vector, bbox, tau=tau)
        else:
            mask = input_mask
        if raw_mask is None:
            raw_mask = mask
        w = torch.cat( [label_embedding, self.style_mapping(z.view(b*o, -1)).view(b,o,-1)], dim=2)  # b*o*num_w

        # style of the whole image
        if z_im is None:
            z_im = torch.randn((b, self.z_dim), device=z.device)

        # 4x4
        x = self.fc(z_im).view(b, -1, 4, 4)
        # 8x8
        x = self.res1(x, w, mask)
        # 16x16
        x = self.res2(x, w, mask)
        # 32x32
        x = self.res3(x, w, mask)
        # 64x64
        x = self.res4(x, w, mask)
        # 128x128
        x = self.res5(x, w, mask)
        # to RGB
        x = self.final(x)
        return x if not return_mask else [x, mask, raw_mask]

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for bias vectors."""
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1] )
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)



class ResnetGenerator256(nn.Module):
    """Layout-to-image generator for 256x256 output.

    Per-object latents plus label embeddings drive a mask regressor that
    paints a soft layout; a stack of upsampling residual blocks (wrapped in
    ByPassFilter so style and masks flow through nn.Sequential) renders the
    final RGB image.
    """

    def __init__(self, ch=64, z_dim=128, num_classes=10, output_dim=3):
        super(ResnetGenerator256, self).__init__()
        self.num_classes = num_classes

        self.label_embedding = nn.Embedding(num_classes, 192)
        self.z_dim = z_dim
        # Per-object style width: latent dim + label-embedding dim.
        num_w = z_dim + self.label_embedding.embedding_dim

        channels = [16, 8, 8, 4, 4, 2, 1]
        self.fc = nn.utils.spectral_norm(nn.Linear(z_dim, 4*4*channels[0]*ch))

        # Six upsampling residual blocks: 4x4 -> 256x256.
        res = []
        for channel_in, channel_out in zip(channels[:-1], channels[1:]):
            res.append(ByPassFilter(
                ResBlockG(ch*channel_in, ch*channel_out, upsample=True, num_w=num_w)))
        self.res = nn.Sequential(*res)

        self.final = nn.Sequential(nn.BatchNorm2d(ch),
                                   nn.LeakyReLU(0.01),
                                   conv2d(ch, output_dim, 3, 1, 1),
                                   nn.Tanh())

        # NOTE(review): empty Sequential, never applied in forward(); kept for
        # state_dict/interface compatibility.
        mapping = list()
        self.mapping = nn.Sequential(*mapping)

        self.mask_regress = MaskRegressNet(num_w+2, map_size=256)

        self.style_mapping = nn.Sequential(
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim)),
            nn.LeakyReLU(0.01),
            nn.utils.spectral_norm(nn.Linear(z_dim, z_dim))
        )
        self.init_parameter()
        print(f"ResnetGenerator256 initialized")

    def forward(self, z, bbox, z_im=None, y=None, return_mask=False, input_mask=None):
        """Generate a batch of 256x256 images.

        Args:
            z: (b, num_o, z_dim) per-object latent codes.
            bbox: (b, num_o, 4) boxes; columns 2: feed the mask regressor.
            z_im: optional (b, z_dim) image-level latent; sampled if None.
            y: per-object class labels for the embedding table.
            return_mask: if True, also return the predicted masks.
            input_mask: precomputed layout mask; skips the mask regressor.

        Returns:
            The generated image tensor, or [image, mask, raw_mask] when
            return_mask is True. raw_mask is None unless the regressor ran
            with return_raw=True.
        """
        b, o = z.size(0), z.size(1)

        label_embedding = self.label_embedding(y)
        # Match the embedding's device/dtype (device-agnostic, unlike the
        # 128 variant's hard-coded .cuda()).
        z, bbox = z.type_as(label_embedding), bbox.type_as(label_embedding)

        # BUGFIX: raw_mask was only bound on one branch; return_mask=True with
        # input_mask supplied raised UnboundLocalError at the return below.
        raw_mask = None
        if input_mask is None:
            mask_latent_vector = torch.cat([label_embedding, z, bbox[:,:,2:]], dim=2) # b*o*(num_w+2)
            if return_mask:
                mask, raw_mask = self.mask_regress(mask_latent_vector, bbox, return_raw=True)
            else:
                mask = self.mask_regress(mask_latent_vector, bbox)
        else:
            mask = input_mask
        # Per-object style: label embedding concatenated with the mapped latent.
        w = torch.cat( [label_embedding, self.style_mapping(z.view(b*o, -1)).view(b,o,-1)], dim=2)  # b*o*num_w

        # Image-level style latent.
        if z_im is None:
            z_im = torch.randn((b, self.z_dim), device=z.device)

        # 4x4 seed
        x = self.fc(z_im).view(b, -1, 4, 4)
        # 4x4 -> 256x256 through the wrapped residual stack
        x, w, mask = self.res([x, w, mask])
        # to RGB
        x = self.final(x)
        return x if not return_mask else [x, mask, raw_mask]

    def init_parameter(self):
        """Orthogonally initialize every weight matrix and zero every bias."""
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1])
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)

class ByPassFilter(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model
        
    def forward(self, args):
        x = self.model(*args)
        return [x] + args[1:] # These are w and masks



class MaskRegressNet(nn.Module):
    """Regress per-object soft masks and place them on the layout canvas.

    Each object feature is decoded to a mask_size x mask_size soft mask via a
    small upsampling conv stack, then warped into its bounding box on a
    map_size x map_size canvas (one channel per object).
    """

    def __init__(self, obj_feat=128, mask_size=32, map_size=64):
        super().__init__()
        self.mask_size = mask_size
        self.map_size = map_size
        self.hidden_feat, hidden_feat = 128, 128

        # Project each object feature to a 4x4 feature map.
        self.fc = nn.utils.spectral_norm(
            nn.Linear(obj_feat, hidden_feat * 4 * 4))

        self.conv1 = MaskRegressBlock(hidden_feat)
        self.conv2 = MaskRegressBlock(hidden_feat)
        self.conv3 = MaskRegressBlock(hidden_feat)

        # 1x1 conv + sigmoid -> single-channel soft mask in [0, 1].
        final = list()
        final.append(nn.BatchNorm2d(hidden_feat))
        final.append(nn.LeakyReLU(0.01))
        final.append(nn.utils.spectral_norm(nn.Conv2d(hidden_feat, 1, 1, 1)))
        final.append(nn.Sigmoid())
        self.final = nn.Sequential(*final)

        self.init_parameter()

    def init_parameter(self):
        """Orthogonally initialize every weight matrix and zero every bias."""
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1])
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)

    def forward(self, obj_feat, bbox, return_raw=False, mask_to_be_adjusted=None, tau=0.):
        """
        :param obj_feat: (b*num_o, feat_dim)
        :param bbox: (b, num_o, 4)
        :param return_raw: if True, return [adjusted, raw] masks. The
            attention-based refinement stage is currently disabled, so both
            entries are the same tensor.
        :param mask_to_be_adjusted: unused while the refinement stage is disabled.
        :param tau: unused while the refinement stage is disabled.
        :return: bbmap: (b, num_o, map_size, map_size)
        """
        b, num_o, _ = bbox.size()
        obj_feat = obj_feat.view(b * num_o, -1)
        x = self.fc(obj_feat)  # (b*num_o, hidden_feat*4*4)
        x = self.conv1(x.view(b * num_o, self.hidden_feat, 4, 4))  # 4 * 4
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = self.conv2(x)     # 8 * 8
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = self.conv3(x)     # 16 * 16
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = self.final(x)     # 32 * 32, in [0, 1]
        x = x.view(b, num_o, self.mask_size, self.mask_size)

        # Warp every object mask into its bounding box on the map_size canvas,
        # producing one layout channel per object.
        bbmap = masks_to_layout(bbox, x, self.map_size).view(b, num_o, self.map_size, self.map_size)

        # BUGFIX: callers invoking return_raw=True unpack two values, but the
        # original returned a single tensor unconditionally. With the
        # refinement stage disabled, raw and adjusted masks coincide.
        return bbmap if not return_raw else [bbmap, bbmap]

# Adjust block, use GN to avoid perturbation
class MaskAdjustBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        conv = list()
        conv.append(nn.GroupNorm(4, channels))
        conv.append(nn.LeakyReLU(0.01))
        conv.append(
            nn.utils.spectral_norm(nn.Conv2d(channels, channels, 3, 1, 1, bias=False))
            )
        self.conv = nn.Sequential(*conv)
        # self.alpha = nn.Parameter(torch.tensor(0.0))
        self.rezero = HardReZero()
    
    def forward(self, x):
        # return x + self.alpha * self.conv(x)
        return self.rezero(x, self.conv(x))


def masks_to_layout(boxes, masks, H, W=None):
    """
    Paste per-object masks into their bounding boxes on an H x W canvas.

    Inputs:
        - boxes: Tensor of shape (b, num_o, 4) giving bounding boxes in the
            format [x0, y0, w, h] in the [0, 1] coordinate space.
            (Docstring fix: the original claimed [x0, y0, x1, y1], but
            _boxes_to_grid divides by columns 2 and 3 as width/height.)
        - masks: Tensor of shape (b, num_o, M, M) giving binary masks for each object
        - H, W: Size of the output image. W defaults to H.
    Returns:
        - out: Tensor of shape (b, num_o, H, W)
    """
    b, num_o, _ = boxes.size()
    M = masks.size(2)
    assert masks.size() == (b, num_o, M, M)
    if W is None:
        W = H

    # One sampling grid per object; grid_sample warps each mask into its box.
    grid = _boxes_to_grid(boxes.view(b*num_o, -1), H, W).float().to(device=masks.device)

    img_in = masks.float().view(b*num_o, 1, M, M)
    sampled = F.grid_sample(img_in, grid, mode='bilinear', align_corners=True)

    return sampled.view(b, num_o, H, W)


def _boxes_to_grid(boxes, H, W):
    """
    Input:
    - boxes: FloatTensor of shape (O, 4) giving boxes in the [x0, y0, x1, y1]
      format in the [0, 1] coordinate space
    - H, W: Scalars giving size of output
    Returns:
    - grid: FloatTensor of shape (O, H, W, 2) suitable for passing to grid_sample
    """
    O = boxes.size(0)

    boxes = boxes.view(O, 4, 1, 1)

    # All these are (O, 1, 1)
    x0, y0 = boxes[:, 0], boxes[:, 1]
    ww, hh = boxes[:, 2], boxes[:, 3]

    X = torch.linspace(0, 1, steps=W).view(1, 1, W).to(boxes)
    Y = torch.linspace(0, 1, steps=H).view(1, H, 1).to(boxes)

    X = (X - x0) / ww  # (O, 1, W)
    Y = (Y - y0) / hh  # (O, H, 1)

    # Stack does not broadcast its arguments so we need to expand explicitly
    X = X.expand(O, H, W)
    Y = Y.expand(O, H, W)
    grid = torch.stack([X, Y], dim=3)  # (O, H, W, 2)

    # Right now grid is in [0, 1] space; transform to [-1, 1]
    grid = grid.mul(2).sub(1)

    return grid