from math import sqrt
import random
import itertools
from copy import deepcopy

import torch
from torch import optim
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision.utils import make_grid
import torchvision
from torch.utils.tensorboard.writer import SummaryWriter


class CausalSampleLayer(nn.Module):
    """Attention-like layer that regresses object features along a causal graph W.

    Structure: (FC-Swish-FC, aggregate by W, add&norm) followed by a
    category-conditioned (cFC-Swish-cFC, ramped residual, add&norm).
    """
    def __init__(self, d_model:int, dim_feedforward:int=512, device=None, dtype=None) -> None:
        super().__init__()
        # NOTE(review): `device`/`dtype` are accepted for factory-kwargs API
        # symmetry but are not forwarded to the sub-modules (the original code
        # built an unused kwargs dict) -- confirm whether they should be plumbed
        # through before relying on them.
        self.fc_obj_feat1 = SNLinear(d_model, dim_feedforward)
        self.fc_obj_feat2 = SNLinear(dim_feedforward, d_model)

        self.swish1 = Swish()
        self.swish2 = Swish()

        self.cfc1 = FCMod(d_model, d_model, bias=True)
        self.cfc2 = FCMod(d_model, d_model, bias=True)

        self.ln1 = nn.LayerNorm(d_model)
        self.ln2 = nn.LayerNorm(d_model)

        self.rezero = HardReZero()

    def forward(self,
                batch_obj_feat:torch.Tensor,
                batch_W:torch.Tensor,
                batch_label_embed:torch.Tensor) -> torch.Tensor:
        """forward to regress the feature

        Args:
            batch_obj_feat (torch.Tensor): input obj features in batch, b o d
            batch_W (torch.Tensor): the causal graph of each layout, b o o
            batch_label_embed (torch.Tensor): the category embedding of each objects, b o d

        Returns:
            torch.Tensor: the regressed feature, b o d
        """

        b, num_o, d = batch_obj_feat.size()

        # 1st part: fc-swish-fc - W - add&norm
        flat = batch_obj_feat.view(b * num_o, d)
        hidden = self.fc_obj_feat1(flat)
        hidden = self.swish1(hidden)
        hidden = self.fc_obj_feat2(hidden) # non-linear transform
        hidden = hidden.view(b, num_o, d)
        first_feat = torch.bmm(batch_W, hidden) # aggregate parent features according to W
        first_feat = first_feat + batch_obj_feat
        first_feat = self.ln1(first_feat) # add and norm

        # 2nd part: cfc-swish-cfc - add&norm
        hidden = first_feat.view(b * num_o, d)
        label_flat = batch_label_embed.view(b * num_o, d)
        hidden = self.cfc1(hidden, label_flat)
        hidden = self.swish2(hidden)
        hidden = self.cfc2(hidden, label_flat)
        hidden = hidden.view(b, num_o, d)

        second_feat = self.rezero(first_feat, hidden) # residual ramps in over training
        second_feat = self.ln2(second_feat) # add and norm

        return second_feat


class MaskRegressBlock(nn.Module):
    """Conditional-GroupNorm residual block used by the raw mask generator.

    GroupNorm(x) is modulated per-channel by weight/bias projected from the
    conditional feature, passed through Swish+conv, and merged with a ReZero
    residual connection.
    """
    def __init__(self, num_feat:int, channels:int) -> None:
        """
        Args:
            num_feat : number of channels of the conditional feature
            channels : number of channels of the main feature, must be divisible by 4
        """
        super().__init__()
        self.channels = channels
        self.gn = nn.GroupNorm(4, channels, affine=False)
        self.weight = SNLinear(num_feat, channels)
        self.bias = SNLinear(num_feat, channels)
        self.conv = nn.Sequential(Swish(), conv2d(channels, channels, 3, 1, 1, bias = False))
        self.rezero = ReZero()

    def forward(self, x:torch.Tensor, conditional_feat:torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): hidden feature map to transform, b c h w
            conditional_feat (torch.Tensor): conditioning vector for the conditional normalization, b d
        """
        scale = self.weight(conditional_feat).unsqueeze(-1).unsqueeze(-1)
        shift = self.bias(conditional_feat).unsqueeze(-1).unsqueeze(-1)
        modulated = self.gn(x) * scale + shift
        residual = self.conv(modulated)
        return self.rezero(x, residual)


class MaskAdjustBlock(nn.Module):
    """Depthwise 5x5 + pointwise 1x1 refinement block with a ReZero residual."""
    def __init__(self, channels):
        super().__init__()
        layers = [
            nn.GroupNorm(4, channels),
            Swish(),
            conv2d(channels, channels, 5, 1, 2, bias=False, spectral_norm=True, groups=channels),
            nn.GroupNorm(4, channels),
            Swish(),
            conv2d(channels, channels, 1, 1, 0, bias=False, spectral_norm=True),
        ]
        self.conv = nn.Sequential(*layers)
        self.rezero = ReZero()

    def forward(self, x):
        residual = self.conv(x)
        return self.rezero(x, residual)



class LAMA(nn.Module):
    """LAMA: locally-adaptive mask adjustment.

    Scatters per-object queries onto the mask plane, refines them locally,
    matches them against per-object keys, and turns the resulting energy into
    a multiplicative adjustment of the raw masks.
    """
    def __init__(self, num_feat):
        super().__init__()
        self.num_feat = num_feat
        match_dim = num_feat // 4 # d: reduced dim used for key/query matching
        self.query_affine = nn.utils.spectral_norm(nn.Linear(num_feat, match_dim))
        self.key_affine = nn.utils.spectral_norm(nn.Linear(num_feat, match_dim))
        # local refinement of the scattered query map
        self.query_local = nn.Sequential(
            MaskAdjustBlock(match_dim),
            MaskAdjustBlock(match_dim),
            MaskAdjustBlock(match_dim),
        )
        # gate for the adjustment energy; starts at 0 so adjust == 1 initially
        self.mask_adjust_alpha = nn.Parameter(torch.tensor(0.0))
    
        self.init_parameter()

    def init_parameter(self):
        # orthogonal init for all weight matrices/kernels, zero for biases
        for k in self.named_parameters():
            if k[1].dim() > 1:
                torch.nn.init.orthogonal_(k[1])
            if k[0][-4:] == 'bias':
                torch.nn.init.constant_(k[1], 0)

    def kq(self, obj_feat):
        # project object features to key/query space: (b, o, num_feat) -> (b, o, d)
        b, num_o, num_feat = obj_feat.size()
        obj_feat = obj_feat.view(b * num_o, num_feat)
        obj_key = self.key_affine(obj_feat).view(b, num_o, -1) # b o d
        obj_query = self.query_affine(obj_feat).view(b, num_o, -1) # b o d
        return obj_key, obj_query

    def forward(self, obj_feat, raw_mask,):
        """Refine raw masks.

        Args:
            obj_feat: per-object features, b o num_feat
            raw_mask: raw per-object masks, b o h w
                (NOTE(review): assumed square -- the resize-back below uses
                only dim 2 for both spatial sizes; confirm with callers)
        Returns:
            (refined mask, adjustment map), both b o h w
        """
        # the mask refined stage
        b, num_o, num_feat = obj_feat.size()
        # assert num_feat == self.num_feat
        obj_key, obj_query = self.kq(obj_feat)
        # work at a fixed 64x64 resolution for the per-pixel query
        mask_resized = F.interpolate(raw_mask, size=(64, 64), mode='bicubic', align_corners=False)
        # per-pixel query: mask-intensity-weighted sum of object queries
        pixel_query = torch.einsum("b o d, b o w h -> b d w h", obj_query, mask_resized)
        local_query = self.query_local(pixel_query) # b d w h
        # match each object's key against the local query map
        energy = torch.einsum("b o d, b d w h -> b o w h", obj_key, local_query)
        # adjust in (0, 2): 1 means "leave the mask unchanged"
        adjust = torch.tanh( energy.mul( self.mask_adjust_alpha.clamp(-1, 1) ) ).add(1)
        map_size = raw_mask.size(2)
        adjust = F.interpolate(adjust, size=(map_size, map_size), mode='bicubic', align_corners=False)

        return raw_mask.mul(adjust), adjust

class CLAMA(LAMA):
    """LAMA with causal restriction.

    Per pixel, object i's query is the causally-weighted aggregation
    q_i = sum_j m_j * q_j * C_{i,j}, where m_j is mask intensity and C encodes
    causal influence; the rest (local refinement, key matching) follows LAMA.
    """
    def __init__(self, num_feat):
        super().__init__(num_feat)
        self.restrict = 64  # cap working resolution to bound the einsum cost

    def forward(self, obj_feat, raw_mask, C):
        # C is typically (I - W)^{-1}: cumulative causal influence, b o o
        b, num_o, num_feat = obj_feat.size()
        # assert num_feat == self.num_feat
        obj_key, obj_query = self.kq(obj_feat)
        h, w = raw_mask.size(2), raw_mask.size(3)
        downsampled = h > self.restrict or w > self.restrict
        if downsampled:  # resize large masks before the per-pixel query
            mask_resized = F.interpolate(raw_mask, size=(self.restrict, self.restrict), mode='bilinear', align_corners=True)
        else:
            mask_resized = raw_mask
        # Fold C into the queries first instead of materializing a
        # (b, o, d, w, h)-per-source tensor -- saves memory and compute.
        causal_query = torch.einsum("b i d, b o i -> b o i d", obj_query, C)
        pixel_query = torch.einsum("b i w h, b o i d -> b o d w h", mask_resized, causal_query)
        pixel_query = pixel_query.contiguous().view(b * num_o, pixel_query.size(2), \
            pixel_query.size(3), pixel_query.size(4)) # bo d w h
        local_query = self.query_local(pixel_query) # bo d w h
        local_query = local_query.view(b, num_o, \
            local_query.size(1), local_query.size(2), local_query.size(3))
        energy = torch.einsum('b o d, b o d w h -> b o w h', obj_key, local_query)
        # adjust in (0, 2): 1 means "leave the mask unchanged"
        adjust = torch.tanh( energy.mul( self.mask_adjust_alpha.clamp(-1, 1) ) ).add(1)
        if downsampled:
            # fix: resize back whenever we downsampled. The original only
            # re-checked raw_mask.size(2) and assumed a square map, so a mask
            # that was large only along dim 3 (or non-square) left `adjust`
            # at the restricted size and broke the elementwise mul below.
            adjust = F.interpolate(adjust, size=(h, w), mode='bilinear', align_corners=True)

        mask = raw_mask.mul(adjust)
        # mask = mask.div( mask.sum(dim=1, keepdim=True) + 1e-8 )
        return mask, adjust
   
# This Blur replaces avgpool(2) as the downsample op; with ROIAlign this may introduce a
# small error, slightly larger if combined with a 2^n+1-sized implementation.
# ROIAlign(output_size, spatial_scale, sampling_ratio): the previous implementation used
# ROIAlign((8,8), 1/8.0, int(0)) -- output size, 1/2^n downsample scale, and the default
# sampling_ratio of 0 (not fully understood yet; could try using it directly later).
     

class Blur(nn.Module):
    """Blur pool with stride (anti-aliased downsampling).

    According to [An Effective Anti-Aliasing Approach for Residual Networks],
    it can be used as BlurS1+ConvS2 or ConvS1+BlurS2. Uses ReplicationPad2d
    automatically so spatial resolution is only changed by the stride.
    Adapted from
    https://github.com/adobe/antialiased-cnns/blob/master/antialiased_cnns/blurpool.py
    """
    def __init__(self, stride=2, filt_size=3, ):
        super().__init__()
        # assert filt_size in [3, 5, 7, 9]
        self.filt_size = filt_size
        self.pad_sizes = filt_size // 2
        self.stride = stride

        # 1-D binomial (Pascal's triangle) taps; the 2-D kernel is their outer product.
        # fix: the 9-tap center coefficient is C(8,4) = 70, not 79 (which made
        # the kernel asymmetric in value and not a true binomial filter).
        d = {3: [1., 2., 1.],
             5: [1., 4., 6., 4., 1.],
             7: [1., 6., 15., 20., 15., 6., 1.],
             9: [1., 8., 28., 56., 70., 56., 28., 8., 1.]}
        a = torch.tensor(d[filt_size])
        filt = a[:, None] * a[None, :]
        filt = filt.div(torch.sum(filt))  # normalize so the kernel sums to 1
        self.register_buffer('filt', filt[None, None, :, :])

        self.pad = nn.ReplicationPad2d(self.pad_sizes)

    def forward(self, in_feat):
        # depthwise conv: the same blur kernel is applied to every channel
        channel = in_feat.size(1)
        kernel = self.filt.expand([channel, 1, -1, -1])
        return F.conv2d(self.pad(in_feat), kernel, stride=self.stride, groups=channel)




# BGN+SPADE 
class SpatialAdaptiveSynBatchGroupNorm2d(nn.Module):
    """Spatially-adaptive Batch/Group norm (BGN + SPADE-style modulation).

    Normalizes x with a learnable blend of BatchNorm2d and GroupNorm, then
    modulates the result with per-pixel weight/bias maps projected from a
    latent style map (anti-alias blurred, then resized to x's resolution).
    The modulated output is merged back with a ReZero residual.
    """
    def __init__(self, num_features, num_w=512):
        super().__init__()
        self.num_features = num_features

        # 1x1 convs projecting the style map to per-pixel affine parameters
        self.weight_proj = conv2d(num_w, num_features, 1, 1, 0)
        self.bias_proj = conv2d(num_w, num_features, 1, 1, 0)

        self.batch_norm2d = nn.BatchNorm2d(num_features, eps=1e-5, affine=False,
                            momentum=0.1, track_running_stats=True)
        self.group_norm = nn.GroupNorm(4, num_features, eps=1e-5, affine=False)
        self.rho = nn.Parameter(torch.tensor(0.4)) # the ratio of GN in the blend

        self.rezero = ReZero()

        # blur kernels of increasing strength for anti-aliased downscaling
        self.blur3 = Blur(filt_size=3)
        self.blur5 = Blur(filt_size=5)
        self.blur7 = Blur(filt_size=7)
        self.blur9 = Blur(filt_size=9)

    def smooth(self, in_feat, w0, w1):
        """Anti-alias the style map before resizing from width w1 down to w0.

        Stronger blur for larger downsample ratios.
        fix: the original chain tested `>= 2` first, which made the `>= 4`
        and `>= 8` branches unreachable and gave ratios < 2 the *strongest*
        blur -- the opposite of the intended monotone mapping.
        """
        ratio = w1 / w0
        if ratio >= 8:
            return self.blur9(in_feat)
        if ratio >= 4:
            return self.blur7(in_feat)
        if ratio >= 2:
            return self.blur5(in_feat)
        return self.blur3(in_feat)

    def forward(self, x, spatial_style):
        """
        :param x            : input feature map (b, c, w0, h0)
        :param spatial_style: latent spatial_style (b, dim_w, w1, h1)
        :return:
        """
        self.batch_norm2d._check_input_dim(x)
        # use BGN: blend BatchNorm and GroupNorm outputs with ratio rho
        output_b = self.batch_norm2d(x)
        output_g = self.group_norm(x)
        output = output_b + self.rho.clamp(0., 1.) * (output_g - output_b)

        b, c, w0, h0 = x.size()
        b, d, w1, h1 = spatial_style.size()

        spatial_style_smooth = self.smooth(spatial_style, w0, w1)
        spatial_style_resize = F.interpolate(spatial_style_smooth, size=(w0, h0), mode='bilinear', align_corners=False) # b dim_w w0 h0
        weight, bias = self.weight_proj(spatial_style_resize), self.bias_proj(spatial_style_resize) # b c w0 h0
        affined = output.mul(weight).add(bias)

        return self.rezero(output, affined)

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'


class ResBlockG(nn.Module):
    """Generator residual block.

    residual: noise -> style-norm -> swish -> [2x upsample] -> conv ->
              noise -> style-norm -> swish -> conv
    shortcut: [2x upsample] + 1x1 conv when the shape changes, identity otherwise.
    The two paths are merged with a ReZero gate.
    """
    def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, upsample=False, num_w=128):
        super().__init__()
        self.upsample = upsample
        self.h_ch = h_ch if h_ch else out_ch
        self.conv1 = conv2d(in_ch, self.h_ch, ksize, pad=ksize // 2, bias=False)
        self.conv2 = conv2d(self.h_ch, out_ch, ksize, pad=ksize // 2, bias=False)
        self.b1 = SpatialAdaptiveSynBatchGroupNorm2d(in_ch, num_w=num_w)
        self.b2 = SpatialAdaptiveSynBatchGroupNorm2d(self.h_ch, num_w=num_w)
        self.learnable_sc = in_ch != out_ch or upsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.swish1 = Swish()
        self.swish2 = Swish()

        self.out_ch = out_ch
        self.noise1 = SimpleNoiseInjection()
        self.noise2 = SimpleNoiseInjection()
        self.rezero = ReZero()

    def residual(self, in_feat, spatial_style):
        hidden = self.swish1(self.b1(self.noise1(in_feat), spatial_style))
        if self.upsample:
            hidden = F.interpolate(hidden, scale_factor=2, mode="bilinear", align_corners=False)
        hidden = self.conv1(hidden)
        hidden = self.swish2(self.b2(self.noise2(hidden), spatial_style))
        return self.conv2(hidden)

    def shortcut(self, x):
        if not self.learnable_sc:
            return x
        if self.upsample:
            x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
        return self.c_sc(x)

    def forward(self, in_feat, spatial_style):
        return self.rezero(self.shortcut(in_feat), self.residual(in_feat, spatial_style))


class ResBlockD(nn.Module):
    """Discriminator residual block.

    residual: swish-conv-swish-conv-downsample
    shortcut: 1x1 conv + downsample when the shape changes, identity otherwise
    The two paths are merged with a ReZero gate.
    """
    def __init__(self, in_ch:int, out_ch:int, ksize:int=3, pad:int=1, \
                        downsample: bool=False, bias=True):
        """
        Args:
            in_ch (int): input channels
            out_ch (int): output channels
            ksize (int, optional): conv kernel size. Defaults to 3.
            pad (int, optional): conv padding. Defaults to 1.
            downsample (bool, optional): whether to downsample (via Blur). Defaults to False.
        """
        super().__init__()
        self.conv1 = conv2d(in_ch, out_ch, ksize, 1, pad, bias=bias)
        self.conv2 = conv2d(out_ch, out_ch, ksize, 1, pad, bias=bias)
        self.swish1 = Swish()
        self.swish2 = Swish()
        self.downsample = Blur(2) if downsample else nn.Identity()
        self.learnable_sc = (in_ch != out_ch) or downsample
        if self.learnable_sc:
            self.c_sc = conv2d(in_ch, out_ch, 1, 1, 0)
        self.rezero = ReZero()

    def residual(self, in_feat):
        hidden = self.conv1(self.swish1(in_feat))
        hidden = self.conv2(self.swish2(hidden))
        return self.downsample(hidden)

    def shortcut(self, x):
        return self.downsample(self.c_sc(x)) if self.learnable_sc else x

    def forward(self, in_feat):
        return self.rezero(self.shortcut(in_feat), self.residual(in_feat))


class BlockD_Bottom(ResBlockD):
    """Bottom (input) block of the discriminator: conv-swish-conv-downsample,
    with no shortcut branch and no ReZero merge."""
    def __init__(self, in_ch:int, out_ch:int, ksize:int=3, downsample: bool=False):
        super().__init__(in_ch, out_ch, ksize=ksize, pad=ksize // 2, downsample=downsample)
        # modules of the parent that this block's forward never uses
        self.swish2 = None
        self.learnable_sc = None
        self.c_sc = None
        self.rezero = None

    def forward(self, in_feat):
        hidden = self.conv1(in_feat)
        hidden = self.conv2(self.swish1(hidden))
        return self.downsample(hidden)
  


def conv2d(in_feat:int, out_feat:int, kernel_size:int=3, stride:int=1, pad:int=1, groups:int=1,
           bias: bool=True, spectral_norm: bool=True,) -> nn.Module:
    """Build a Conv2d with optional spectral norm and replication padding.

    Padding is done with ReplicationPad2d (wrapped in a Sequential) instead of
    the conv's own zero padding.

    Args:
        in_feat (int): input channels
        out_feat (int): output channels
        kernel_size (int, optional): Defaults to 3.
        stride (int, optional): Defaults to 1 (Blur is preferred for striding).
        pad (int, optional): replication padding. Defaults to 1.
        groups (int, optional): Defaults to 1.
        bias (bool, optional): Defaults to True.
        spectral_norm (bool, optional): Defaults to True.

    Returns:
        nn.Module: the conv (or Sequential(pad, conv))
    """
    if stride > 1:
        # strided convs alias; the project uses Blur for downsampling instead
        print(f"use Blur({stride}) instead of stride={stride} ")
    layer = nn.Conv2d(in_feat, out_feat, kernel_size, stride, 0, bias=bias, groups=groups)
    if spectral_norm:
        layer = nn.utils.spectral_norm(layer, eps=1e-4)
    if pad == 0:
        return layer
    return nn.Sequential(nn.ReplicationPad2d(pad), layer)


class SimpleNoiseInjection(nn.Module):
    """Additive noise injection: returns in_feat + scale * noise.

    The scale is a fixed constant (not learnable).
    """
    def __init__(self, scale:float=1e-3):
        super().__init__()
        self.noise_scale = scale

    def forward(self, in_feat, noise=None):
        # sample standard Gaussian noise when none is supplied
        if noise is None:
            noise = torch.randn_like(in_feat)
        return in_feat + noise * self.noise_scale



def regular_DAG(A:torch.Tensor, m:int) -> torch.Tensor:
    """DAG-ness penalty h(A) = tr[(I + A∘A / m)^m] - m (NOTEARS-style).

    Zero iff the squared adjacency encodes a DAG (nilpotent); positive when
    cycles are present. For a batched A (b x m x m) the penalty is averaged
    over the batch.
    """
    scaled = A.square().div(m)                                   # A .* A ./ m
    powered = torch.eye(m, device=A.device).add(scaled).matrix_power(m)
    if powered.ndim == 3:
        # batched: mean of (trace - m) over the batch
        batch_traces = powered.diagonal(dim1=-2, dim2=-1).sum(-1)
        return batch_traces.sub(m).mean()
    return powered.trace().sub(m)

class MemoryPool():
    """Reservoir-style pool keeping at most {max_length} elements per key,
    plus a shared pool of real samples. Once a pool is full, new elements
    replace a uniformly random slot."""
    def __init__(self, max_length:int = 100) -> None:
        super().__init__()
        self.max_length = max_length
        self.d = {}       # key -> list of pooled elements
        self.real = []    # shared pool of real samples

    def put(self, key, element) -> None:
        """Put {element} in the pool of {key}."""
        val = self.d.get(key, [])
        # fix: use >= so the pool is capped at max_length -- the original `>`
        # let each pool grow to max_length + 1 before replacement kicked in
        if len(val) >= self.max_length:
            val[random.randint(0, len(val) - 1)] = element
        else:
            val.append(element)
        self.d[key] = val

    def puts(self, keys, elements) -> None:
        """put() each (key, element) pair."""
        for k, e in zip(keys, elements):
            self.put(k, e)

    def save(self, real_samples):
        """Add real samples to the shared pool, capped at max_length."""
        for r in real_samples:
            if len(self.real) >= self.max_length:  # same cap fix as put()
                self.real[random.randint(0, len(self.real) - 1)] = r
            else:
                self.real.append(r)

    def sample_virtual(self, keys) -> list:
        """Randomly sample one pooled element per key.

        #! use put() before sampling each key, otherwise KeyError
        """
        return [ random.choice(self.d[k]) for k in keys ] # [ element ] * len(keys)

    def sample(self, k) -> list:
        """Sample k distinct elements from the real pool."""
        return random.sample(self.real, k)

    def __len__(self):
        return len(self.real)
        



class HardReZero(nn.Module):
    """Scheduled residual gate: main + alpha * residual, where alpha ramps
    linearly from 0 to 1 over the first {max} forward calls."""
    def __init__(self, *, max:float=2_000) -> None:
        super().__init__()
        self.max = float(max)
        # call counter, registered as a buffer so it persists in state_dict
        self.register_buffer("count", torch.tensor(0.))

    def forward(self, main:torch.Tensor, residual:torch.Tensor) -> torch.Tensor:
        if self.count < self.max:
            self.count += 1
        gate = min(1., self.count / self.max)
        return main.add(residual.mul(gate))


class ReZero(nn.Module):
    """Learnable residual gate: main + alpha * residual, alpha clamped to [-1, 1].

    From ReZero (UAI 2021); alpha starts at {init} so the residual is
    initially switched off.

    Examples:
        >>> rezero = ReZero()
        >>> rezero(main(x), residual(x))
    """
    def __init__(self, init:float=0.) -> None:
        super().__init__()
        self.rezero_alpha = nn.Parameter(torch.tensor(init))

    def forward(self, main:torch.Tensor, residual:torch.Tensor) -> torch.Tensor:
        gate = self.rezero_alpha.clamp(-1., 1.)
        return torch.add(main, residual.mul(gate))



class LinAutoRegressor(nn.Module):
    """Linear auto-regressor: aggregates object features along the causal
    graph W and adds a learned bias. `num_classes` is unused here; it is
    kept for interface parity with NonLinAutoRegressor."""
    def __init__(self, d_model:int, num_classes:int = 0) -> None:
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(d_model))

    def forward(self, batch_obj_feat:torch.Tensor,
                batch_W:torch.Tensor, batch_labels: torch.LongTensor=None) -> torch.Tensor:
        # (b, o, o) @ (b, o, d) -> (b, o, d), then broadcast-add the bias
        aggregated = torch.bmm(batch_W, batch_obj_feat)
        return aggregated + self.bias

class NonLinAutoRegressor(nn.Module):
    #@ auto-regress object features along W with an attention-like network
    def __init__(self, d_model:int, num_classes:int ) -> None:
        super().__init__()
        dim_feedforward = d_model * 2
        # weight-normalized FCs (unit-norm rows, gain 1) so W's effect is not rescaled
        self.fc_obj_feat1 = WN_FC(d_model, dim_feedforward, gain=1.)
        self.fc_obj_feat2 = WN_FC(dim_feedforward, d_model, gain=1.)

        self.swish1 = Swish()
        self.swish2 = Swish()
        self.swish3 = Swish()

        # per-category embeddings that condition the cFC layers below
        self.label_embeddings = nn.Embedding(num_classes, d_model)
        self.cfc1 = FCMod(d_model, d_model, bias=True)
        self.cfc2 = FCMod(d_model, d_model, bias=False)

        self.rezero = ReZero()

    def forward(self,
                batch_obj_feat:torch.Tensor,
                batch_W:torch.Tensor,
                batch_labels: torch.LongTensor
                ) -> torch.Tensor:
        """forward to regress the feature

        Args:
            batch_obj_feat (torch.Tensor): input obj features in batch, b o d
            batch_W (torch.Tensor): the causal graph of each layout, b o o
            batch_labels (torch.Tensor): the category of each objects, b o

        Returns:
            torch.Tensor: the regress feature
        """
        #* Design notes (translated from the original Chinese comments):
        #* - Part 1: FC-Swish-FC-Swish ("value" transform) over all object features,
        #*   then aggregate with W (attention-like). No self term / no residual here:
        #*   used auto-regressively, the output must be composed purely from parents (PA).
        #* - Part 2: category-conditioned cFC-Swish-cFC with a ReZero residual, so a
        #*   portion of the output can ignore the input and set a per-category base tone.
        #* - All FCs are weight-normalized without gain and inputs are assumed
        #*   unit-norm, so W's contribution is not rescaled by the layers.
        #* - LayerNorm is deliberately avoided inside this module: it would destroy the
        #*   geometric (unit-hypersphere / cosine) meaning of the features that the
        #*   regression relies on. Unit-normalization of the output is the caller's job.
        #@ cf. early TNNLS sparsity-learning regression papers noting that unit-norm
        #@ features are easier to regress.
        # TODO unit-normalize when this is used auto-regressively.

        b, num_o, d = batch_obj_feat.size()
        batch_label_embed = self.label_embeddings(batch_labels)

        # 1st part: fc-swish-fc-swish - W
        batchobj_feat = batch_obj_feat.view(b*num_o, d)
        hidden = self.fc_obj_feat1(batchobj_feat)
        hidden = self.swish1(hidden)
        hidden = self.fc_obj_feat2(hidden)
        hidden = self.swish2(hidden)
        hidden = hidden.view(b, num_o, d)
        # first_feat = torch.einsum('bid, boi -> bod', hidden, batch_W)
        first_feat = torch.bmm(batch_W, hidden)  # aggregate parent features per W

        # 2nd part: cfc-swish-cfc - residual
        hidden = first_feat.view(b*num_o, d)
        batch_label_embed_view = batch_label_embed.view(b*num_o, d)
        hidden = self.cfc1(hidden, batch_label_embed_view)
        hidden = self.swish3(hidden)
        hidden = self.cfc2(hidden, batch_label_embed_view)
        hidden = hidden.view(b, num_o, d)
        second_feat = self.rezero(first_feat, hidden)
        # second_feat = second_feat.rnorm(2, 0, 1).view(b, num_o, d)
        return second_feat




class CategoryCausalGraph(nn.Module):
    """Learnable causal graph W over object categories.

    W[i, j] encodes the causal relation from category j to category i; the
    diagonal is masked to zero (no self-causation).

    >>> c = CategoryCausalGraph(6)
    >>> labels = torch.tensor([[0,2,4,4], [1,3,3,5], [0,1,4,3]])
    >>> c(batch_labels=labels)
    """
    def __init__(self, num_c:int, W = None):
        """Initialize W.

        Args:
            num_c (int): the number of categories
            W          : optional initial W (num_c x num_c)
        """
        super().__init__()
        self.num_c = num_c
        # mask with zeros on the diagonal, ones elsewhere
        self.register_buffer("diag", 1 - torch.eye( num_c ) )
        if W is None:
            init = torch.zeros((num_c, num_c))
        else:
            init = W.mul(self.diag)  # zero out any self-loops in the given W
        self.W = nn.Parameter(init)

    def forward(self, batch_bboxes = None, batch_labels:torch.Tensor = None ) -> torch.Tensor:
        """Extract the causal sub-graph for each layout.

        Args:
            batch_bboxes: optional bboxes (b, num_o, 4) used to keep only
                          overlapping-object connections
            batch_labels: category labels per layout, b num_o
        Returns:
            torch.Tensor: the full W, or the per-layout selected W
        """
        full_W = self.W.mul(self.diag) # keep the diagonal at zero
        if batch_labels is None:
            return full_W

        labels = batch_labels.long()
        # select rows and columns according to each layout's labels
        per_layout = [full_W.index_select(0, l).index_select(1, l) for l in labels]
        W_selected = torch.stack(per_layout)

        if batch_bboxes is None:
            return W_selected
        # keep only connections between spatially overlapping objects
        connection = extract_layout_connections(batch_bboxes, fmt="xywh")
        return W_selected.mul(connection.to(W_selected.device))


def extract_layout_connections(batch_bounding_boxes:torch.Tensor, *, fmt:str='xywh') -> torch.Tensor:
    """Extract pairwise overlap indicators between bounding boxes.

    Args:
        batch_bounding_boxes (torch.Tensor): batch of bboxes, b x num_o x 4
        fmt (str, optional): format of bboxes. Can be 'xyxy', 'xywh', 'cxcywh',
                             per torchvision.ops.box_convert.
                             Defaults to 'xywh'.
                             (fix: the docstring previously claimed 'xyxy',
                             contradicting the signature default.)

    Returns:
        torch.Tensor: b x num_o x num_o float indicator, 1. where IoU > 0
                      (the diagonal is 1 since every box overlaps itself)

    >>> batch_bounding_boxes = torch.tensor([[[0.1, 0.2, 0.3, 0.4], [0.11, 0.22, 0.33, 0.44], [0.311, 0.422, 0.333, 0.844],],]*5)
    >>> extract_layout_connections(batch_bounding_boxes)
    """
    b, num_o, n = batch_bounding_boxes.size()
    # assert n == 4
    if fmt != 'xyxy':
        # renamed loop var: the original reused `b`, shadowing the batch size
        batch_bounding_boxes = [torchvision.ops.box_convert(boxes, fmt, 'xyxy')
                                for boxes in batch_bounding_boxes]
    return torch.stack([(torchvision.ops.box_iou(boxes, boxes).gt(0.)).float()
                        for boxes in batch_bounding_boxes])
  

# Swish activation. An earlier hand-rolled implementation with a learnable
# beta (dead, commented-out code removed) was replaced by torch's SiLU,
# which computes x * sigmoid(x) -- the fixed beta=1 special case -- with a
# fused kernel.
Swish = nn.SiLU
    

class FCMod(nn.Module):
    """Conditional FC: the weight matrix is modulated by a conditioning vector y.

    The input dim of the weight is scaled by (y + 1); when demod is set the
    modulated weight rows are L2-normalized (StyleGAN2-style demodulation).
    Effectively: out = (w * (y+1)) @ x [+ P @ y when bias is enabled].
    """
    def __init__(self,
        in_: int, # in dim
        out_: int, # out dim
        bias: bool = False, # add a y-dependent affine bias
        demod=True,
        eps = 1e-8, **kwargs):
        super().__init__(**kwargs)
        self.out_ = out_
        self.in_ = in_
        self.demod = demod
        self.weight = nn.Parameter(torch.zeros((out_, in_)))
        nn.init.kaiming_normal_(self.weight, mode='fan_in', nonlinearity='relu')
        # projection producing the bias from y (None when bias is disabled)
        self.bias_trans = nn.Parameter(torch.zeros((out_, in_))) if bias else None
        self.eps = eps

    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Apply the y-modulated linear map to x.

        Args:
            x (torch.Tensor): feature to process, b x in_
            y (torch.Tensor): conditioning feature, b x in_

        Returns:
            torch.Tensor: b x out_
        """
        # per-sample modulated weights: w * (y + 1), shape b x out_ x in_
        modulated = self.weight.unsqueeze(0) * (y.unsqueeze(1) + 1)

        if self.demod:
            # demodulation: unit-norm each output row
            modulated = F.normalize(modulated, p=2, dim=2, eps=self.eps)

        out = torch.einsum("bi, boi -> bo", x, modulated)

        if self.bias_trans is not None:
            out = out + y.matmul(self.bias_trans.t())  # out += trans_bias(y)

        return out
    
class WN_FC(nn.Linear):
    """Linear layer whose weight rows are L2-normalized at forward time,
    with the whole output (bias included) scaled by a fixed gain."""
    def __init__(self, in_features: int, out_features: int, bias: bool=True, gain=1., **kwargs) -> None:
        super().__init__(in_features, out_features, bias=bias, **kwargs)
        self.gain = gain

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        unit_weight = F.normalize(self.weight, p=2, dim=1)
        out = F.linear(input, unit_weight, self.bias)
        return out * self.gain

def ema_model(model:nn.Module, ratio:float = 0.5 ** (32 / (10 * 1000)), ) -> torch.optim.swa_utils.AveragedModel:
    """Wrap *model* in an exponential-moving-average AveragedModel.

    Args:
        model (nn.Module): the model to average
        ratio (float, optional): fraction of the running average retained per
            update. Defaults to 0.5 ** (32 / 10000) ≈ 0.9978.
            (fix: the docstring previously claimed "Defaults to 0.01".)

    Returns:
        torch.optim.swa_utils.AveragedModel: the averaged model

    Examples:
        swa_model = ema_model(model)
        swa_model.update_parameters(model)
    """
    # def instead of a lambda bound to a name (PEP 8); same avg_fn semantics
    def ema_avg(averaged_model_parameter, model_parameter, num_averaged):
        return ratio * averaged_model_parameter + (1. - ratio) * model_parameter
    return torch.optim.swa_utils.AveragedModel(model, avg_fn=ema_avg)
        

class HingeLoss:
    """Hinge adversarial losses (static helpers) for GAN training."""
    def __init__(self):
        pass

    @staticmethod
    def train_real(outD:torch.Tensor) -> torch.Tensor:
        # D should push real outputs above +1
        return F.relu(1.0 - outD).mean()

    @staticmethod
    def train_fake(outD:torch.Tensor) -> torch.Tensor:
        # D should push fake outputs below -1
        return F.relu(1.0 + outD).mean()

    @staticmethod
    def train_g(outD:torch.Tensor) -> torch.Tensor:
        # G maximizes D's score on generated samples
        return outD.mean().neg()
    
    
def SNLinear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None):
    """Spectrally-normalized nn.Linear.

    fix: `device`/`dtype` were previously accepted but silently ignored;
    they are now forwarded to nn.Linear.
    """
    linear = nn.Linear(in_features, out_features, bias, device=device, dtype=dtype)
    return nn.utils.spectral_norm(linear, eps=1e-4)

import DSmodels as dsmodels
class DSscore(nn.Module):
    """Perceptual-distance score accumulator.

    Wraps the project-local DSmodels.PerceptualLoss ('net-lin' with an AlexNet
    backbone, version "0.1"); forward() records each batch's distances so
    mean_std() can summarize them afterwards.
    """
    def __init__(self, use_gpu=True):
        super().__init__()
        model = dsmodels.PerceptualLoss(model='net-lin', net='alex', use_gpu=use_gpu, version="0.1")
        self.model = model.cuda() if use_gpu else model
        self.results = []  # per-call distance tensors, consumed by mean_std()

    @torch.no_grad()
    def forward(self, img_generated, img_real):
        """Compute, record, and return the perceptual distance between the batches."""
        img_generated = img_generated.detach()
        img_real = img_real.type_as(img_generated)  # match dtype/device of the generated batch
        result = self.model.forward(img_generated, img_real)
        self.results.append(result)
        return result

    def mean_std(self):
        """Return (mean, std) over all recorded distances and reset the buffer."""
        # NOTE(review): assumes each recorded result is a tensor concatenable
        # along dim 0 -- depends on PerceptualLoss's return shape; confirm.
        result = torch.cat(self.results, dim=0)
        mean = result.mean().item()
        std = result.std().item()
        self.results = []
        return mean, std


