import torch
import torchvision
from torch import optim
from torch import nn
from torch.nn import functional as F

from components import *

# Stdlib imports kept after the star import so their names take precedence
# over anything `components` happens to re-export.
from copy import deepcopy
from math import pi, log2, floor


class RawMaskGenerator(nn.Module):
    # @
    def __init__(self, num_feat=128, mask_size=32, map_size=64):
        """Regress a soft mask per object and paste it into the full layout.

        Args:
            num_feat: dimensionality of each object embedding.
            mask_size: side length of the regressed square mask (the default
                32 matches the 4x4 seed upsampled three times).
            map_size: side length of the output layout canvas.
        """
        super().__init__()
        self.mask_size = mask_size
        self.map_size = map_size
        hidden_feat = 128
        self.hidden_feat = hidden_feat

        # Project each object embedding to a 4x4 spatial seed.
        self.fc = SNLinear(num_feat, hidden_feat * 4 * 4)

        # Four embedding-conditioned refinement blocks; resolution doubles
        # between consecutive blocks in forward().
        self.conv1 = MaskRegressBlock(num_feat, hidden_feat)
        self.conv2 = MaskRegressBlock(num_feat, hidden_feat)
        self.conv3 = MaskRegressBlock(num_feat, hidden_feat)
        self.conv4 = MaskRegressBlock(num_feat, hidden_feat)

        # Collapse to a single-channel mask in [0, 1].
        self.final = nn.Sequential(conv2d(hidden_feat, 1, 1, 1, 0), nn.Sigmoid())

        self.init_parameter()

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for biases."""
        for name, param in self.named_parameters():
            if param.dim() > 1:
                torch.nn.init.orthogonal_(param)
            if name.endswith('bias'):
                torch.nn.init.constant_(param, 0)

    def forward(self, obj_feat, bbox):
        """
        :param obj_feat: (b, num_o, num_feat(d0))
        :param bbox: (b, num_o, 4) x y w h
        :return: raw_mask: (b, num_o, map_size, map_size)
        """
        b, num_o, _ = bbox.size()
        flat_feat = obj_feat.view(b * num_o, -1)              # (bo, d0)
        h = self.fc(flat_feat).view(b * num_o, -1, 4, 4)      # (bo, d1, 4, 4)

        # Refine at 4x4, then upsample before each remaining block:
        # 8x8 -> 16x16 -> 32x32.
        h = self.conv1(h, flat_feat)
        for block in (self.conv2, self.conv3, self.conv4):
            h = F.interpolate(h, scale_factor=2, mode='bilinear', align_corners=False)
            h = block(h, flat_feat)

        h = self.final(h)                                     # (bo, 1, 32, 32)
        h = h.view(b, num_o, self.mask_size, self.mask_size)

        # Paste each mask into its bbox on a map_size x map_size canvas ...
        raw_mask = self.masks_to_layout(bbox, h, self.map_size)
        raw_mask = raw_mask.view(b, num_o, self.map_size, self.map_size)  # b o w h
        # ... and normalise across objects so per-pixel weights sum to ~1.
        raw_mask = raw_mask / (raw_mask.sum(dim=1, keepdim=True) + 1e-8)

        return raw_mask

    def masks_to_layout(self, boxes, masks, H, W=None):
        """Warp per-object masks onto an H x W canvas via grid sampling.

        Inputs:
            - boxes: (b, num_o, 4) boxes as [x, y, w, h] in [0, 1] coordinates.
            - masks: (b, num_o, M, M) soft masks, one per object.
            - H, W: output canvas size (W defaults to H).
        Returns:
            - (b, num_o, H, W) tensor with each mask placed inside its box.
        """
        b, num_o, _ = boxes.size()
        M = masks.size(2)
        assert masks.size() == (b, num_o, M, M)
        if W is None:
            W = H

        flat_boxes = boxes.view(b * num_o, -1)
        grid = self._boxes_to_grid(flat_boxes, H, W).float().to(device=masks.device)

        mask_in = masks.float().view(b * num_o, 1, M, M)
        out = F.grid_sample(mask_in, grid, mode='bilinear', align_corners=False)

        return out.view(b, num_o, H, W)

    def _boxes_to_grid(self, boxes, H, W):
        """Build the grid_sample grid that maps each mask into its box.

        Input:
        - boxes: FloatTensor of shape (O, 4), [x, y, w, h] in [0, 1] space.
        - H, W: scalars giving the output size.
        Returns:
        - grid: FloatTensor of shape (O, H, W, 2) for F.grid_sample.
        """
        O = boxes.size(0)

        boxes = boxes.view(O, 4, 1, 1)
        x0, y0 = boxes[:, 0], boxes[:, 1]   # each (O, 1, 1)
        ww, hh = boxes[:, 2], boxes[:, 3]

        # Canvas coordinates in [0, 1] along each axis.
        xs = torch.linspace(0, 1, steps=W).view(1, 1, W).to(boxes)
        ys = torch.linspace(0, 1, steps=H).view(1, H, 1).to(boxes)

        # Express canvas coordinates in the box's local frame: points inside
        # the box land in [0, 1]; everything else samples the zero padding.
        xs = (xs - x0) / ww                 # (O, 1, W)
        ys = (ys - y0) / hh                 # (O, H, 1)

        # stack() does not broadcast, so expand both to the full grid first.
        grid = torch.stack([xs.expand(O, H, W), ys.expand(O, H, W)], dim=3)

        # Map from [0, 1] to grid_sample's [-1, 1] convention.
        return grid.mul(2).sub(1)





class ResnetGenerator(nn.Module):
    # @
    def __init__(self, img_size:int = 256, num_classes=10, output_dim=3, base_ch=64, num_o=8,):
        """Layout-to-image generator with causally sampled object embeddings.

        Args:
            img_size: output resolution, one of {64, 128, 256}.
            num_classes: number of object categories.
            output_dim: number of output image channels (3 for RGB).
            base_ch: base channel multiplier for the residual stack.
            num_o: number of objects per image (one causal-sampler stage each).
        """
        super().__init__()
        self.img_size = img_size
        self.num_classes = num_classes
        # NOTE(review): 'bash_ch' is a long-standing typo kept for backward
        # compatibility; 'base_ch' is the corrected alias (same int object).
        self.base_ch = self.bash_ch = base_ch
        # Per-stage channel multipliers / upsample flags (index 0 = 4x4 seed).
        channels = {256: [8, 8, 4, 4, 4, 2, 2, 1, 1],
                    128: [8, 8, 4, 4, 2, 2, 1, 1],
                    64:  [8, 4, 2, 2, 2, 1, 1]}[img_size]
        self.channels = channels
        upsamples = {256: [True, True, False, True, True, True, True, False],
                     128: [True, True, False, True, True, True, False],
                     64:  [True, True, False, True, True, False]}[img_size]

        self.emb_len = 128 + img_size // 4
        self.label_embedding = nn.Embedding(num_classes, self.emb_len)
        #! bbox  x y w h — embedded via Fourier features of (w, h):
        # 6 inputs = [w, h, sin(2*pi*w), sin(2*pi*h), sin(4*pi*w), sin(4*pi*h)].
        self.bbox_emb = nn.Sequential(SNLinear(6, 64), Swish(), SNLinear(64, self.emb_len))

        csl = CausalSampleLayer(self.emb_len)  # LN last
        # One sampler stage per object; ByPassFilter threads (W, emb) through.
        self.causal_sampler = nn.Sequential(*[ByPassFilter(deepcopy(csl)) for _ in range(num_o)])  # LN last

        self.raw_mask = RawMaskGenerator(self.emb_len, map_size=img_size)  # fc first, fc-sigmoid last
        self.clama = CLAMA(self.emb_len)  # fc's first, tanh last

        # Learned scale for the 4x4 noise seed, and a global style bias.
        self.init_noise_scale = nn.Parameter(torch.tensor(1e-1))
        self.universal_style = nn.Parameter(torch.zeros([1, self.emb_len, 1, 1]))

        res = []
        for channel_in, channel_out, up in zip(channels[:-1], channels[1:], upsamples):
            res.append(ByPassFilter(
                ResBlockG(base_ch*channel_in, base_ch*channel_out, upsample=up, num_w=self.emb_len)))  # norm-swish first, conv-noise last
        self.res = nn.Sequential(*res)

        self.final = nn.Sequential(conv2d(base_ch, output_dim, 1, 1, 0), nn.Tanh())

        self.init_parameter()

    def __repr__(self):
        return f"ResnetGenerator {self.img_size}_{self.num_classes}_{self.emb_len} initialized"

    def init_emb(self, bbox, labels):
        """Initial object embeddings: label embedding + box-size features + noise.

        Args:
            bbox:   (b, o, 4) boxes as [x, y, w, h] in [0, 1].
            labels: (b, o) integer class ids.
        Returns:
            (b, o, emb_len) embeddings with Gaussian noise (std 0.1) applied.
        """
        wh = bbox[:, :, 2:].view(-1, 2)  # (bo, 2): only box sizes condition the embedding
        bbox_ff = torch.cat([wh, torch.sin(wh.mul(2*pi)), torch.sin(wh.mul(4*pi))], dim=1)
        bbox_emb = self.bbox_emb(bbox_ff).view(bbox.size(0), bbox.size(1), -1)
        emb = self.label_embedding(labels) + bbox_emb
        emb = torch.normal(emb, 1e-1)  # inject stochasticity
        return emb

    def forward(self, bbox, labels, cg, return_mask=False, input_mask=None):
        """Generate an image from a layout.

        Pipeline: sample noisy object embeddings -> propagate them along the
        sampled causal graph -> regress raw masks and refine them with CLAMA
        -> compose a global spatial style map -> decode with the residual
        stack at increasing resolutions.

        Args:
            bbox: (b, o, 4) boxes as [x, y, w, h] in [0, 1].
            labels: (b, o) class ids.
            cg: causal-graph module; cg(bbox, labels) -> (b, o, o) adjacency.
            return_mask: when True also return [mask, raw_mask, adjust].
            input_mask: optional precomputed mask bypassing mask regression.
        Returns:
            (b, output_dim, img_size, img_size) image, or
            [image, mask, raw_mask, adjust] when return_mask is True.
        """
        b, o, _ = bbox.size()
        emb = self.init_emb(bbox, labels)  # (b, o, emb_len)
        W_selected = cg(bbox, labels)      # (b, o, o)

        # Propagate embeddings through the causal sampler stages.
        causal_emb, _, _ = self.causal_sampler([emb, W_selected, emb])  # (b, o, emb_len)

        if input_mask is None:
            raw_mask = self.raw_mask(causal_emb, bbox)
            mask, adjust = self.clama(emb, raw_mask, W_selected)  # shape
        else:
            raw_mask = input_mask
            mask = raw_mask
            adjust = None

        # Blend object embeddings into a spatial style map weighted by masks.
        shape_style = torch.einsum("bod , bowh -> bdwh ", causal_emb, mask)  # (b, emb_len, w, h)
        shape_style = shape_style + self.universal_style

        # Decode from a learned-scale 4x4 noise seed up to full resolution.
        z_im = torch.randn((b, self.channels[0]*self.base_ch, 4, 4), device=bbox.device) * self.init_noise_scale
        x, _ = self.res([z_im, shape_style])  # (b, base_ch, w, h)
        x = self.final(x)                     # (b, output_dim, w, h)

        return x if not return_mask else [x, mask, raw_mask, adjust]

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for biases."""
        for name, param in self.named_parameters():
            if param.dim() > 1:
                torch.nn.init.orthogonal_(param)
            if name.endswith('bias'):
                torch.nn.init.constant_(param, 0)


class ByPassFilter(nn.Module):
    """Adapter that threads extra inputs through an nn.Sequential chain.

    The wrapped model is called with every element of the input list as a
    positional argument; only the first element is replaced by the model's
    output, while the remaining elements pass through untouched.

    Examples:
        for channel_in, channel_out in zip(channels[:-1], channels[1:]):
            res.append(ByPassFilter(
                ResBlockG(ch*channel_in, ch*channel_out, upsample=True, num_w=num_w)))
        self.res = nn.Sequential(*res)
        ...
        x, w, mask = self.res([x, w, mask])

        This is equal to
        x = sel.res1(x, w, mask)
        x = sel.res2(x, w, mask)
        x = sel.res3(x, w, mask)
    """
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, args):
        # args = [x, extra_1, ...]; only x is transformed, the extras
        # (e.g. w and masks) ride along to the next stage.
        head, *extras = args
        out = self.model(head, *extras)
        return [out, *extras]
    



class ResnetDiscriminator(nn.Module):
    # @
    def __init__(self, img_size = 64, num_classes=10, input_dim=3, base_ch=64, device = torch.device("cpu")):
        """Image + per-object discriminator with size-dependent RoI paths.

        Small objects (w, h <= 0.5) are pooled from the 1/8-scale feature map,
        large objects from the 1/16-scale map; both share one obj/cls head.
        """
        super().__init__()
        self.img_size = img_size  # BUGFIX: was hard-coded to 64, ignoring the argument
        self.num_classes = num_classes
        self.device = device
        self.dim_obj_feat = 4 * base_ch

        # Trunk shared by the image path and both object paths -> 1/8 scale.
        self.shared = nn.Sequential(
            BlockD_Bottom(input_dim, base_ch, downsample=True),
            ResBlockD(base_ch, base_ch*2, downsample=True), # swish first, conv-down last
            ResBlockD(base_ch*2, base_ch*4, downsample=True)
        ) # 1/8
        self.sl_boundary = ResBlockD(base_ch*4, base_ch*4, downsample=True) # 1/16

        intermediate = ResBlockD(base_ch*8, base_ch*8, downsample=True)
        # Image realism path: keep downsampling until a 2x2 map remains.
        self.img_path = [ ResBlockD(base_ch*4, base_ch*8, downsample=True)]  \
                        + [ deepcopy(intermediate) for _ in range( int(log2(img_size)-6) )] \
                        + [ ResBlockD(base_ch*8, base_ch*16, downsample=False) ] # 2 x 2
        self.img_path = nn.Sequential(* self.img_path)
        self.l_im = WN_FC(base_ch * 16, 1, bias=False)

        # RoIAlign for small objects (1/8-scale map) and large (1/16-scale map).
        self.roi_align_s = torchvision.ops.RoIAlign([8, 8], 1./8. , sampling_ratio=-1, aligned=True)
        self.roi_align_l = torchvision.ops.RoIAlign([8, 8], 1./16., sampling_ratio=-1, aligned=True)

        sl_shared = ResBlockD(base_ch*4, base_ch*8, downsample=False)
        self.small_path = nn.Sequential(
            ResBlockD(base_ch*4, base_ch*4, downsample=False), sl_shared
        )
        self.large_path = sl_shared # 8 x 8, weights shared with the small path
        self.obj_path = nn.Sequential(
            ResBlockD(base_ch*8, base_ch*8, downsample=True), # 4 x 4
            ResBlockD(base_ch*8, self.dim_obj_feat, downsample=True)  # 2 x 2
        )
        self.l_obj = WN_FC(self.dim_obj_feat, 1, bias=False)
        self.l_cls = WN_FC(self.dim_obj_feat, num_classes)

        self.init_parameter()

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for biases."""
        for name, param in self.named_parameters():
            if param.dim() > 1:
                torch.nn.init.orthogonal_(param)
            if name.endswith('bias'):
                torch.nn.init.constant_(param, 0)

    def forward(self, x, bbox=None, labels=None,):
        """
        Args:
            x:      (b, c, H, W) images.
            bbox:   (b, o, 4) boxes as [x, y, w, h] in [0, 1].
            labels: (b, o) class ids.
        Returns:
            out_im:      (b, 1) image realism logits.
            out_obj_TF:  (b*o, 1) per-object realism logits.
            out_obj_cls: (b*o, 1) logit of each object's ground-truth class.
            [obj_feat, bbox, labels] rearranged small-objects-first per sample.
        """
        batch_size, num_o = labels.size()
        # Convert [x, y, w, h] in [0, 1] to pixel-space [x1, y1, x2, y2].
        bbox_refmt = torchvision.ops.box_convert(bbox, 'xywh', 'xyxy')
        # BUGFIX: scale x-coordinates by the width (size(3)) and y-coordinates
        # by the height (size(2)); the axes were swapped before (a no-op for
        # square inputs, wrong for rectangular ones).
        bbox_refmt[:,:,0] *= x.size(3)
        bbox_refmt[:,:,2] *= x.size(3)
        bbox_refmt[:,:,1] *= x.size(2)
        bbox_refmt[:,:,3] *= x.size(2)
        # Small objects fit in half the image along both axes.
        s_idx = torch.logical_and( bbox[:, :, 2] <= 0.5, bbox[:, :, 3] <= 0.5 ) # b o
        # Separate small and large objects: pixel boxes ...
        bbox_refmt_s = [ b[i] for b, i in zip(bbox_refmt,  s_idx)] # [ o 4 ] * b
        bbox_refmt_l = [ b[i] for b, i in zip(bbox_refmt, ~s_idx)] # [ o 4 ] * b
        # ... labels ...
        labels_s = [ l[i] for l, i in zip(labels,  s_idx)] # [ o ] * b
        labels_l = [ l[i] for l, i in zip(labels, ~s_idx)] # [ o ] * b
        labels_rearrage = torch.stack([ torch.cat([s, l], dim=0) for s, l in zip(labels_s, labels_l) ], dim=0) # b o
        # ... and normalized boxes.
        bbox_s = [ b[i] for b, i in zip(bbox,  s_idx) ] # [ o 4 ] * b
        bbox_l = [ b[i] for b, i in zip(bbox, ~s_idx) ] # [ o 4 ] * b
        bbox_rearrage = torch.stack([ torch.cat([s, l], dim=0) for s, l in zip(bbox_s, bbox_l) ], dim=0) # b o 4

        # Shared trunk.
        x8 = self.shared(x) # 1/8
        x16 = self.sl_boundary(x8) # 1/16

        # Image realism path.
        img_feat = self.img_path(x16)
        out_im = self.l_im(img_feat.mean([2, 3])) # b 1

        # Small objects: RoIAlign on the 1/8-scale map.
        obj_feat_s = self.roi_align_s(self.small_path(x8), bbox_refmt_s)
        obj_feat_s = obj_feat_s.split([len(l) for l in labels_s], 0) # [ o d 8 8 ] * b
        # Large objects: RoIAlign on the 1/16-scale map.
        # BUGFIX: this used roi_align_s on the 1/8 features, which both
        # mis-scaled large boxes and left roi_align_l and the 1/16 boundary
        # features unused.
        obj_feat_l = self.roi_align_l(self.large_path(x16), bbox_refmt_l)
        obj_feat_l = obj_feat_l.split([len(l) for l in labels_l], 0) # [ o d 8 8 ] * b

        # Re-stack all objects per sample (small first, then large).
        obj_feat_rearrage = torch.stack([ torch.cat([s, l], dim=0) for s, l in zip(obj_feat_s, obj_feat_l) ], dim=0) # b o d 8 8
        obj_feat_rearrage_merged = obj_feat_rearrage.view(batch_size*num_o, -1, 8, 8)
        # Per-object real/fake and classification heads.
        obj_feat = self.obj_path(obj_feat_rearrage_merged)
        assert obj_feat.size(2) == 2 and obj_feat.size(3) == 2
        obj_feat = obj_feat.mean( [2, 3] )
        out_obj_TF  = self.l_obj( obj_feat ) # bo 1
        out_obj_cls = self.l_cls(obj_feat) # bo num_c
        # Keep only the logit of each object's own (rearranged) class.
        out_obj_cls = out_obj_cls.gather(1, labels_rearrage.view(-1, 1))
        # View again to get object-specific feats in each sample.
        obj_feat_rearrage = obj_feat.view(batch_size, num_o, -1)

        return out_im, out_obj_TF, out_obj_cls, [obj_feat_rearrage, bbox_rearrage, labels_rearrage]
        
        
class DiscriminatorCausal(nn.Module):
    #@ the auxiliary model in estimating total correlation D_c
    def __init__(self, d_model:int, nhead:int=4, num_layers:int=3 ) -> None:
        """
        Args:
            d_model (int): the hidden dim
            nhead (int, optional): {d_model} must be divisible by nhead. Defaults to 4.
            num_layers (int, optional): # of layers to stack. Defaults to 3.
        """
        super().__init__()
        self.d_model = d_model
        # Use a Transformer encoder to aggregate per-object features.
        # BUGFIX: with the default batch_first=False the (b, num_o, d) input
        # was read as (seq, batch, d), so attention mixed features ACROSS
        # batch samples instead of across the objects of one sample.
        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers, norm=None)
        # Affine head applied to the pooled feature.
        self.affine = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
            Swish()
        )
        self.rezero = ReZero()
        self.final = nn.Linear(d_model, 1, bias=False)

        self.init_parameter()

    def init_parameter(self):
        """Orthogonal init for weight matrices, zeros for biases."""
        for name, param in self.named_parameters():
            if param.dim() > 1:
                torch.nn.init.orthogonal_(param)
            if name.endswith('bias'):
                torch.nn.init.constant_(param, 0)

    def forward(self, in_feat:torch.Tensor) -> torch.Tensor: # b o d -> b 1
        """Score a batch of object-feature sets.

        Args:
            in_feat: (b, num_o, d_model) per-object features.
        Returns:
            (b, 1) discriminator logits.
        """
        b, num_o, d = in_feat.size()
        hidden = self.transformer_encoder(in_feat).mean(1) # pool objects: b o d -> b d
        hidden = self.rezero(hidden, self.affine(hidden) ) # residual, b d
        return self.final( hidden ) # b d -> b 1


        
        
                
