###########################################################################
# Created by: NTU
# Email: heshuting555@gmail.com
# Copyright (c) 2023
###########################################################################

from math import ceil
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.autograd import Variable
import numpy as np
from detectron2.config import configurable
from detectron2.layers import Conv2d

from einops import rearrange, repeat

class GraphNorm(nn.Module):
    """Normalize features over dim 1 with a learnable per-feature affine.

    Computes (x - mean) / (std + eps) along dimension 1 (sample std,
    Bessel-corrected, as produced by ``Tensor.std``), then applies the
    learnable scale ``weight`` and shift ``bias`` (initialized to identity).
    """

    def __init__(self, num_features):
        super().__init__()
        # Affine parameters start as the identity transform.
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        # Small constant keeping the division finite for near-constant rows.
        self.eps = 1e-5

    def forward(self, x):
        mu = x.mean(dim=1, keepdim=True)
        sigma = x.std(dim=1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return normalized * self.weight + self.bias


def bn_init(bn):
    """Reset a norm layer's affine parameters to identity (weight=1, bias=0)."""
    nn.init.ones_(bn.weight)
    nn.init.zeros_(bn.bias)


class GraphParser(nn.Module):
    """One graph-reasoning layer refining per-frame object queries.

    Each forward call runs two passes:
      1. ``forward_obj_gnn`` — message passing on a top-K similarity graph
         between queries, with edge features fused from visual similarity,
         sentence-level language features and positional offsets.
      2. ``forward_text_gnn`` — injects a small set of text nodes (pooled
         from word-level language features) into each object query.

    Shapes below use BT = batch*frames, fQ = queries per frame, C = channels.
    """
    def __init__(self, in_channels, K=3,text_nodes=4):
        # K: neighbours kept per frame when building the object graph.
        # text_nodes: number of pooled language nodes produced by fusion_word.
        super(GraphParser, self).__init__()
        self.K = K
        dim_in = in_channels
        dim_out = in_channels
        self.dim_out = dim_out
        # Fuses the concatenated [similarity, language, position] edge
        # features (3*C) back down to C channels.
        self.fusion_e = nn.Linear(3 * in_channels, in_channels, bias=False)
        # Graph projections: A/B project source/neighbour nodes, C projects
        # edges, V produces messages, U is the residual self-projection.
        self.A = nn.Linear(dim_in, dim_out, bias=False)
        self.B = nn.Linear(dim_in, dim_out, bias=False)
        self.C = nn.Linear(dim_in, dim_out, bias=False)
        self.V = nn.Linear(dim_in, dim_out, bias=False)
        self.U = nn.Linear(dim_in, dim_out, bias=False)
        self.sigmoid = nn.Sigmoid()
        # Softmax over dim 1 (the neighbour dimension of the edge tensor).
        self.softmax = nn.Softmax(1)
        self.bnv = GraphNorm(dim_out)
        self.bne = GraphNorm(dim_out)
        self.act = nn.ReLU()
        self.final_node = nn.Linear(dim_in, dim_out, bias=False)
        self.se_norm_node = nn.LayerNorm(dim_out)
        self.final_text = nn.Linear(dim_in, dim_out, bias=False)
        self.se_norm_text = nn.LayerNorm(dim_out)


        # Attention over words producing `text_nodes` pooling weights.
        # 768 is the word-feature width (presumably a BERT-style text
        # encoder — TODO confirm against the caller).
        self.fusion_word = nn.Sequential(
            nn.Linear(768, 768),
            nn.Linear(768, text_nodes),
            nn.Softmax(dim=-1)
        )
        self.fc = nn.Linear(768,768)
        # NA maps pooled 768-d text nodes into the query channel width.
        self.NA = nn.Linear(768,dim_out)
        # TT projects text nodes, TO projects object queries in the text pass.
        self.TT = nn.Linear(dim_in,dim_out)
        self.TO = nn.Linear(dim_in,dim_out)
        self.relu = nn.ReLU()
        self.bnt = GraphNorm(dim_out)

        self.init_weights_linear(dim_in, 1)

    def init_weights_linear(self, dim_in, gain):
        """He-style normal init (std = gain * sqrt(2/dim_in)) for all graph
        projections; the three GraphNorm layers are reset to identity."""
        scale = gain * np.sqrt(2.0 / dim_in)
        self.A.weight.data.normal_(0, scale)
        self.B.weight.data.normal_(0, scale)
        self.C.weight.data.normal_(0, scale)
        self.V.weight.data.normal_(0, scale)
        self.U.weight.data.normal_(0, scale)
        self.fc.weight.data.normal_(0, scale)
        self.NA.weight.data.normal_(0, scale)
        self.TT.weight.data.normal_(0, scale)
        self.TO.weight.data.normal_(0, scale)
        bn_init(self.bnv)
        bn_init(self.bne)
        bn_init(self.bnt)

    def get_obj_edges(self, frame_query, query_pos, lang_feat=None):
        """Build edge features for the object graph.

        Args:
            frame_query: (BT, fQ, C) per-frame object queries.
            query_pos: (BT, fQ, C) positional embeddings for the queries.
            lang_feat: sentence-level language feature broadcast onto every
                edge (assumed broadcastable to (BT*fQ, K*BT, C) via repeat —
                TODO confirm its exact shape against the caller).

        Returns:
            edges: (BT*fQ, K*BT, C) fused edge features.
            tar_query: (BT*fQ, K*BT, C) features of the selected neighbours.
            source_query: (BT*fQ, K*BT, C) source features repeated per edge.
        """
        BT, fQ, C = frame_query.shape
        frame_query_f = frame_query.reshape(BT*fQ,C)
        query_pos_f = query_pos.reshape(BT*fQ,C)

        # Pairwise dot-product similarity between all queries; detached so
        # the graph topology selection is not differentiated through.
        # NOTE(review): this materializes a (BT*fQ, BT*fQ) matrix — memory
        # is quadratic in the total query count.
        si = frame_query_f.detach()
        si = torch.einsum('i j , j k -> i k', si, si.transpose(0, 1))
        si = si.reshape(BT,fQ,BT,fQ)
        #[BT * fQ, BT * fQ]
        # For each (frame, query) keep the K most similar queries in every
        # frame, then expand the indices so features can be gathered.
        idx = si.topk(k=self.K, dim=-1, largest=True)[1].unsqueeze(-1).expand(-1, -1, -1, -1, C) #[B,N,B,K]  [B,N,C] -> [B,N,B,N,C]
        selected_nodes = frame_query.unsqueeze(0).unsqueeze(0).repeat(BT,fQ,1,1,1)
        selected_nodes = torch.gather(selected_nodes, 3, idx)
        tar_query = selected_nodes.reshape(BT*fQ,self.K*BT,C)
        selected_nums = self.K*BT
        source_query = frame_query_f.unsqueeze(1).repeat(1,selected_nums,1) #[TN,TK,C] [T,N,T,K,C]
        # Elementwise product similarity feature, L2-normalized per edge.
        query_sim = nn.functional.normalize(tar_query * source_query,dim=-1,p=2)

        # Same neighbour gathering applied to the positional embeddings.
        selected_nodes_pos = query_pos.unsqueeze(0).unsqueeze(0).repeat(BT,fQ,1,1,1)
        selected_nodes_pos = torch.gather(selected_nodes_pos, 3, idx)
        tar_query_pos = selected_nodes_pos.reshape(BT*fQ,self.K*BT,C)
        source_query_pos = query_pos_f.unsqueeze(1).repeat(1,selected_nums,1)

        # NOTE(review): torch.dist returns a single scalar (the global L2
        # norm of the difference tensor), not per-edge distances — every
        # positional edge below is scaled by the same value. Verify this is
        # intended rather than a per-pair distance.
        distance = torch.dist(tar_query_pos,source_query_pos,p=2)

        edges_pos = nn.functional.normalize((tar_query_pos - source_query_pos) * distance,dim=-1,p=2)
        # Sentence-level language feature broadcast onto every edge.
        edges_lang = nn.functional.normalize(lang_feat.repeat(BT*fQ,selected_nums,1),dim=-1,p=2)

        edges = torch.cat([query_sim,edges_lang,edges_pos],dim=-1)

        edges = self.fusion_e(edges)
        #edges = nn.functional.normalize(edges, p=2, dim=-1) #[]

        return edges,tar_query,source_query
        #outs ->
    def forward_obj_gnn(self, frame_query, query_pos, lang_feat=None):
        """One round of message passing on the object graph.

        Returns updated queries with the same (BT, fQ, C) shape.
        """
        edges,selected_nodes,self_nodes = self.get_obj_edges(frame_query,query_pos,lang_feat)
        BT, fQ, C = frame_query.shape
        x = frame_query.reshape(BT*fQ,C)
        # self_nodes is reassigned to its A-projection from here on.
        self_nodes = self.A(self_nodes) #BT*fQ,BT*K,C
        e = self.C(edges)
        # Edge update: projected source + neighbour + edge features, with a
        # residual back to the raw edges.
        ne = self.act(self.bne((self_nodes + self.B(selected_nodes) + e))) + edges
        # NOTE(review): softmax(dim=1) applied on top of a sigmoid is an
        # unusual gating; confirm the double squashing is intended.
        ne = self.softmax(self.sigmoid(ne))
        # NOTE(review): V is applied to the A-projected self nodes (see
        # reassignment above), not to the raw neighbour features — verify.
        ne = self.V(self_nodes) * ne
        # Mean-aggregate messages over all K*BT neighbours, plus a residual
        # projection of the original query.
        aggregation = ne.sum(1) / (BT * self.K) + self.U(x) #[N,C]
        outs = self.act(self.bnv(aggregation))

        outs = self.final_node(outs).reshape(BT,fQ,C)
        outs = self.se_norm_node(outs)

        return outs
    def normalize_digraph(self,A):
        """Symmetrically normalize an adjacency matrix: D^-1/2 A D^-1/2.

        NOTE(review): currently unused — its only call site (in
        get_text_edges) is commented out. Zero-degree nodes would produce
        inf via the ``** -0.5`` if it were re-enabled.
        """
        n, _ = A.shape
        node_degrees = A.detach().sum(dim = -1)
        degs_inv_sqrt = node_degrees ** -0.5
        norm_degs_matrix = torch.eye(n)
        dev = A.get_device()
        # get_device() returns -1 for CPU tensors; only move for CUDA inputs.
        if dev >= 0:
            norm_degs_matrix = norm_degs_matrix.to(dev)
        norm_degs_matrix = norm_degs_matrix.view(n, n) * degs_inv_sqrt.view(n, 1)
        norm_A = (norm_degs_matrix @ A).permute(1,0) @ norm_degs_matrix
        return norm_A
    def get_text_edges(self, frame_query, lang_word_feat=None,lang_mask=None): #[40,1]
        """Pool word features into text nodes and build query->text adjacency.

        NOTE(review): ``lang_mask[0]`` is used directly, i.e. the word mask
        of the first batch element masks all words — this assumes batch
        size 1 at this point; confirm against the caller.

        Returns:
            adj: (BT*fQ, text_nodes) binary adjacency from queries to nodes.
            text_nodes: (text_nodes, dim_out) pooled language node features.
        """
        lang_word_feat = lang_word_feat.permute(1,0)
        attn = (self.fusion_word(lang_word_feat) * lang_mask[0]).permute(1,0) #[4,K] 
        value = self.fc(lang_word_feat) * lang_mask[0] #[K,768]
        text_nodes = self.NA(attn @ value).reshape(-1,self.dim_out) #[B,4,dim_in]
        BT, fQ, C = frame_query.shape
        frame_query_f = frame_query.reshape(BT*fQ,C)
        si = torch.einsum('i j , j k -> i k', frame_query_f, text_nodes.transpose(0, 1)) #[BT*fQ,C] [C,4] [BT*fQ,4] 
        # NOTE(review): softmax over dim 0 normalizes across all BT*fQ
        # queries, so entries shrink as the query count grows; the fixed 0.5
        # threshold can then yield a very sparse (possibly empty) adjacency.
        # Verify this binarization is intended.
        si = si.softmax(0)
        adj = (si >= 0.5).float()
        #adj = self.normalize_digraph(adj)
        return adj,text_nodes

    def forward_text_gnn(self, frame_query, lang_word_feat=None,lang_mask=None):
        """Aggregate text-node features into each object query; returns (BT, fQ, C)."""
        adj,text_nodes = self.get_text_edges(frame_query,lang_word_feat,lang_mask) #[BT*fQ,4]
        BT, fQ, C = frame_query.shape
        x = frame_query.reshape(BT*fQ,C) #[BT*fQ,C] #[BT*fQ,4]
        # NOTE(review): the text-node count is hard-coded to 4 here; keep it
        # in sync with the `text_nodes` constructor argument.
        tar = text_nodes.reshape(4,self.dim_out)
        # Sum projected text nodes into each query according to the adjacency.
        aggregate = torch.einsum('i j, j k->i k', adj, self.TT(tar)) #[BT*fQ,dim_out]
        outs = self.relu(self.bnt(aggregate + self.TO(x))) + x
        outs = self.final_text(outs).reshape(BT,fQ,C)
        outs = self.se_norm_text(outs) 
        return outs

    def forward(self, frame_query, query_pos, lang_feat=None,lang_word_feat=None,lang_mask=None):
        """Run the object-graph pass then the text-graph pass; shape-preserving (BT, fQ, C)."""
        frame_query = self.forward_obj_gnn(frame_query, query_pos, lang_feat)
        frame_query = self.forward_text_gnn(frame_query, lang_word_feat,lang_mask)
        return frame_query


class MLP(nn.Module):
    """Simple feed-forward network (FFN): ``num_layers`` linear layers with
    ReLU activations between them and no activation after the last layer."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Layer widths: input -> hidden * (num_layers - 1) -> output.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(dims[i], dims[i + 1]) for i in range(num_layers)
        )

    def forward(self, x):
        last = self.num_layers - 1
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i < last:
                x = F.relu(x)
        return x

class Tlggnv2(nn.Module):
    """Clip-level decoder that stacks GraphParser layers over per-frame
    object queries and predicts class logits and mask embeddings, fusing a
    sentence-level language feature into the classifier input.
    """
    @configurable
    def __init__(self,
                in_channels,
                 hidden_dim,
                 *,
                 dec_layers,
                 conv_dim: int,
                 enforce_input_project: bool,
                 num_classes: int,
                 num_queries: int,
                 mask_dim: int,
                 num_frames: int,
                 use_sim: bool,
                 sim_use_clip: bool,
                 aux_loss: bool,
                 K=3):
        super().__init__()
        self.num_layers = dec_layers
        self.num_classes = num_classes
        self.graph_decoder_layers = nn.ModuleList()
        #self.use_sim = use_sim
        # NOTE(review): similarity embedding is force-disabled here,
        # overriding the `use_sim` value from the config — confirm intended.
        self.use_sim = False
        self.sim_use_clip = sim_use_clip
        self.num_frames = num_frames
        self.aux_loss = aux_loss
        self.num_queries = num_queries

        # 1x1 conv projecting pixel-decoder features to the mask-embedding
        # width; masks are obtained elsewhere by dotting these features with
        # pred_mask_embed (TODO confirm against the caller).
        self.vita_mask_features = Conv2d(
            conv_dim,
            mask_dim,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        weight_init.c2_xavier_fill(self.vita_mask_features)

        # One GraphParser per decoder layer.
        for i in range(self.num_layers):
            self.graph_decoder_layers.append(GraphParser(
                in_channels=hidden_dim,
                K=K,
            ))

        # NOTE(review): the projection maps hidden_dim -> hidden_dim even
        # though the trigger compares in_channels with hidden_dim; verify
        # in_channels == hidden_dim is guaranteed upstream.
        if in_channels != hidden_dim or enforce_input_project:
            self.input_proj_dec = nn.Linear(hidden_dim, hidden_dim)
        else:
            # Identity pass-through when no projection is needed.
            self.input_proj_dec = nn.Sequential()

        self.decoder_norm = nn.LayerNorm(hidden_dim)

        #self.fc = nn.Linear(hidden_dim,hidden_dim)

        # Classifier consumes [query ; sentence-feature] concatenation (2*C)
        # and predicts num_classes + 1 (background) logits.
        self.class_embed = nn.Linear(2 * hidden_dim, num_classes + 1)

        self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)

        # Dead given the hard-coded self.use_sim = False above.
        if self.use_sim:
            self.sim_embed_frame = nn.Linear(hidden_dim, hidden_dim)
            if self.sim_use_clip:
                self.sim_embed_clip = nn.Linear(hidden_dim, hidden_dim)


    @classmethod
    def from_config(cls, cfg, in_channels):
        """Build constructor kwargs from a detectron2 config node."""
        ret = {}
        ret["in_channels"] = in_channels

        ret["hidden_dim"] = cfg.MODEL.VITA.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES


        assert cfg.MODEL.VITA.DEC_LAYERS >= 1
        ret["dec_layers"] = cfg.MODEL.VITA.DEC_LAYERS
        ret["enforce_input_project"] = cfg.MODEL.VITA.ENFORCE_INPUT_PROJ

        ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        ret["num_frames"] = cfg.INPUT.SAMPLING_FRAME_NUM

        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        ret["sim_use_clip"] = cfg.MODEL.VITA.SIM_USE_CLIP
        ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        # Config-driven, but overridden to False in __init__ (see note there).
        ret["use_sim"] = cfg.MODEL.VITA.SIM_WEIGHT > 0.0

        return ret

    def forward(self, frame_query, query_pos, lang_feat, lang_word_feat, lang_mask):
        """Decode per-frame queries into class logits and mask embeddings.

        Args:
            frame_query: (BT, fQ, C) per-frame object queries.
            query_pos: (BT, fQ, C) positional embeddings (passed to the graph layers).
            lang_feat: sentence-level language feature (used by einops repeat
                with pattern 'b c -> d b q c', so presumably (B, C) — TODO confirm).
            lang_word_feat, lang_mask: word-level features/mask forwarded to
                the graph layers.

        Returns:
            dict with 'pred_logits', 'pred_mask_embed', 'pred_fq_embed',
            'pred_cq_embed' and 'aux_outputs' (per-intermediate-layer dicts).
        """
        BT, fQ, C = frame_query.shape
        # Training: batch inferred from the configured frame count;
        # inference: single video assumed (B = 1).
        B = BT // self.num_frames if self.training else 1
        T = self.num_frames if self.training else BT // B

        frame_query = self.input_proj_dec(frame_query)  # T, fQ, LB, C

        if self.use_sim:
            # NOTE(review): dead branch (use_sim is hard-coded False in
            # __init__). `L` is undefined here and would raise NameError if
            # this path were ever re-enabled — fix before turning use_sim on.
            pred_fq_embed = self.sim_embed_frame(frame_query)   # TfQ, LB, C
            pred_fq_embed = pred_fq_embed.transpose(0, 1).reshape(L, B, T, fQ, C)
        else:
            pred_fq_embed = None

        decoder_outputs = []
        for i in range(self.num_layers):
            frame_query = self.graph_decoder_layers[i](
                frame_query=frame_query,
                query_pos=query_pos,
                lang_feat=lang_feat,
                lang_word_feat=lang_word_feat,
                lang_mask=lang_mask
            ) #[L,N,cQ,C]
            # Collect every layer when aux losses are on; otherwise only the
            # final layer's output.
            if (self.training and self.aux_loss) or (i == self.num_layers - 1):
                dec_out = self.decoder_norm(frame_query)
                decoder_outputs.append(dec_out.view(T, self.num_queries, C))
        decoder_outputs = torch.stack(decoder_outputs, dim=0)  # D, L, B, N, cQ, C
        #D, L, B, N, cQ, C = decoder_outputs.shape
        # NOTE(review): each stacked entry was shaped (T, num_queries, C), so
        # the dimension unpacked as `B` here is actually T — the naming is
        # misleading but consistent with how B/cQ are used below; verify.
        D, B, cQ, C = decoder_outputs.shape
        # Broadcast the sentence feature to every decoder output position and
        # concatenate for classification.
        text_features_ = repeat(lang_feat, 'b c -> d b q c', d=D, q=cQ).repeat(1,B,1,1)
        fusion_feature = torch.cat([decoder_outputs, text_features_], dim=-1)
        pred_cls = self.class_embed(fusion_feature)
        pred_mask_embed = self.mask_embed(decoder_outputs)
        if self.use_sim and self.sim_use_clip:
            pred_cq_embed = self.sim_embed_clip(decoder_outputs)
        else:
            # Placeholder list so indexing/zip below still works.
            pred_cq_embed = [None] * self.num_layers
        out = {
            'pred_logits': pred_cls[-1],
            'pred_mask_embed': pred_mask_embed[-1],
            'pred_fq_embed': pred_fq_embed,
            'pred_cq_embed': pred_cq_embed[-1],
            'aux_outputs': self._set_aux_loss(
                pred_cls, pred_mask_embed, pred_cq_embed, pred_fq_embed
            )
        }
        return out
    @torch.jit.unused
    def _set_aux_loss(
        self, outputs_cls, outputs_mask_embed, outputs_cq_embed, outputs_fq_embed
    ):
        # Per-intermediate-layer predictions for deep supervision; the final
        # layer ([:-1] drops it) is returned directly from forward instead.
        return [{"pred_logits": a, "pred_mask_embed": b, "pred_cq_embed": c, "pred_fq_embed": outputs_fq_embed}
                for a, b, c in zip(outputs_cls[:-1], outputs_mask_embed[:-1], outputs_cq_embed[:-1])]