###########################################################################
# Created by: NTU
# Email: heshuting555@gmail.com
# Copyright (c) 2023
###########################################################################

from math import ceil
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.autograd import Variable
import numpy as np
from detectron2.config import configurable
from detectron2.layers import Conv2d

from einops import rearrange, repeat

class GraphNorm(nn.Module):
    """Normalize features over dim 1 with a learnable per-channel affine.

    Parameters `weight`/`bias` start as the identity transform (ones/zeros);
    `bn_init` resets them to the same state.
    """

    def __init__(self, num_features):
        super().__init__()
        # Learnable affine parameters, initialised to the identity mapping.
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        # Added to the std (not the variance) for numerical stability.
        self.eps = 1e-5

    def forward(self, x):
        # Standardise across dim 1, then apply the channel-wise affine.
        mu = x.mean(dim=1, keepdim=True)
        sigma = x.std(dim=1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return normalized * self.weight + self.bias


def bn_init(bn):
    """Reset a norm layer's affine parameters to the identity (weight=1, bias=0)."""
    nn.init.ones_(bn.weight)
    nn.init.zeros_(bn.bias)
def create_e_matrix(n):
    """Build start/end incidence matrices for a fully-connected graph of n nodes.

    Edges are enumerated as e = i * n + j for i, j in range(n).

    Args:
        n: number of nodes.
    Returns:
        start: (n*n, n) one-hot rows; row i*n+j selects node j (edge source).
        end:   (n*n, n) one-hot rows; row i*n+j selects node i (edge target).
    """
    eye = torch.eye(n)
    # Row block i (edges i*n .. (i+1)*n-1) is e_i: each of those edges targets node i.
    end = eye.repeat_interleave(n, dim=0)
    # Identity tiled n times: edge i*n+j has source node j.
    start = eye.repeat(n, 1)
    return start, end
class GraphParser(nn.Module):
    """Residual gated graph reasoning over frame queries.

    For each query slot, nodes are the K most similar queries gathered across
    frames (``get_mostk_similar``); edges are text-modulated pairwise node
    products (``get_edges``); two gated graph-conv layers (``forward_graph``)
    then refine the node features.
    """

    def __init__(self, in_channels, K=1):
        # in_channels: channel dim C of the query features.
        # K: number of neighbours kept per query slot.
        super(GraphParser, self).__init__()
        self.in_channels = in_channels
        #self.num_classes = num_classes * K
        # GNN Matrix: E x N
        # Start Matrix Item:  define the source node of one edge
        # End Matrix Item:  define the target node of one edge
        # Algorithm details in Residual Gated Graph Convnets: arXiv preprint arXiv:1711.07553
        # or Benchmarking Graph Neural Networks: arXiv preprint arXiv:2003.00982v3

        # start, end = create_e_matrix(self.num_classes)
        # self.start = Variable(start, requires_grad=False)
        # self.end = Variable(end, requires_grad=False)

        dim_in = self.in_channels
        dim_out = self.in_channels

        # Layer-1 projections: A/B/E drive the edge update, U/V the gated node message.
        self.U1 = nn.Linear(dim_in, dim_out, bias=False)
        self.V1 = nn.Linear(dim_in, dim_out, bias=False)
        self.A1 = nn.Linear(dim_in, dim_out, bias=False)
        self.B1 = nn.Linear(dim_in, dim_out, bias=False)
        self.E1 = nn.Linear(dim_in, dim_out, bias=False)

        # Layer-2 projections (same roles as layer 1).
        self.U2 = nn.Linear(dim_in, dim_out, bias=False)
        self.V2 = nn.Linear(dim_in, dim_out, bias=False)
        self.A2 = nn.Linear(dim_in, dim_out, bias=False)
        self.B2 = nn.Linear(dim_in, dim_out, bias=False)
        self.E2 = nn.Linear(dim_in, dim_out, bias=False)

        self.sigmoid = nn.Sigmoid()
        # Applied over dim 2 after edges are reshaped to (b, nc, nc, c) in forward_graph.
        self.softmax = nn.Softmax(2)
        self.bnv1 = GraphNorm(dim_out)  # node norm, layer 1
        self.bne1 = GraphNorm(dim_out)  # edge norm, layer 1

        self.bnv2 = GraphNorm(dim_out)  # node norm, layer 2
        self.bne2 = GraphNorm(dim_out)  # edge norm, layer 2

        self.act = nn.ReLU()

        self.init_weights_linear(dim_in, 1)

        # Mixes the text-modulated edge features before L2 normalisation.
        self.fusion_e = nn.Linear(in_channels,in_channels,bias=False)
        self.K = K

    def init_weights_linear(self, dim_in, gain):
        """Init all projections with N(0, gain*sqrt(2/dim_in)); reset the norms to identity."""
        # conv1
        scale = gain * np.sqrt(2.0 / dim_in)
        self.U1.weight.data.normal_(0, scale)
        self.V1.weight.data.normal_(0, scale)
        self.A1.weight.data.normal_(0, scale)
        self.B1.weight.data.normal_(0, scale)
        self.E1.weight.data.normal_(0, scale)

        self.U2.weight.data.normal_(0, scale)
        self.V2.weight.data.normal_(0, scale)
        self.A2.weight.data.normal_(0, scale)
        self.B2.weight.data.normal_(0, scale)
        self.E2.weight.data.normal_(0, scale)

        bn_init(self.bnv1)
        bn_init(self.bne1)
        bn_init(self.bnv2)
        bn_init(self.bne2)

    def forward_graph(self, x, edge):
        """Run two residual gated graph-conv layers.

        Args:
            x: node features, shape (N, nc, C).
            edge: edge features, reshaped below to (N, nc*nc, C).
        Returns:
            Refined node features, shape (N, nc, C).
        """
        dev = x.device

        N,nc,C = x.size()
        x = x.reshape(N,nc,C)  # NOTE(review): no-op reshape; kept for symmetry with `edge`
        edge = edge.reshape(N,nc*nc,C)

        # One-hot incidence matrices mapping the nc nodes onto the nc*nc edges.
        start, end = create_e_matrix(nc)
        # NOTE(review): Variable is a deprecated no-op wrapper in modern PyTorch.
        start = Variable(start, requires_grad=False).to(dev)
        end = Variable(end, requires_grad=False).to(dev)

        # GNN Layer 1:
        res = x
        Vix = self.A1(x)  # V x d_out
        Vjx = self.B1(x)  # V x d_out
        e = self.E1(edge)  # E x d_out

        # Residual edge update: scatter endpoint projections onto the edges via einsum.
        edge = edge + self.act(self.bne1(torch.einsum('ev, bvc -> bec', (end, Vix)) + torch.einsum('ev, bvc -> bec',(start, Vjx)) + e))  # E x d_out

        # Edge gate: sigmoid, then softmax over the source-node axis.
        e = self.sigmoid(edge)
        b, _, c = e.shape
        e = e.view(b,nc, nc, c)
        e = self.softmax(e)
        e = e.view(b, -1, c)

        # Gated message aggregation back onto the nodes (scaled by 1/nc).
        Ujx = self.V1(x)  # V x H_out
        Ujx = torch.einsum('ev, bvc -> bec', (start, Ujx))  # E x H_out
        Uix = self.U1(x)  # V x H_out
        x = Uix + torch.einsum('ve, bec -> bvc', (end.t(), e * Ujx)) / nc  # V x H_out
        x = self.act(res + self.bnv1(x))
        res = x

        # GNN Layer 2:
        Vix = self.A2(x)  # V x d_out
        Vjx = self.B2(x)  # V x d_out
        e = self.E2(edge)  # E x d_out
        edge = edge + self.act(self.bne2(torch.einsum('ev, bvc -> bec', (end, Vix)) + torch.einsum('ev, bvc -> bec', (start, Vjx)) + e))  # E x d_out

        e = self.sigmoid(edge)
        b, _, c = e.shape
        e = e.view(b, nc, nc, c)
        e = self.softmax(e)
        e = e.view(b, -1, c)

        Ujx = self.V2(x)  # V x H_out
        Ujx = torch.einsum('ev, bvc -> bec', (start, Ujx))  # E x H_out
        Uix = self.U2(x)  # V x H_out
        x = Uix + torch.einsum('ve, bec -> bvc', (end.t(), e * Ujx)) / nc  # V x H_out
        x = self.act(res + self.bnv2(x))
        x = x.reshape(N,nc,C)  # NOTE(review): also a no-op; shapes are unchanged above
        return x

    def get_mostk_similar(self,frames, K):
        """For each frame-query, pick the K nearest reference queries (from frame 0).

        Args:
            frames: (T, N, C) per-frame query features.
            K: number of neighbours to keep.
        Returns:
            (N, T*K, C) gathered neighbour features per query slot.
        """
        T, N, C = frames.size()
        # Reference queries come from the first frame only.
        query = frames[0, :, :]
        # Compute the Euclidean distance between each query and all frames.
        query_expanded = query.unsqueeze(0).unsqueeze(0).repeat(T, N, 1, 1)  # [T, N, N, C]
        frames_expanded = frames.unsqueeze(2) # [T, N, 1, C]
        frames_expanded = frames_expanded.repeat(1,1,N,1) # [T, N, N, C]

        distance = torch.norm(query_expanded - frames_expanded, dim=-1)  # [T, N, N]

        # Find the indices (and distances) of the K most similar queries.
        topk_distances, topk_indices = torch.topk(distance, K, dim=-1, largest=False)  # [T, N, k]

        # Convert topk_indices into a gather-compatible index tensor.
        expanded_topk_indices = topk_indices.unsqueeze(-1).repeat(1, 1, 1, C)  # [T, N, k, C]

        # Extract the most similar vectors.
        # NOTE(review): frames_expanded[t, i, j] == frames[t, i] for every j, so this
        # gather returns frames[t, i] repeated K times regardless of topk_indices —
        # gathering from query_expanded may have been intended; confirm.
        #query_expanded_for_gather = frames.unsqueeze(2).expand(L, T, N, N, C)  # [T, N, N, C]
        most_similar_queries = torch.gather(frames_expanded, 2, expanded_topk_indices)
        most_similar_queries = most_similar_queries.permute(1, 0, 2, 3).reshape(N, T * K, C)
        return most_similar_queries

    def get_edges(self,nodes, text_embedding=None):
        """Build text-modulated edge features for the fully-connected node graph.

        Args:
            nodes: (N, T*K, C) node features.
            text_embedding: text feature multiplied into every edge; shape must
                broadcast against (N, (T*K)^2, C) — presumably (C,) or (1, C);
                TODO confirm against the caller.
        Returns:
            (N, (T*K)^2, C) L2-normalised edge features.
        """
        # (L,N,T*K,C) (L,C)
        # Build indices for all node pairs.
        N, tk, C = nodes.size()
        device = nodes.device
        # Build indices for all node pairs.
        node_indices = torch.arange(tk)  # [0, 1, 2, ..., T*K-1]
        node_pairs = torch.cartesian_prod(node_indices, node_indices).to(device)  # all possible node pairs, shape [(T*K)^2, 2]

        # Expand dims for batched operation.
        node_pairs = node_pairs.unsqueeze(0).expand(N, -1, -1)  # (L, N, (T*K)^2, 2)

        # Extract source and target node indices for each edge.
        start_indices = node_pairs[..., 0].long()  # (L, N, (T*K)^2)
        end_indices = node_pairs[..., 1].long()  # (L, N, (T*K)^2)

        # Use gather to pick out per-edge endpoint features.
        # NOTE(review): expand with the tensor's own sizes is a no-op here.
        nodes_expanded = nodes.expand(-1, tk, -1)  # (N, T*K, C)

        edges_start = torch.gather(nodes_expanded, 1,
                                   start_indices.unsqueeze(-1).expand(-1, -1, C))  # (L, N, (T*K)^2, C)
        edges_end = torch.gather(nodes_expanded, 1,
                                 end_indices.unsqueeze(-1).expand(-1, -1, C))  # (L, N, (T*K)^2, C)

        # Edge feature = elementwise product of the two endpoints, modulated by the text.
        edge_features = edges_start * edges_end * text_embedding # (L, N, (T*K)^2, C)
        edge_features = self.fusion_e(edge_features)
        edge_features = nn.functional.normalize(edge_features, p=2, dim=-1)
        return edge_features

    def forward(self,frame_query,lang_feat):
        """Refine frame queries via graph reasoning; mean-pool over the K neighbours.

        Args:
            frame_query: (BT, fQ, C) frame queries (dim 0 indexes frames).
            lang_feat: text embedding passed through to get_edges.
        Returns:
            (BT, fQ, C) refined queries.
        """
        BT, fQ, C = frame_query.shape
        nodes = self.get_mostk_similar(frame_query, self.K) #(N, T * K, C)
        edges = self.get_edges(nodes, lang_feat)
        outs = self.forward_graph(nodes, edges) #(N, T * K, C)
        outs = outs.reshape(fQ,BT,self.K,C)
        outs = torch.mean(outs,dim=-2).permute(1,0,2)
        return outs

class MLP(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x

class GVRS(nn.Module):
    """Graph-based decoder head over per-frame queries.

    Stacks `dec_layers` GraphParser layers, then decodes class logits (from the
    concatenation of query and text features) and mask embeddings, with
    optional deep supervision via aux_outputs.
    """

    @configurable
    def __init__(self,
                in_channels,
                 hidden_dim,
                 *,
                 dec_layers,
                 conv_dim: int,
                 enforce_input_project: bool,
                 num_classes: int,
                 num_queries: int,
                 mask_dim: int,
                 num_frames: int,
                 use_sim: bool,
                 sim_use_clip: bool,
                 aux_loss: bool,
                 K=8):
        super().__init__()
        self.num_layers = dec_layers
        self.num_classes = num_classes
        self.graph_decoder_layers = nn.ModuleList()
        #self.use_sim = use_sim
        # NOTE(review): the similarity branch is force-disabled; the configured
        # `use_sim` value is ignored — confirm this is intentional.
        self.use_sim = False
        self.sim_use_clip = sim_use_clip
        self.num_frames = num_frames
        self.aux_loss = aux_loss
        self.num_queries = num_queries

        # 1x1 conv projecting pixel-decoder features to the mask-embedding dim.
        self.vita_mask_features = Conv2d(
            conv_dim,
            mask_dim,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        weight_init.c2_xavier_fill(self.vita_mask_features)

        for i in range(self.num_layers):
            self.graph_decoder_layers.append(GraphParser(
                in_channels=hidden_dim,
                K=K,
            ))

        # NOTE(review): projects hidden_dim -> hidden_dim even when triggered by
        # in_channels != hidden_dim — presumably queries are already hidden_dim
        # wide; confirm against the caller.
        if in_channels != hidden_dim or enforce_input_project:
            self.input_proj_dec = nn.Linear(hidden_dim, hidden_dim)
        else:
            self.input_proj_dec = nn.Sequential()

        self.decoder_norm = nn.LayerNorm(hidden_dim)

        #self.fc = nn.Linear(hidden_dim,hidden_dim)

        # Classifier input is the [query; text] concatenation, hence 2 * hidden_dim.
        self.class_embed = nn.Linear(2 * hidden_dim, num_classes + 1)

        self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)

        if self.use_sim:
            self.sim_embed_frame = nn.Linear(hidden_dim, hidden_dim)
            if self.sim_use_clip:
                self.sim_embed_clip = nn.Linear(hidden_dim, hidden_dim)


    @classmethod
    def from_config(cls, cfg, in_channels):
        """Map a detectron2 config to the constructor kwargs (detectron2 @configurable)."""
        ret = {}
        ret["in_channels"] = in_channels

        ret["hidden_dim"] = cfg.MODEL.VITA.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES


        assert cfg.MODEL.VITA.DEC_LAYERS >= 1
        ret["dec_layers"] = cfg.MODEL.VITA.DEC_LAYERS
        ret["enforce_input_project"] = cfg.MODEL.VITA.ENFORCE_INPUT_PROJ

        ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        ret["num_frames"] = cfg.INPUT.SAMPLING_FRAME_NUM

        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        ret["sim_use_clip"] = cfg.MODEL.VITA.SIM_USE_CLIP
        ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        ret["use_sim"] = cfg.MODEL.VITA.SIM_WEIGHT > 0.0

        # NOTE(review): "aux_loss" and "K" are not set here and fall back to
        # @configurable defaults — but `aux_loss` has no default in __init__;
        # confirm how it is supplied.
        return ret

    def fusion_query(self,frame_query):
        #[L,BT,N,C]
        # NOTE(review): dead code — `self.fc` is commented out in __init__, so
        # this would raise AttributeError; its only call site in forward is
        # also commented out.
        frame_query = self.fc(frame_query)
        weights = F.softmax(frame_query, dim=1)  # shape: [T, hidden_dim]

        outs = (frame_query * weights).sum(dim=1)
        return outs

    def forward(self, frame_query, lang_feat, lang_mask):
        """Decode logits and mask embeddings from frame queries.

        Args:
            frame_query: (BT, fQ, C) per-frame query features.
            lang_feat: text embedding, broadcast against the decoder outputs.
            lang_mask: unused here — kept for interface compatibility.
        Returns:
            dict with pred_logits, pred_mask_embed, pred_fq_embed,
            pred_cq_embed and aux_outputs (per-layer predictions).
        """
        BT, fQ, C = frame_query.shape
        B = BT // self.num_frames if self.training else 1
        T = self.num_frames if self.training else BT // B

        frame_query = self.input_proj_dec(frame_query)  # T, fQ, LB, C

        if self.use_sim:
            pred_fq_embed = self.sim_embed_frame(frame_query)   # TfQ, LB, C
            # NOTE(review): `L` is undefined in this scope, so this branch would
            # raise NameError; unreachable while self.use_sim is hard-coded False.
            pred_fq_embed = pred_fq_embed.transpose(0, 1).reshape(L, B, T, fQ, C)
        else:
            pred_fq_embed = None

        decoder_outputs = []
        for i in range(self.num_layers):
            frame_query = self.graph_decoder_layers[i](
                frame_query=frame_query,
                lang_feat=lang_feat
            ) #[L,N,cQ,C]
            #cur_outs = self.fusion_query(frame_query) #(fQ, LB, C)
            # Keep every layer's output when training with aux loss, otherwise
            # only the last layer's.
            if (self.training and self.aux_loss) or (i == self.num_layers - 1):
                dec_out = self.decoder_norm(frame_query)
                decoder_outputs.append(dec_out.view(T, self.num_queries, C))
        decoder_outputs = torch.stack(decoder_outputs, dim=0)  # D, L, B, N, cQ, C
        #D, L, B, N, cQ, C = decoder_outputs.shape
        # NOTE(review): the stacked shape is (D, T, num_queries, C), so the name
        # `B` below actually holds T.
        D, B, cQ, C = decoder_outputs.shape
        # NOTE(review): assumes lang_feat's batch dim is 1 so the .repeat tiles
        # it to match decoder_outputs — TODO confirm.
        text_features_ = repeat(lang_feat, 'b c -> d b q c', d=D, q=cQ).repeat(1,B,1,1)
        fusion_feature = torch.cat([decoder_outputs, text_features_], dim=-1)
        pred_cls = self.class_embed(fusion_feature)
        pred_mask_embed = self.mask_embed(decoder_outputs)
        if self.use_sim and self.sim_use_clip:
            pred_cq_embed = self.sim_embed_clip(decoder_outputs)
        else:
            pred_cq_embed = [None] * self.num_layers
        out = {
            'pred_logits': pred_cls[-1],
            'pred_mask_embed': pred_mask_embed[-1],
            'pred_fq_embed': pred_fq_embed,
            'pred_cq_embed': pred_cq_embed[-1],
            'aux_outputs': self._set_aux_loss(
                pred_cls, pred_mask_embed, pred_cq_embed, pred_fq_embed
            )
        }
        return out
    @torch.jit.unused
    def _set_aux_loss(
        self, outputs_cls, outputs_mask_embed, outputs_cq_embed, outputs_fq_embed
    ):
        # Per-layer (all but the last) predictions for deep supervision; the
        # frame-query embedding is shared across layers.
        return [{"pred_logits": a, "pred_mask_embed": b, "pred_cq_embed": c, "pred_fq_embed": outputs_fq_embed}
                for a, b, c in zip(outputs_cls[:-1], outputs_mask_embed[:-1], outputs_cq_embed[:-1])]
