import torch
from torch import nn
from src.model.space_model.gconv import Gconv
from typing import Tuple
import math


class DGraph(nn.Module):
    """Learn a sparse, directed adjacency matrix from node features.

    Two graph convolutions produce feature-dependent filters that modulate
    learnable node embeddings; the antisymmetric similarity of the modulated
    embeddings yields a dense directed adjacency, which is then sparsified by
    keeping only the top-k strongest incoming edges per node.
    """

    def __init__(self, dims: Tuple[int, int, int], num_nodes: int,
                 alpha: float = 1.0, k: int = 5):
        """
        Args:
            dims: (input_size, hidden_size, output_size), forwarded to Gconv.
            num_nodes: number of graph nodes N.
            alpha: saturation factor applied inside tanh (default 1.0,
                matching the previous hard-coded value).
            k: entries kept per adjacency column (default 5, matching the
                previous hard-coded value); clamped to num_nodes so topk
                cannot fail on graphs with fewer than k nodes.
        """
        super().__init__()
        self.num_nodes = num_nodes
        self.input_size, self.hidden_size, self.output_size = dims

        self.gconv1 = Gconv(dims)
        self.gconv2 = Gconv(dims)

        # Buffer (not a parameter): follows .to(device) and state_dict.
        self.register_buffer('idx', torch.arange(num_nodes))
        self.emb1 = nn.Embedding(num_nodes, self.hidden_size)
        self.emb2 = nn.Embedding(num_nodes, self.hidden_size)
        self.alpha = alpha
        self.k = min(k, num_nodes)  # robust when the graph has < k nodes

    def forward(self, x, A):
        """Compute a sparse directed adjacency from features and a prior graph.

        Args:
            x: node features — assumes shape (B, N, T * H); TODO confirm
                against Gconv's expected input.
            A: prior adjacency, shape (B, N, N).

        Returns:
            Tensor of shape (B, N, N) where each column holds a softmax
            distribution over its top-k strongest entries and zeros elsewhere.
        """
        nodevec1 = self.emb1(self.idx)
        nodevec2 = self.emb2(self.idx)

        # Feature-dependent filters make the learned graph dynamic with
        # respect to the current input.
        filter1 = self.gconv1(x, A)
        filter2 = self.gconv2(x, A)
        nodevec1 = torch.tanh(self.alpha * torch.mul(nodevec1, filter1))
        nodevec2 = torch.tanh(self.alpha * torch.mul(nodevec2, filter2))

        # Antisymmetric score M - M^T keeps at most one direction per node
        # pair positive; relu(tanh(.)) squashes scores into [0, 1).
        a = torch.relu(torch.tanh(torch.matmul(nodevec1, nodevec2.transpose(-2, -1)) - torch.matmul(nodevec2, nodevec1.transpose(-2, -1))))

        # Keep the k largest scores per column, renormalize them with a
        # softmax, and scatter back into an otherwise all-zero matrix.
        topk_values, topk_indices = torch.topk(a, self.k, dim=-2)
        topk_values_soft = torch.softmax(topk_values, dim=-2)
        sparse_adj = torch.zeros_like(a)
        sparse_adj.scatter_(-2, topk_indices, topk_values_soft)
        return sparse_adj