import torch
from torch import nn
from src.model.attention import Attention
from typing import Tuple
import math


class AGraph(nn.Module):
    """Learns-free construction of a sparse adjacency matrix from node
    feature sequences.

    The time axis is split in half; the (centered, L2-normalized) second
    half of each node's sequence is cross-correlated against the first
    half of every other node's sequence, correlations are averaged over
    the channel axis, and only the top-k incoming edges per column are
    kept (softmax-weighted).
    """

    def __init__(self, dims: Tuple[int, int, int], seq_len, num_nodes, k: int = 5):
        """
        Args:
            dims: ``(input_size, hidden_size, output_size)``.
            seq_len: input sequence length (stored; not read in ``forward``).
            num_nodes: number of graph nodes (stored; not read in ``forward``).
            k: number of edges kept per adjacency column; must be
                ``<= num_nodes``. Defaults to 5 (the previously
                hard-coded value), so existing callers are unaffected.
        """
        super().__init__()
        self.seq_len = seq_len
        self.num_nodes = num_nodes
        self.input_size, self.hidden_size, self.output_size = dims
        self.k = k

    def forward(self, x):
        """Build the sparse adjacency matrix.

        Args:
            x: tensor of shape ``(B, T, N, H_hf)`` — batch, time, nodes,
                channels.

        Returns:
            Tensor of shape ``(B, N, N)``: each column carries softmax
            weights on its top-k entries and zeros elsewhere.
        """
        B, T, N, H_hf = x.shape
        # Fold the channel axis into the batch: (B * H_hf, N, T).
        x = x.permute(0, 3, 2, 1).contiguous()
        x = x.reshape(-1, N, T)

        # Center each time-half per node and normalize by its L2 norm;
        # the 0.001 epsilon guards against zero-norm rows.
        # NOTE(review): fx/bx are already mean-centered, so the second
        # mean subtraction below is a no-op (up to float noise); kept
        # as-is for numerical parity with the original.
        fx = x[:, :, :T // 2] - x[:, :, :T // 2].mean(dim=-1, keepdim=True)
        nfx = (fx - torch.mean(fx, dim=-1, keepdim=True)) / (torch.norm(fx, dim=-1, keepdim=True) + 0.001)
        bx = x[:, :, T // 2:] - x[:, :, T // 2:].mean(dim=-1, keepdim=True)
        nbx = (bx - torch.mean(bx, dim=-1, keepdim=True)) / (torch.norm(bx, dim=-1, keepdim=True) + 0.001)

        # Cross-correlate backward half against forward half: (B*H_hf, N, N).
        nfx_t = nfx.permute(0, 2, 1).contiguous()
        adj = torch.matmul(nbx, nfx_t)
        # NOTE(review): nbx/nfx are already unit-normalized, so both
        # divisions below are ~1; also the nfx norm scales rows here,
        # whereas a cosine normalization would scale columns (i.e. use
        # the transposed norm). Confirm intent before relying on exact
        # edge weights; kept as-is to preserve current behavior.
        adj /= torch.norm(nbx, dim=-1, keepdim=True)
        adj /= torch.norm(nfx, dim=-1, keepdim=True)

        # Average correlations over the folded channel axis: (B, N, N).
        adj = adj.reshape(B, H_hf, N, N)
        adj = torch.mean(adj, dim=1, keepdim=False)

        # Keep only the k strongest entries per column (dim=-2) and
        # softmax the survivors so each column's kept weights sum to 1.
        topk_values, topk_indices = torch.topk(adj, self.k, dim=-2)
        topk_values_soft = torch.softmax(topk_values, dim=-2)
        sparse_adj = torch.zeros_like(adj)  # all-zero matrix
        sparse_adj.scatter_(-2, topk_indices, topk_values_soft)  # fill only the top-k positions
        return sparse_adj