import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

class DisenGCN(nn.Module):
    """Disentangled Graph Convolutional Network.

    Stacks an initial disentangling projection followed by ``num_layers``
    neighborhood-routing layers; the flattened factor embeddings are
    returned as the final node representation.

    Args:
        inp_dim: dimensionality of the raw input features.
        hid_dim: total hidden width shared across all factor channels.
        init_k: number of factor channels for the first layer.
        delta_k: how many factor channels to drop after each layer.
        routit: number of routing iterations per layer.
        tau: softmax temperature used by the routing attention.
        dropout: dropout probability applied after every conv layer.
        num_layers: number of routing (conv) layers.
    """

    def __init__(self,
                 inp_dim,
                 hid_dim,
                 init_k,
                 delta_k,
                 routit,
                 tau,
                 dropout,
                 num_layers,
                 **kwargs):
        super(DisenGCN, self).__init__()
        self.init_disen = InitDisenLayer(inp_dim, hid_dim, init_k)

        # Fix: removed the dead `fac_dim` local (computed, never used) and
        # the unused loop index.
        # NOTE(review): Z keeps init_k factor channels throughout forward();
        # the shrinking k only parameterizes each RoutingLayer — confirm that
        # delta_k > 0 is actually exercised/supported downstream.
        self.conv_layers = nn.ModuleList()
        k = init_k
        for _ in range(num_layers):
            self.conv_layers.append(RoutingLayer(k, routit, tau))
            k -= delta_k

        self.dropout = dropout

    def _dropout(self, X):
        # Respects self.training, so dropout is a no-op in eval mode.
        return F.dropout(X, p=self.dropout, training=self.training)

    def forward(self, X, adj):
        """Compute node embeddings.

        Args:
            X: node feature matrix, shape [N, inp_dim].
            adj: adjacency matrix, passed through to the routing layers.

        Returns:
            Node embeddings flattened across factors, shape [N, K * D].
        """
        Z = self.init_disen(X)  # [N, K, D]
        for disen_conv in self.conv_layers:
            Z = disen_conv(Z, adj)
            Z = self._dropout(torch.relu(Z))
        return Z.reshape(len(Z), -1)

class InitDisenLayer(nn.Module):
    """Initial disentangling layer.

    A single shared linear projection whose output is split into
    ``num_factors`` channels; each channel is ReLU-activated and
    L2-normalized onto the unit sphere.
    """

    def __init__(self, inp_dim, hid_dim, num_factors):
        super(InitDisenLayer, self).__init__()
        self.inp_dim = inp_dim
        # Round the hidden width down so it divides evenly into factors.
        self.hid_dim = (hid_dim // num_factors) * num_factors
        self.num_factors = num_factors
        self.factor_lins = nn.Linear(self.inp_dim, self.hid_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier weights and zero biases for every linear submodule.
        linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)

    def forward(self, X):
        """Map [N, inp_dim] features to unit-norm factor embeddings of
        shape [N, num_factors, hid_dim // num_factors]."""
        factor_dim = self.hid_dim // self.num_factors
        projected = self.factor_lins(X)
        factors = projected.view(-1, self.num_factors, factor_dim)
        return F.normalize(torch.relu(factors), dim=2)


# Feature disentangle layer
class RoutingLayer(nn.Module):
    """Neighborhood-routing layer of DisenGCN (Ma et al., ICML 2019).

    Iteratively refines per-node factor embeddings by attending over each
    node's neighbors with one attention distribution per factor channel.

    Args:
        num_factors: number of disentangled factor channels K.
        routit: number of routing iterations.
        tau: softmax temperature for the attention scores.
    """

    def __init__(self, num_factors, routit, tau):
        super(RoutingLayer, self).__init__()
        self.num_factors = num_factors
        self.routit = routit
        self.tau = tau

    def forward(self, x, adj, sparse=True):
        """Run ``routit`` routing iterations.

        Args:
            x: node factor embeddings, shape [N, K, D], one unit-norm
                embedding per factor channel.
            adj: adjacency matrix [N, N]; a torch sparse COO tensor when
                ``sparse=True``, a dense tensor otherwise. Nonzero entries
                are treated as (weighted) edges.

        Returns:
            Updated factor embeddings, shape [N, K, D].
        """
        z = x  # neighbor messages, fixed across iterations  [N, K, D]
        c = x  # receiver's current factor embedding          [N, K, D]
        n = x.size(0)

        # Materialize the adjacency once. The score tensor below is dense
        # [N, N, K, 1] anyway, so a dense mask adds no asymptotic memory.
        # (Fix: replaces a per-edge Python loop, O(E) interpreter work.)
        a = adj.to_dense() if sparse else adj
        edge_mask = (a != 0).view(n, n, 1, 1)
        edge_weight = a.view(n, n, 1, 1)

        for _ in range(self.routit):
            # Per-factor attention score: p[i, j, k] = <c_i^k, z_j^k>.
            # (Fix: the previous matmul produced [N, N, K, K] and summed the
            # last dim, mixing scores across factor channels.)
            p = (c.unsqueeze(1) * z.unsqueeze(0)).sum(dim=-1, keepdim=True)  # [N, N, K, 1]

            # Scale by edge weight and restrict the softmax to neighbors.
            # (Fix: zero-masking non-edges gave every non-neighbor an
            # exp(0) share of attention mass; mask with -inf instead.)
            p = p * edge_weight
            p = p.masked_fill(~edge_mask, float('-inf'))
            p = F.softmax(p / self.tau, dim=1)  # attention over neighbors j
            # Isolated nodes yield an all -inf row -> NaN; zero them out so
            # such nodes simply keep their own (normalized) embedding.
            p = torch.nan_to_num(p, nan=0.0)

            # Aggregate neighbor messages and add the node's own message.
            c = z + (p * z.unsqueeze(0)).sum(dim=1)  # [N, K, D]

            # Keep every factor embedding on the unit sphere.
            c = F.normalize(c, dim=2)

        return c