import torch
import pandas as pd
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import sys, math


def process_adj(A):
    """Prepare an adjacency matrix for graph convolution.

    Adds a self loop to every node (A_hat = A + I) and builds the diagonal
    inverse-square-root degree matrix D^{-1/2}, where each node's degree is
    the number of 1.0-valued entries in its row plus one for the self loop.

    Args:
        A: square adjacency matrix (list of lists or ndarray) whose edges
           are marked with the value 1.0.

    Returns:
        (A_hat, D): pair of torch.Tensor — the self-loop-augmented adjacency
        matrix and D^{-1/2}. A normalized adjacency can then be obtained as
        D.mm(A_hat).mm(D).
    """
    # np.matrix is deprecated; work with a plain ndarray instead.
    A = np.asarray(A, dtype=np.float64)
    # Degree = count of exact 1.0 entries per row, plus 1 for the self loop
    # (same counting rule as the original element-wise loop, vectorized).
    degrees = (A == 1.0).sum(axis=1) + 1
    # For a diagonal matrix, inv(cholesky(D)) is simply diag(1/sqrt(d)),
    # so compute it directly instead of factorizing and inverting.
    D = np.diag(1.0 / np.sqrt(degrees))
    # A_hat = A + I (adjacency with self loops)
    A_hat = A + np.eye(len(A))
    return torch.Tensor(A_hat), torch.Tensor(D)  # the same as Variable
# D.mm(A).mm(D)

class PunishGraphAttentionLayer(nn.Module):
    """Single-head graph attention layer (GAT, https://arxiv.org/abs/1710.10903)
    with an optional per-node score multiplier on the projected features.

    Args:
        in_features: input feature dimension per node.
        out_features: output feature dimension per node.
        dropout: dropout probability applied to the attention weights.
        alpha: negative slope of the LeakyReLU applied to attention logits.
        concat: when True, apply an ELU non-linearity to the output.
    """

    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super(PunishGraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features   # input_dim
        self.out_features = out_features  # output_dim
        self.alpha = alpha
        self.concat = concat  # True: ELU, or False: No ELU
        # Node projection (W) and attention scoring (a), both
        # Xavier-initialised as in the reference GAT implementation.
        self.W = nn.Linear(in_features, out_features)
        nn.init.xavier_uniform_(self.W.weight, gain=1.414)
        self.a = nn.Linear(2 * out_features, 1)
        nn.init.xavier_uniform_(self.a.weight, gain=1.414)

        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, h, adj, scores):
        """Attend over neighbours and return updated node features.

        h:      (batch_size, num_of_nodes, in_features) node features.
        adj:    (batch_size, num_of_nodes, num_of_nodes) mask; > 0 marks edges.
        scores: optional multiplier (broadcastable to the projected features),
                applied to the projected node features before aggregation.
        """
        projected = self.W(h)  # (batch_size, num_of_nodes, output_dim)
        pair_feats = self._prepare_attentional_mechanism_input(projected)
        # Attention logits for every ordered node pair.
        logits = self.leakyrelu(self.a(pair_feats).squeeze(-1))
        # Mask non-edges with a huge negative value so softmax zeroes them.
        masked_off = -9e15 * torch.ones_like(logits)
        attn = torch.where(adj > 0, logits, masked_off)
        if scores is not None:
            # Punish/boost each node's contribution by its score.
            projected = projected * scores

        attn = F.softmax(attn, dim=-1)
        attn = F.dropout(attn, self.dropout, training=self.training)
        # Aggregate neighbour features weighted by attention.
        aggregated = torch.bmm(attn, projected)  # (batch_size, num_of_nodes, output_dim)
        return F.elu(aggregated) if self.concat else aggregated

    def _prepare_attentional_mechanism_input(self, Wh):
        """Build every ordered pair of node embeddings, concatenated.

        Wh: (batch_size, num_of_nodes, out_features) projected features.
        Returns a (batch_size, N, N, 2 * out_features) tensor whose entry
        [b, i, j] is embedding_i || embedding_j (|| = concatenation).
        """
        bs = Wh.size(0)
        N = Wh.size(1)  # number of nodes
        # Broadcast views instead of repeat+reshape: `rows` fixes node i
        # along dim 1, `cols` fixes node j along dim 2; concatenating them
        # on the last dim yields e_i || e_j for every (i, j) pair.
        rows = Wh.unsqueeze(2).expand(bs, N, N, self.out_features)
        cols = Wh.unsqueeze(1).expand(bs, N, N, self.out_features)
        return torch.cat([rows, cols], dim=3)

#  The class above implements the forward pass of one GAT layer — computing attention
#  coefficients, the attention matrix, the linear projection of node features, and the
#  non-linearity — used to propagate node features in a graph neural network.

class punishGAT(nn.Module):
    """Multi-head punishment GAT.

    Runs ``n_heads`` PunishGraphAttentionLayer instances in parallel and
    concatenates their outputs along the feature dimension.

    Args:
        n_feat: input feature dimension per node.
        n_hid: per-head hidden (output) dimension — relatively small.
        dropout: dropout probability applied before and after attention.
        alpha: LeakyReLU negative slope forwarded to every head.
        n_heads: number of attention heads.
        q_attn: stored flag; not consumed inside this class.
    """

    def __init__(self, n_feat, n_hid, dropout, alpha, n_heads, q_attn=True):
        super(punishGAT, self).__init__()
        self.dropout = dropout
        self.q_attn = q_attn
        heads = []
        for head_idx in range(n_heads):
            head = PunishGraphAttentionLayer(n_feat, n_hid, dropout=dropout,
                                             alpha=alpha, concat=True)
            # Explicit registration so PyTorch tracks each head's parameters
            # (the heads live in a plain Python list, not a ModuleList);
            # keys are attention_0, attention_1, ...
            self.add_module('attention_{}'.format(head_idx), head)
            heads.append(head)
        self.attentions = heads

    def forward(self, x, adj, scores):
        """Apply every attention head and concatenate the results on dim 2."""
        x = F.dropout(x, self.dropout, training=self.training)  # regularisation
        per_head = [head(x, adj, scores) for head in self.attentions]
        x = torch.cat(per_head, dim=2)  # multi-head concat along features
        x = F.dropout(x, self.dropout, training=self.training)
        return x
    

 # demo-4: KNN Graph — can be used to build the adjacency matrix for a graph neural network
from sklearn.metrics.pairwise import cosine_similarity as cos
from sklearn.metrics import pairwise_distances as pair

def construct_graph(features, topk):
    """Build a KNN adjacency matrix from pairwise cosine similarity.

    For each sample, the ``topk + 1`` most similar samples are marked with 1
    (the extra one normally covers the sample itself, since self-similarity
    is maximal).

    Args:
        features: (n_samples, n_features) array-like of sample features.
        topk: number of neighbours to keep per node (besides the self match).

    Returns:
        (n_samples, n_samples) numpy array with 1 at neighbour positions and
        0 elsewhere. Note: not symmetric in general.
    """
    similarity = cos(features)
    n = similarity.shape[0]
    adjacency = np.zeros((n, n))
    for row in range(n):
        # argpartition places the topk+1 largest similarities in the tail.
        neighbours = np.argpartition(similarity[row, :], -(topk + 1))[-(topk + 1):]
        adjacency[row, neighbours] = 1
    return adjacency



