"""
图神经网络
Graph Convolutional Networks for Text Classification (Yao et al., AAAI 2019)
GRAPH ATTENTION NETWORKS (Velickovic et al., ICLR 2017)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class GraphConvolution(nn.Module):
    """Single graph-convolution layer: output = A @ (X W) + b.

    ``A`` is a sparse adjacency matrix (presumably already normalized by the
    caller — TODO confirm) and ``X`` the dense node-feature matrix.
    """

    def __init__(self, input_dim, output_dim, use_bias=True):
        """
        :param input_dim: dimensionality of the input node features
        :param output_dim: dimensionality of the output features
        :param use_bias: whether to add a learnable bias after aggregation
        """
        super(GraphConvolution, self).__init__()
        self.use_bias = use_bias
        # The Linear layer holds the weight matrix W; its bias is disabled so
        # the bias can be added AFTER the sparse aggregation (standard GCN).
        self.weight = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
        if self.use_bias:
            # BUGFIX: torch.Tensor(output_dim) allocates UNINITIALIZED memory
            # (garbage values, possibly NaN). Initialize the bias to zeros.
            # requires_grad=True is the nn.Parameter default.
            self.bias = nn.Parameter(torch.zeros(output_dim))
        else:
            # Register a None placeholder so `self.bias` always exists.
            self.register_parameter('bias', None)

    def forward(self, adjacency, input_feature):
        """
        :param adjacency: sparse adjacency matrix A (torch sparse tensor, N x N)
        :param input_feature: dense input features X of shape (N, input_dim)
        :return: aggregated features of shape (N, output_dim)
        """
        support = self.weight(input_feature)          # X W -> (N, output_dim)
        # torch.sparse.mm(sparse_matrix, dense_matrix): A (X W)
        output = torch.sparse.mm(adjacency, support)
        if self.use_bias:
            output = output + self.bias
        return output


class TextGCN(nn.Module):
    """Two-layer graph convolutional network for node classification."""

    def __init__(self, input_dim, hidden_size, num_classes):
        """
        Stack two GraphConvolution layers: input -> hidden -> class logits.

        :param input_dim: dimensionality of the input node features
        :param hidden_size: width of the hidden GCN layer
        :param num_classes: number of output classes
        """
        super(TextGCN, self).__init__()
        self.gcn1 = GraphConvolution(input_dim, hidden_size)
        self.gcn2 = GraphConvolution(hidden_size, num_classes)

    def forward(self, adjacency, feature):
        """Run both GCN layers (ReLU in between) and return class logits."""
        hidden = self.gcn1(adjacency, feature)
        hidden = F.relu(hidden)
        return self.gcn2(adjacency, hidden)


if __name__ == '__main__':
    # Module only defines model classes; no demo/training entry point yet.
    pass
