"""Classes for SimGNN modules."""

import torch
from torch_geometric.nn import GCNConv
from torch.autograd import Variable


class AttentionModule(torch.nn.Module):
    """
    SimGNN attention module: pools per-node GCN embeddings into a single
    graph-level representation vector via sigmoid attention weights.
    """

    def __init__(self, args):
        """
        :param args: Arguments object (uses ``args.filters_3``).
        """
        super(AttentionModule, self).__init__()
        self.args = args
        # Learnable square transform W: [filters_3, filters_3].
        self.weight_matrix = torch.nn.Parameter(
            torch.Tensor(self.args.filters_3, self.args.filters_3))
        torch.nn.init.xavier_uniform_(self.weight_matrix)

    def forward(self, embedding):
        """
        Create a graph-level representation from node embeddings.
        :param embedding: GCN output, shape [num_nodes, filters_3].
        :return representation: Graph-level vector, shape [filters_3, 1].
        """
        # Mean of transformed node embeddings over the node axis -> [filters_3].
        transformed = embedding @ self.weight_matrix
        global_context = transformed.mean(dim=0)
        # Non-linear global context "c" from the SimGNN paper.
        context = torch.tanh(global_context)
        # One attention weight per node in (0, 1): [num_nodes, 1].
        attention_weights = torch.sigmoid(embedding @ context.view(-1, 1))
        # Attention-weighted sum of node embeddings -> [filters_3, 1].
        representation = embedding.t() @ attention_weights
        return representation


class TenorNetworkModule(torch.nn.Module):
    """
    SimGNN neural tensor network: combines two graph-level embeddings
    into a similarity vector.

    NOTE: the class name keeps the original spelling ("Tenor") because
    other code in this file constructs it by that name.
    """

    def __init__(self, args):
        """
        :param args: Arguments object (uses ``args.filters_3`` and
                     ``args.tensor_neurons``).
        """
        super(TenorNetworkModule, self).__init__()
        self.args = args
        dim = self.args.filters_3
        neurons = self.args.tensor_neurons
        # Bilinear interaction tensor W: [filters_3, filters_3, tensor_neurons].
        self.weight_matrix = torch.nn.Parameter(torch.Tensor(dim, dim, neurons))
        # Linear block V applied to concatenated embeddings: [tensor_neurons, 2*filters_3].
        self.weight_matrix_block = torch.nn.Parameter(
            torch.Tensor(neurons, 2 * dim))
        # Bias b: [tensor_neurons, 1].
        self.bias = torch.nn.Parameter(torch.Tensor(neurons, 1))
        torch.nn.init.xavier_uniform_(self.weight_matrix)
        torch.nn.init.xavier_uniform_(self.weight_matrix_block)
        torch.nn.init.xavier_uniform_(self.bias)

    def forward(self, embedding_1, embedding_2):
        """
        Compute the similarity vector for two pooled graph embeddings.
        :param embedding_1: First graph-level embedding, shape [filters_3, 1].
        :param embedding_2: Second graph-level embedding, shape [filters_3, 1].
        :return scores: Similarity vector, shape [tensor_neurons, 1].
        """
        dim = self.args.filters_3
        neurons = self.args.tensor_neurons

        # Bilinear term e1^T W e2 computed slice-wise:
        # [1, dim] @ [dim, dim*neurons] -> [1, dim*neurons] -> [dim, neurons].
        bilinear = embedding_1.t() @ self.weight_matrix.view(dim, -1)
        bilinear = bilinear.view(dim, neurons)
        # [neurons, dim] @ [dim, 1] -> [neurons, 1].
        bilinear = bilinear.t() @ embedding_2

        # Linear term V [e1; e2]: [neurons, 2*dim] @ [2*dim, 1] -> [neurons, 1].
        stacked = torch.cat((embedding_1, embedding_2))
        linear = self.weight_matrix_block @ stacked

        # ReLU(bilinear + linear + bias), elementwise: [neurons, 1].
        return torch.nn.functional.relu(bilinear + linear + self.bias)


class SimGNN(torch.nn.Module):
    """
    SimGNN: A Neural Network Approach to Fast Graph Similarity Computation
    https://arxiv.org/abs/1808.05689
    """

    def __init__(self, args, number_of_labels):
        """
        :param args: Arguments object with ``embedding_dim``, ``filters_1``,
                     ``filters_2``, ``filters_3``, ``tensor_neurons``,
                     ``bottle_neck_neurons`` and ``dropout``.
        :param number_of_labels: Number of node labels (embedding vocabulary size).
        """
        super(SimGNN, self).__init__()
        self.args = args
        self.number_labels = number_of_labels
        print(f'number_labels: {self.number_labels}')
        # Width of the similarity vector produced by the tensor network.
        self.feature_count = self.args.tensor_neurons
        # Node-label lookup: label id -> dense feature vector.
        self.embedding = torch.nn.Embedding(
            self.number_labels, self.args.embedding_dim)

        # Three stacked GCN layers:
        # embedding_dim -> filters_1 -> filters_2 -> filters_3.
        self.convs1 = GCNConv(self.args.embedding_dim, self.args.filters_1)
        self.convs2 = GCNConv(self.args.filters_1, self.args.filters_2)
        self.convs3 = GCNConv(self.args.filters_2, self.args.filters_3)

        # Attention pooling: node embeddings -> graph vector [filters_3, 1].
        self.attention = AttentionModule(self.args)
        # Neural tensor network combining the two graph-level vectors.
        self.tensor_network = TenorNetworkModule(self.args)

        # [tensor_neurons] -> [bottle_neck_neurons] -> scalar logit.
        self.fully_connected_first = torch.nn.Linear(self.feature_count,
                                                     self.args.bottle_neck_neurons)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)

    def convolution(self):
        """
        Build a fresh ModuleList holding the three GCN layers.

        NOTE(review): the returned layers are newly initialized and are NOT
        registered on this module unless the caller assigns the result to an
        attribute; this helper is currently unused by :meth:`forward`.
        """
        modulelists = torch.nn.ModuleList()
        modulelists.append(
            GCNConv(self.args.embedding_dim, self.args.filters_1))
        modulelists.append(GCNConv(self.args.filters_1, self.args.filters_2))
        modulelists.append(GCNConv(self.args.filters_2, self.args.filters_3))
        return modulelists

    def convolutional_pass(self, edge_index, features):
        """
        Run the three-layer GCN stack with ReLU + dropout between layers.

        Fixed: this method previously indexed ``self.convs``, an attribute
        that is never created (only ``convs1/convs2/convs3`` exist), so any
        call raised AttributeError.

        :param edge_index: Edge indices, shape [2, E].
        :param features: Node feature matrix, shape [N, embedding_dim].
        :return features: Abstract feature matrix, shape [N, filters_3].
        """
        features = self.convs1(features, edge_index)
        features = torch.nn.functional.relu(features)
        features = torch.nn.functional.dropout(features,
                                               p=self.args.dropout,
                                               training=self.training)
        features = self.convs2(features, edge_index)
        features = torch.nn.functional.relu(features)
        features = torch.nn.functional.dropout(features,
                                               p=self.args.dropout,
                                               training=self.training)
        # No activation after the final layer: raw node embeddings feed the
        # attention module.
        features = self.convs3(features, edge_index)
        return features

    def forward(self, data):
        """
        Forward pass with a pair of graphs.

        :param data: Dict with keys "edge_index_1", "edge_index_2",
                     "features_1", "features_2" (label-id tensors).
        :return score: Similarity logit, shape [1, 1].
        """
        edge_index_1 = data["edge_index_1"]
        edge_index_2 = data["edge_index_2"]

        # Label ids -> dense node features: [num_nodes, embedding_dim].
        features_1 = self.embedding(data["features_1"])
        features_2 = self.embedding(data["features_2"])

        # Shared GCN stack (previously duplicated inline for both graphs):
        # [num_nodes, embedding_dim] -> [num_nodes, filters_3].
        abstract_features_1 = self.convolutional_pass(edge_index_1, features_1)
        abstract_features_2 = self.convolutional_pass(edge_index_2, features_2)

        # Attention pooling per graph: [num_nodes, filters_3] -> [filters_3, 1].
        pooled_features_1 = self.attention(abstract_features_1)
        pooled_features_2 = self.attention(abstract_features_2)

        # Neural tensor network -> similarity vector [tensor_neurons, 1].
        scores = self.tensor_network(pooled_features_1, pooled_features_2)
        # Transpose for the linear layers: [1, tensor_neurons].
        scores = torch.t(scores)

        # [1, tensor_neurons] -> [1, bottle_neck_neurons].
        scores = torch.nn.functional.relu(self.fully_connected_first(scores))
        # [1, bottle_neck_neurons] -> [1, 1] logit (not [1, 2] as the old
        # comment claimed; scoring_layer has a single output unit).
        return self.scoring_layer(scores)
