import torch

import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import SAGEConv, to_hetero
# Load OGB-MAG with metapath2vec-preprocessed node features; ToUndirected
# adds reverse edge types so every node type can receive messages.
dataset = OGB_MAG(root='/home/Dyf/code/dataset/data', preprocess='metapath2vec',transform=T.ToUndirected())
data = dataset[0]  # the single HeteroData graph contained in this dataset

class GNN(torch.nn.Module):
    """Two-layer GraphSAGE encoder with lazily inferred input sizes.

    Input channel counts are given as ``(-1, -1)`` so they are inferred on
    the first forward pass, which allows ``to_hetero`` to clone this module
    once per edge type of a heterogeneous graph.
    """

    def __init__(self, hidden_channels, out_channels):
        super().__init__()
        # SAGEConv defaults to aggr='mean' for neighborhood aggregation.
        self.conv1 = SAGEConv((-1, -1), hidden_channels)
        self.conv2 = SAGEConv((-1, -1), out_channels)

    def forward(self, x, edge_index):
        # First hop: aggregate neighbor features, then apply ReLU.
        hidden = self.conv1(x, edge_index).relu()
        # Second hop: produce per-node output logits (no activation).
        return self.conv2(hidden, edge_index)
# Hidden layer width is 64; out_channels = dataset.num_classes (349 here,
# per the printed output below — these are class labels, not node types).
model = GNN(hidden_channels=64, out_channels=dataset.num_classes)
# With aggr='sum', per-edge-type results targeting the same node type are
# summed, e.g. conv1__author = torch.add(conv1__author1, conv1__author2).
model = to_hetero(model, data.metadata(), aggr='sum')
# With aggr='mean', the per-edge-type results would be averaged instead.
# model = to_hetero(model, data.metadata(), aggr='mean')
print(model)
# print(model.print_readable())

"""
GraphModule(
  (conv1): ModuleDict(
    # 首先确定有 7 种关系边，前面的点指向后面的点。注意这里的 aggr=mean 代表 SAGEConv 计算过程中使用的聚合方式是 mean。
    (author__affiliated_with__institution): SAGEConv((-1, -1), 64, aggr=mean)
    (author__writes__paper): SAGEConv((-1, -1), 64, aggr=mean)
    (paper__cites__paper): SAGEConv((-1, -1), 64, aggr=mean)
    (paper__has_topic__field_of_study): SAGEConv((-1, -1), 64, aggr=mean)
    (institution__rev_affiliated_with__author): SAGEConv((-1, -1), 64, aggr=mean)
    (paper__rev_writes__author): SAGEConv((-1, -1), 64, aggr=mean)
    (field_of_study__rev_has_topic__paper): SAGEConv((-1, -1), 64, aggr=mean)
  )
  (conv2): ModuleDict(
    # 7 种关系边。
    (author__affiliated_with__institution): SAGEConv((-1, -1), 349, aggr=mean)
    (author__writes__paper): SAGEConv((-1, -1), 349, aggr=mean)
    (paper__cites__paper): SAGEConv((-1, -1), 349, aggr=mean)
    (paper__has_topic__field_of_study): SAGEConv((-1, -1), 349, aggr=mean)
    (institution__rev_affiliated_with__author): SAGEConv((-1, -1), 349, aggr=mean)
    (paper__rev_writes__author): SAGEConv((-1, -1), 349, aggr=mean)
    (field_of_study__rev_has_topic__paper): SAGEConv((-1, -1), 349, aggr=mean)
  )
)
def forward(self, x, edge_index):
    x_dict = torch_geometric_nn_to_hetero_transformer_get_dict(x);  x = None
    # 四种节点
    x__paper = x_dict.get('paper', None)
    x__author = x_dict.get('author', None)
    x__institution = x_dict.get('institution', None)
    x__field_of_study = x_dict.get('field_of_study', None);  x_dict = None
    
    # 以下的这一行计算了所有的索引张量并组成一个字典形式用于进行不同边的聚合运算。 
    edge_index_dict = torch_geometric_nn_to_hetero_transformer_get_dict(edge_index);  edge_index = None
    # 7种关系，注意最后面的指向目的名称。
    edge_index__author__affiliated_with__institution = edge_index_dict.get(('author', 'affiliated_with', 'institution'), None)
    edge_index__author__writes__paper = edge_index_dict.get(('author', 'writes', 'paper'), None)
    edge_index__paper__cites__paper = edge_index_dict.get(('paper', 'cites', 'paper'), None)
    edge_index__paper__has_topic__field_of_study = edge_index_dict.get(('paper', 'has_topic', 'field_of_study'), None)
    edge_index__institution__rev_affiliated_with__author = edge_index_dict.get(('institution', 'rev_affiliated_with', 'author'), None)
    edge_index__paper__rev_writes__author = edge_index_dict.get(('paper', 'rev_writes', 'author'), None)
    edge_index__field_of_study__rev_has_topic__paper = edge_index_dict.get(('field_of_study', 'rev_has_topic', 'paper'), None);  edge_index_dict = None
    
    conv1__institution = self.conv1.author__affiliated_with__institution((x__author, x__institution), edge_index__author__affiliated_with__institution)
    conv1__paper1 = self.conv1.author__writes__paper((x__author, x__paper), edge_index__author__writes__paper)
    conv1__paper2 = self.conv1.paper__cites__paper(x__paper, edge_index__paper__cites__paper)
    conv1__field_of_study = self.conv1.paper__has_topic__field_of_study((x__paper, x__field_of_study), edge_index__paper__has_topic__field_of_study)
    # author1 是关联了周围institution信息的新author信息。
    conv1__author1 = self.conv1.institution__rev_affiliated_with__author((x__institution, x__author), edge_index__institution__rev_affiliated_with__author);  x__institution = None
    # 对于批次处理的所有paper和author节点，都采用计算author周围指向自己的paper信息聚合到author并和author信息组合经过一个全连接激活函数，最后处理的结果成为新的author信息。
    # 简单来说 author2 是带有其周围所有paper信息的新的author特征，即关联了周围paper节点信息的新author信息；其他节点的计算方法与此相同。
    conv1__author2 = self.conv1.paper__rev_writes__author((x__paper, x__author), edge_index__paper__rev_writes__author);  x__author = None
    conv1__paper3 = self.conv1.field_of_study__rev_has_topic__paper((x__field_of_study, x__paper), edge_index__field_of_study__rev_has_topic__paper);  x__field_of_study = x__paper = None
    conv1__paper_1 = torch.add(conv1__paper1, conv1__paper2);  conv1__paper1 = conv1__paper2 = None
    conv1__paper = torch.add(conv1__paper3, conv1__paper_1);  conv1__paper3 = conv1__paper_1 = None
    # 将两个author信息做和，之后再经过激活处理。
    conv1__author = torch.add(conv1__author1, conv1__author2);  conv1__author1 = conv1__author2 = None
    relu__paper = conv1__paper.relu();  conv1__paper = None
    #激活处理后得到新的融合特征。
    relu__author = conv1__author.relu();  conv1__author = None
    relu__institution = conv1__institution.relu();  conv1__institution = None
    relu__field_of_study = conv1__field_of_study.relu();  conv1__field_of_study = None
    # 再算一次。
    conv2__institution = self.conv2.author__affiliated_with__institution((relu__author, relu__institution), edge_index__author__affiliated_with__institution);  edge_index__author__affiliated_with__institution = None
    conv2__paper1 = self.conv2.author__writes__paper((relu__author, relu__paper), edge_index__author__writes__paper);  edge_index__author__writes__paper = None
    conv2__paper2 = self.conv2.paper__cites__paper(relu__paper, edge_index__paper__cites__paper);  edge_index__paper__cites__paper = None
    conv2__field_of_study = self.conv2.paper__has_topic__field_of_study((relu__paper, relu__field_of_study), edge_index__paper__has_topic__field_of_study);  edge_index__paper__has_topic__field_of_study = None
    conv2__author1 = self.conv2.institution__rev_affiliated_with__author((relu__institution, relu__author), edge_index__institution__rev_affiliated_with__author);  relu__institution = edge_index__institution__rev_affiliated_with__author = None
    conv2__author2 = self.conv2.paper__rev_writes__author((relu__paper, relu__author), edge_index__paper__rev_writes__author);  relu__author = edge_index__paper__rev_writes__author = None
    conv2__paper3 = self.conv2.field_of_study__rev_has_topic__paper((relu__field_of_study, relu__paper), edge_index__field_of_study__rev_has_topic__paper);  relu__field_of_study = relu__paper = edge_index__field_of_study__rev_has_topic__paper = None
    conv2__paper_1 = torch.add(conv2__paper1, conv2__paper2);  conv2__paper1 = conv2__paper2 = None
    conv2__paper = torch.add(conv2__paper3, conv2__paper_1);  conv2__paper3 = conv2__paper_1 = None
    conv2__author = torch.add(conv2__author1, conv2__author2);  conv2__author1 = conv2__author2 = None
    return {'paper': conv2__paper, 'author': conv2__author, 'institution': conv2__institution, 'field_of_study': conv2__field_of_study}

1、基于边进行聚合运算。
2、计算的结果sum汇总到指向的目的节点。
3、计算的顺序相当重要。必须将其中一层的信息都计算之后，才能进行特征融合，然后才可以进行下一层计算。避免混乱。
4、计算过程中是基于目的节点的汇总，每一个卷积层第二个参数，是融合目的地。
"""