import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
import torch.nn.functional as F
import random


class Encoder(nn.Module):
    """
    Encodes a node's features using the 'convolutional' GraphSAGE approach:
    aggregate the sampled neighborhood, optionally concatenate with the
    node's own features, then apply a learned linear map plus ReLU.
    """

    def __init__(self, features, feature_dim,
                 embed_dim, adj_lists, aggregator,
                 num_sample=10,
                 base_model=None, gcn=False, cuda=False,
                 feature_transform=False):
        """
        features    -- function mapping a LongTensor of node ids to a
                       FloatTensor of raw feature values
        feature_dim -- input (pre-transform) feature dimensionality
        embed_dim   -- output (post-transform) embedding dimensionality
        adj_lists   -- mapping from node id to the set of its neighbors
        aggregator  -- neighbor-aggregation module, e.g.
                       agg1 = MeanAggregator(features, cuda=True)
        num_sample  -- number of neighbors to sample per node
        base_model  -- lower encoder layer; registered as an attribute so its
                       parameters are tracked when layers are stacked
        gcn         -- if True, represent a node by its aggregated neighbors
                       only (GCN-style); if False, concatenate
                       "self || neighbors" (GraphSAGE-style)
        cuda        -- whether to run on GPU
        """
        super(Encoder, self).__init__()

        self.features = features
        # Pre-transform feature dimensionality.
        self.feat_dim = feature_dim
        self.adj_lists = adj_lists
        self.aggregator = aggregator
        self.num_sample = num_sample
        # Identity comparison with None, not "!= None".
        if base_model is not None:
            self.base_model = base_model

        # gcn defaults to False; model.py sets it to True.
        self.gcn = gcn
        # Post-transform embedding dimensionality.
        self.embed_dim = embed_dim
        self.cuda = cuda
        self.aggregator.cuda = cuda
        # Weight W is (embed_dim x feat_dim) GCN-style, or
        # (embed_dim x 2*feat_dim) GraphSAGE-style, because concatenating
        # "self || aggregated neighbors" doubles the input width.
        self.weight = nn.Parameter(
            torch.FloatTensor(embed_dim, self.feat_dim if self.gcn else 2 * self.feat_dim))
        # Use the in-place initializer; init.xavier_uniform is a deprecated
        # alias slated for removal.
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """
        Generates embeddings for a batch of nodes.

        nodes -- list of node ids
        Returns a FloatTensor of shape (embed_dim, len(nodes)).
        """
        # Aggregate each node's (sampled) neighborhood via the aggregator's
        # forward, e.g. MeanAggregator.forward (algorithm 1, line 4).
        neigh_feats = self.aggregator.forward(nodes, [self.adj_lists[int(node)] for node in nodes],
                                              self.num_sample)
        if not self.gcn:
            if self.cuda:
                self_feats = self.features(torch.LongTensor(nodes).cuda())
            else:
                self_feats = self.features(torch.LongTensor(nodes))
            # Concatenate self features with aggregated neighbor features
            # (the concatenation in algorithm 1, line 5).
            combined = torch.cat([self_feats, neigh_feats], dim=1)
        else:
            # GCN-style: use only the aggregated neighborhood, no self vector.
            combined = neigh_feats
        # Linear transform + nonlinearity (the W multiply in algorithm 1,
        # line 5).
        combined = F.relu(self.weight.mm(combined.t()))
        # Embeddings after one GNN layer, shape embed_dim x len(nodes).
        return combined


"""
Set of modules for aggregating embeddings of neighbors.
"""


# Aggregator class: performs the AGGREGATE step over neighbor information.
class MeanAggregator(nn.Module):
    """
    Aggregates a node's embedding as the uniform mean of its (sampled)
    neighbors' embeddings.
    """

    def __init__(self, features, cuda=False, gcn=False):
        """
        Initializes the aggregator for a specific graph.

        features -- function mapping LongTensor of node ids to FloatTensor of feature values.
        cuda -- whether to use GPU
        gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
        """

        super(MeanAggregator, self).__init__()

        self.features = features
        self.cuda = cuda
        self.gcn = gcn

    def forward(self, nodes, to_neighs, num_sample=10):
        """
        nodes --- list of nodes in a batch
        to_neighs --- list of sets, each set is the set of neighbors for node in batch
        num_sample --- number of neighbors to sample. No sampling if None.
        Returns a FloatTensor of shape (len(nodes), feature_dim).
        """
        # Local pointers to functions (speed hack)
        _set = set
        if num_sample is not None:
            _sample = random.sample
            # Down-sample each neighborhood that exceeds num_sample.
            # random.sample requires a sequence (sampling directly from a set
            # raises TypeError on Python 3.11+), so convert to a list first.
            samp_neighs = [_set(_sample(list(to_neigh), num_sample))
                           if len(to_neigh) >= num_sample else to_neigh
                           for to_neigh in to_neighs]
        else:
            samp_neighs = to_neighs

        # Add each node to its own neighborhood (the A + I trick from GCN).
        if self.gcn:
            # Sets are combined with "|"; the original "+" raised TypeError.
            samp_neighs = [samp_neigh | {nodes[i]} for i, samp_neigh in enumerate(samp_neighs)]
        # Union of all sampled neighborhoods: every distinct node this batch
        # touches (the * unpacks the list into arguments to set.union).
        unique_nodes_list = list(set.union(*samp_neighs))
        # Node ids need not start at 0: map each original id to a dense
        # column index for the mask built below.
        unique_nodes = {n: i for i, n in enumerate(unique_nodes_list)}

        # Build the batch-local adjacency "mask": one row per batch node,
        # one column per unique neighbor (len(unique_nodes) >= len(nodes)).
        # Plain tensor; the deprecated Variable wrapper is a no-op since
        # PyTorch 0.4.
        mask = torch.zeros(len(samp_neighs), len(unique_nodes))
        # Column indices: every neighbor of every batch node, remapped
        # through unique_nodes to its dense index.
        column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
        # Row indices: row i repeated once per neighbor of node i, e.g. for
        # samp_neighs = [{3,5,9}, {2,8}, {2}] -> [0,0,0,1,1,2].
        row_indices = [i for i in range(len(samp_neighs)) for _ in range(len(samp_neighs[i]))]
        # Scatter the edges: position (row_indices[k], column_indices[k]) = 1.
        mask[row_indices, column_indices] = 1
        if self.cuda:
            mask = mask.cuda()

        # Per-row neighbor counts; clamp to 1 so an isolated node (empty
        # neighborhood) yields a zero row instead of a 0/0 NaN row.
        num_neigh = mask.sum(1, keepdim=True).clamp(min=1)
        # Row-normalize so the matmul below computes a mean.
        mask = mask.div(num_neigh)
        # embed_matrix: (len(unique_nodes) x feature_dim) features of every
        # node referenced by this batch.
        if self.cuda:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
        else:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
        # Mean aggregation in one matmul: normalized batch-local A times X.
        to_feats = mask.mm(embed_matrix)
        return to_feats
