import torch
import torch.nn.functional as F
from torch_geometric.nn.conv import SAGEConv
from torch_geometric.nn import Set2Set
from torch import nn


# Number of Set2Set aggregation iterations — a hyperparameter.
processing_steps = 4
# Node-feature dimensionality fed to the first SAGEConv layer.
in_channels = 128
# Width of the hidden SAGEConv layers.
hidden_channels = 64
# Dimensionality of the final per-graph embedding.
output_channels = 32
# Dropout probability applied after each conv layer.
dropout = 0.5


class SAGE(torch.nn.Module):
    """Three-layer GraphSAGE encoder with Set2Set global pooling.

    Each graph is encoded independently ("pseudo-batching"): node features
    pass through three SAGEConv layers, Set2Set aggregates all nodes into a
    single graph-level vector, and two linear layers project it down to
    ``output_channels``.

    Note: submodules are no longer hard-wired to CUDA at construction time;
    place the model with ``model.to(device)`` / ``model.cuda()`` as usual.
    The batch-assignment tensor in ``forward`` follows the input's device.
    """

    def __init__(self):
        super(SAGE, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(in_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        # Set2Set needs no edge information: assigning every node to a single
        # cluster yields one global representation per graph.
        self.set2set = Set2Set(hidden_channels, processing_steps)
        # Set2Set outputs a (1, 2 * hidden_channels) concatenated readout.
        self.linear1 = nn.Linear(
            in_features=2 * hidden_channels, out_features=hidden_channels)
        self.linear2 = nn.Linear(
            in_features=hidden_channels, out_features=output_channels)
        # Dropout probability used after every conv layer.
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize every learnable submodule (not only the convs)."""
        for conv in self.convs:
            conv.reset_parameters()
        self.set2set.reset_parameters()
        self.linear1.reset_parameters()
        self.linear2.reset_parameters()

    def forward(self, att, edge_index_x):
        """Encode a sequence of graphs one at a time.

        Args:
            att: sequence of per-graph node-feature tensors; element ``i`` is
                expected to be ``(N_i, in_channels)``.  # assumes 2-D float
                # tensors — TODO confirm against the caller
            edge_index_x: sequence of ``(2, E_i)`` edge-index tensors aligned
                with ``att``.

        Returns:
            Tensor of shape ``(len(att), output_channels)`` — one embedding
            row per input graph.
        """
        res = []
        # Pseudo-batching: graphs may have different node counts, so each one
        # is pushed through the network separately and the results stacked.
        for x, edge_index in zip(att, edge_index_x):
            for conv in self.convs:
                x = conv(x, edge_index)
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
            # All nodes share batch id 0, so Set2Set pools the whole graph
            # into a single (1, 2 * hidden_channels) vector.  The batch
            # tensor is created on the input's device instead of hard-coded
            # CUDA, so the model also runs on CPU.
            batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device)
            out = F.relu(self.linear1(self.set2set(x, batch)))
            out = F.relu(self.linear2(out))
            res.append(out)
        return torch.cat(res)
