import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
    

class GraphConvLayer(nn.Module):
    """Graph convolution layer: out = tanh(A @ (x @ W) + b).

    Args:
        n_in: input feature dimension per node.
        n_out: output feature dimension per node.
        use_bias: whether to add a learnable bias after neighbor aggregation.
    """
    def __init__(self, n_in, n_out, use_bias=True):
        super(GraphConvLayer, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.use_bias = use_bias
        # Learnable weight matrix of the GCN layer.
        self.weight = nn.Parameter(torch.Tensor(n_in, n_out))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(n_out))
        else:
            # Register None so the parameter name stays consistent in state_dict.
            self.register_parameter('bias', None)
        self.reset_parameters()  # custom parameter initialization

    def reset_parameters(self):
        """Initialize the weight with Kaiming-uniform and the bias with zeros."""
        nn.init.kaiming_uniform_(self.weight)
        if self.use_bias:
            nn.init.zeros_(self.bias)

    def forward(self, A, x):
        """Apply the layer.

        Args:
            A: batched adjacency matrix of shape (B, N, N) —
               torch.bmm requires a 3-D batch of matrices.
            x: node features of shape (B, N, n_in).

        Returns:
            Tensor of shape (B, N, n_out) after a tanh activation.
        """
        support = torch.matmul(x, self.weight)  # (B, N, n_out)
        out = torch.bmm(A, support)             # aggregate over neighbors
        if self.use_bias:
            out = out + self.bias
        # F.tanh is deprecated; torch.tanh computes the same values.
        return torch.tanh(out)
    
    
class GCN(nn.Module):
    """Two-layer GCN that scores a whole graph.

    Node features are projected to a hidden width, passed through two
    graph-convolution layers, mapped to a per-node scalar, and summed
    to a single score per graph.

    Args:
        nh: width of the input projection (fc0 output).
        ng1: output width of the first graph-conv layer.
        ng2: output width of the second graph-conv layer.
        n_in: number of input features per node (default 15, matching
            the previously hard-coded value).
    """

    def __init__(self, nh=8, ng1=10, ng2=10, n_in=15):
        super(GCN, self).__init__()
        self.fc0 = nn.Linear(n_in, nh)
        self.gcn1 = GraphConvLayer(nh, ng1)
        self.gcn2 = GraphConvLayer(ng1, ng2)
        self.fc1 = nn.Linear(ng2, 1)

    def forward(self, h, a):
        """Forward pass.

        Args:
            h: node features, shape (B, N, n_in).
            a: adjacency matrices, shape (B, N, N).

        Returns:
            Tensor of shape (B,): one scalar score per graph.
        """
        N = h.shape[1]
        # Add self-loops; the identity is (1, N, N) and broadcasts over the batch.
        A = a + torch.eye(N, device=a.device).unsqueeze(0)

        x = torch.tanh(self.fc0(h.float()))

        x = self.gcn1(A, x)
        x = self.gcn2(A, x)  # (B, N, ng2)

        x = torch.tanh(self.fc1(x))  # (B, N, 1)
        x = x.squeeze(-1)            # (B, N)
        return x.sum(dim=1)          # (B,)