import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class gcn_layer(nn.Module):
    """Single graph-convolution layer: H' = adj @ H @ W (+ b).

    Implements the basic GCN propagation rule; the caller is expected to
    pass an appropriately normalized adjacency matrix `adj`.
    """

    def __init__(self, insize, outsize, bias=True):
        """
        Args:
            insize: number of input features per node.
            outsize: number of output features per node.
            bias: if True, add a learnable per-output-feature bias.
        """
        super(gcn_layer, self).__init__()
        self.insize = insize
        self.outsize = outsize
        # torch.empty replaces the legacy torch.FloatTensor constructor:
        # it honors the current default dtype/device instead of always
        # producing CPU float32. Values are set by reset_parameters().
        self.weight = nn.Parameter(torch.empty(insize, outsize))
        if bias:
            self.bias = nn.Parameter(torch.empty(outsize))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize parameters uniformly in [-1/sqrt(outsize), 1/sqrt(outsize)]."""
        stdv = 1. / math.sqrt(self.weight.size(1))
        # nn.init.* initializes in place under no_grad; direct .data
        # mutation is discouraged as it bypasses autograd's safety checks.
        nn.init.uniform_(self.weight, -stdv, stdv)
        if self.bias is not None:
            nn.init.uniform_(self.bias, -stdv, stdv)

    def forward(self, hl, adj):
        """Return adj @ hl @ weight (+ bias).

        Args:
            hl: node feature matrix, shape (num_nodes, insize).
            adj: adjacency matrix, shape (num_nodes, num_nodes).
        """
        mid = torch.mm(adj, hl)
        hn = torch.mm(mid, self.weight)
        if self.bias is not None:
            return hn + self.bias
        return hn


class gcn(nn.Module):
    """Multi-layer GCN: input layer -> `hidlayernum` hidden layers -> output layer.

    ReLU is applied after every layer except the last; the output is
    log-softmax over classes, suitable for nn.NLLLoss.
    """

    def __init__(self, insize, outsize, hidsize, hidlayernum):
        """
        Args:
            insize: input feature dimension per node.
            outsize: number of output classes.
            hidsize: hidden feature dimension.
            hidlayernum: number of hidden (hidsize -> hidsize) layers; must be >= 0.

        Raises:
            ValueError: if hidlayernum is negative.
        """
        super(gcn, self).__init__()
        # Validate before allocating any layers (fail fast).
        if hidlayernum < 0:
            raise ValueError("hidlayernum < 0")
        self.ly1 = gcn_layer(insize, hidsize)
        self.ly2 = gcn_layer(hidsize, outsize)
        self.hidlayernum = hidlayernum
        # BUG FIX: the hidden layers were kept in a plain Python list, which
        # hides them from nn.Module -- their parameters were excluded from
        # .parameters() (so never optimized), from .to(device), and from
        # state_dict(). nn.ModuleList registers each one as a submodule.
        self.hid = nn.ModuleList(
            gcn_layer(hidsize, hidsize) for _ in range(hidlayernum)
        )

    def forward(self, feature, adj):
        """Run the full GCN stack and return per-node log-probabilities.

        Args:
            feature: node features, shape (num_nodes, insize).
            adj: adjacency matrix, shape (num_nodes, num_nodes).

        Returns:
            Tensor of shape (num_nodes, outsize) of log-softmax scores.
        """
        hid_out = F.relu(self.ly1(feature, adj))
        for layer in self.hid:
            hid_out = F.relu(layer(hid_out, adj))
        ly2_out = self.ly2(hid_out, adj)
        return F.log_softmax(ly2_out, dim=1)

