import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class gatp_layer(nn.Module):
    """One graph-attention layer.

    Projects node features with a learned weight matrix, scores every
    directed node pair with a two-part attention vector, masks pairs that
    are not edges in ``adj``, and aggregates neighbor features with the
    softmax-normalized attention coefficients.
    """

    def __init__(self, insize, outsize):
        super(gatp_layer, self).__init__()
        self.insize = insize
        self.outsize = outsize
        # Feature projection plus the two halves of the attention vector
        # (one scoring the source node, one the destination node).
        self.weight = nn.Parameter(torch.FloatTensor(insize, outsize))
        self.a_half_0 = nn.Parameter(torch.FloatTensor(outsize, 1))
        self.a_half_1 = nn.Parameter(torch.FloatTensor(outsize, 1))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(outsize), 1/sqrt(outsize)] for all parameters."""
        bound = 1. / math.sqrt(self.weight.size(1))
        for param in (self.weight, self.a_half_0, self.a_half_1):
            param.data.uniform_(-bound, bound)

    def forward(self, hl, adj):
        """Run one round of attention-weighted aggregation.

        hl:  (N, insize) node feature matrix.
        adj: (N, N) adjacency matrix; entries > 0 mark edges.
        Returns the (N, outsize) aggregated node features.
        """
        projected = torch.mm(hl, self.weight)
        num_nodes = hl.size()[0]

        # Per-node scores, tiled so that entry (i, j) combines node i's
        # source score with node j's destination score.
        source_scores = torch.mm(projected, self.a_half_0).repeat(1, num_nodes)
        dest_scores = torch.mm(projected, self.a_half_1).repeat(1, num_nodes)
        logits = F.leaky_relu(source_scores + dest_scores.T)

        # Non-edges get a huge negative logit so softmax sends them to ~0.
        neg_fill = -1e20 * torch.ones_like(logits)
        masked = torch.where(adj > 0, logits, neg_fill)
        coefficients = F.softmax(masked, dim=1)

        return torch.mm(coefficients, projected)


class gatp(nn.Module):
    """Stacked graph-attention network.

    Architecture: input attention layer -> ``hidlayernum`` hidden attention
    layers (ReLU between layers) -> output attention layer -> log-softmax,
    suitable for use with ``nn.NLLLoss``.
    """

    def __init__(self, insize, outsize, hidsize, hidlayernum):
        super(gatp, self).__init__()
        self.ly1 = gatp_layer(insize, hidsize)
        self.ly2 = gatp_layer(hidsize, outsize)
        self.hidlayernum = hidlayernum
        if hidlayernum < 0:
            raise ValueError("hidlayernum < 0")
        else:
            # BUG FIX: the hidden layers were kept in a plain Python list,
            # which nn.Module does not register — their parameters were
            # invisible to .parameters(), the optimizer, .to(device), and
            # state_dict(). nn.ModuleList registers each submodule properly.
            self.hid = nn.ModuleList(
                gatp_layer(hidsize, hidsize) for _ in range(hidlayernum)
            )

    def forward(self, feature, adj):
        """Forward pass.

        feature: (N, insize) node feature matrix.
        adj:     (N, N) adjacency matrix; entries > 0 mark edges.
        Returns (N, outsize) per-node log-probabilities.
        """
        hid_out = F.relu(self.ly1(feature, adj))
        for layer in self.hid:
            hid_out = F.relu(layer(hid_out, adj))
        ly2_out = self.ly2(hid_out, adj)
        return F.log_softmax(ly2_out, dim=1)

