import torch
import torch.nn as nn
from dgl.nn import GraphConv


class GCNEncoder(nn.Module):
    """Two-layer graph-convolutional encoder over a DGL graph.

    Raw node features are linearly projected to ``d_model``, squashed with
    tanh, regularized with dropout, and layer-normalized. Two ``GraphConv``
    layers then expand to ``2 * d_model`` and contract back to ``d_model``;
    the concatenated outputs of both layers are fused by a final linear map
    and the rows for the requested nodes are returned.
    """

    def __init__(self, d_input, d_model):
        """Build the encoder.

        Args:
            d_input: Dimensionality of the raw input node features.
            d_model: Internal/output embedding dimensionality.
        """
        super().__init__()
        # Bias-free projection of raw features into the model dimension.
        self.fc = nn.Linear(d_input, d_model, bias=False)
        self.dropout = nn.Dropout(p=0.2)
        self.layernorm = nn.LayerNorm(d_model)
        # LeakyReLU is stateless, so one instance can serve both conv layers.
        leaky = nn.LeakyReLU()
        # First conv expands d_model -> 2*d_model.
        self.gcn_layer1 = GraphConv(d_model, d_model * 2,
                                    norm='both', weight=True, bias=True,
                                    activation=leaky,
                                    allow_zero_in_degree=True)
        # Second conv contracts 2*d_model -> d_model.
        self.gcn_layer2 = GraphConv(d_model * 2, d_model,
                                    norm='both', weight=True, bias=True,
                                    activation=leaky,
                                    allow_zero_in_degree=True)
        # Fuses the concatenated conv outputs (2*d_model + d_model) -> d_model.
        self.output_fc = nn.Linear(d_model * 3, d_model)

    def forward(self, g, feature, users):
        """Encode the graph and return embeddings for the given nodes.

        Args:
            g: DGL graph whose nodes carry ``feature``.
            feature: Per-node input features of width ``d_input``.
            users: Index tensor (or list) selecting output rows —
                presumably node ids of the users; verify against caller.

        Returns:
            Embeddings of width ``d_model`` for the selected nodes.
        """
        # Project, squash, regularize, then normalize the raw features.
        h = self.fc(feature)
        h = self.layernorm(self.dropout(torch.tanh(h)))

        h1 = self.gcn_layer1(g, h)
        h2 = self.gcn_layer2(g, h1)
        # Skip-style fusion: both conv outputs feed the final projection.
        fused = self.output_fc(torch.cat((h1, h2), dim=-1))
        return fused[users]
