import torch
from torch import nn
from typing import Tuple


class GraphConvolution(nn.Module):
    """Propagate node features along edges: ``out = adj @ x``.

    Each node's output is the adjacency-weighted sum of its neighbors'
    feature vectors. The layer has no learnable parameters.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x, adj):
        # Neighborhood aggregation by batched matrix multiplication:
        # (..., N, N) @ (..., N, H) -> (..., N, H).
        return torch.matmul(adj, x)


class GraphNorm(nn.Module):
    """Row-normalize an adjacency matrix and add self-loops.

    Computes ``D^{-1} A + I`` where ``D`` is the (out-)degree: each row of
    ``adj`` is divided by its sum, then the identity is added so every node
    keeps its own features during propagation. No learnable parameters.
    """

    def __init__(self):
        super().__init__()

    def forward(self, adj):
        # Row-normalize; the epsilon guards against division by zero for
        # isolated nodes (all-zero rows stay all-zero).
        adj = adj / (adj.sum(-1, keepdim=True) + 1e-9)
        # Add self-loops. Build the identity directly on adj's device and
        # with adj's dtype — the original allocated on CPU and copied with
        # .to(adj.device) on every forward call.
        adj = adj + torch.eye(adj.shape[-1], dtype=adj.dtype, device=adj.device)
        return adj


class Gconv(nn.Module):
    """One graph-convolution layer: normalize adjacency, propagate, apply MLP.

    Args:
        dims: ``(input_size, hidden_size, output_size)`` for the two-layer
            MLP applied to the aggregated node features.
    """

    def __init__(self, dims: Tuple[int, int, int]):
        super().__init__()
        self.input_size, self.hidden_size, self.output_size = dims

        self.mlp = nn.Sequential(
            nn.Linear(self.input_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.output_size),
        )
        self.norm = GraphNorm()
        self.gc = GraphConvolution()

    def forward(self, x, adj):
        # x: (B, N, input_size) node features; adj: (B, N, N) adjacency.
        normalized = self.norm(adj)
        aggregated = self.gc(x, normalized)
        return self.mlp(aggregated)