import torch
from torch import nn
from torch_geometric.nn.conv.rgcn_conv import RGCNConv
from torch_geometric.nn.conv.gat_conv import GATConv
from torch_geometric.nn.conv.graph_conv import GraphConv

# Number of forward/backward iterations to run. NOTE: despite the name,
# this is an iteration count, not a batch dimension (each iteration
# processes one fresh 2-node graph). Name kept for compatibility.
batch = 100000

# Three stacked graph-conv layers (all 10 -> 10 node features) followed
# by a linear classifier head mapping node features to 20 logits.
conv_1 = RGCNConv(10, 10, 2, num_bases=2)  # 2 relation types, basis-decomposed
conv_2 = GATConv(10, 10, heads=2, concat=False, dropout=0.0, add_self_loops=True)
conv_3 = GraphConv(10, 10)
cls = nn.Linear(10, 20)

# Fall back to CPU when no GPU is present instead of crashing on
# an unconditional .to("cuda").
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conv_1.to(device)
conv_2.to(device)
conv_3.to(device)
cls.to(device)

# The 2-node graph topology is fixed across iterations, so build the edge
# index and relation types once, outside the loop, instead of re-creating
# and re-transferring them 100 000 times. Run on whatever device the
# modules live on (derived from the classifier head's parameters), so this
# works whether the modules were moved to CUDA or left on CPU.
device = next(cls.parameters()).device
index = torch.tensor([[0, 1], [1, 0]], dtype=torch.long, device=device)
edge_type = torch.tensor([0, 1], dtype=torch.long, device=device)

for _ in range(batch):
    # Fresh random node features each iteration, created directly on the
    # target device with requires_grad=True so the tensor is an autograd
    # leaf *on that device*. The original `.requires_grad_().to("cuda")`
    # made the CPU tensor the leaf, so backward() accumulated the input
    # gradient onto a discarded CPU tensor. (Renamed from `input`, which
    # shadowed the builtin.)
    x = torch.randn((2, 10), device=device, requires_grad=True)

    out_1 = conv_1(x, index, edge_type)   # relational conv uses edge types
    out_2 = conv_2(out_1, index)
    out_3 = conv_3(out_2, index)
    out = cls(out_3)
    loss = torch.sum(out)

    # Gradient of the loss w.r.t. the input features; retain_graph=True so
    # the subsequent backward() can reuse the same graph.
    print(torch.autograd.grad(loss, x, retain_graph=True))
    # NOTE(review): backward() accumulates gradients into the module
    # parameters every iteration with no optimizer step or zero_grad();
    # confirm this is intentional (e.g. a memory/repro script) rather
    # than an incomplete training loop.
    loss.backward()
