import torch
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import SAGEConv, to_hetero
from torch_geometric.nn import GATConv, Linear, to_hetero
import torch.nn.functional as F


# Load the OGB-MAG heterogeneous citation dataset (downloads on first run).
# 'metapath2vec' preprocessing attaches learned embeddings to node types that
# ship without features; ToUndirected() adds reverse edge types ("rev_*") so
# messages can flow in both directions.
transform = T.ToUndirected()
dataset = OGB_MAG(root='./data', preprocess='metapath2vec', transform=transform)
data = dataset[0]

class GNN(torch.nn.Module):
    """Two-layer GraphSAGE encoder.

    Input sizes are lazy ((-1, -1)) so that, after conversion with
    ``to_hetero``, each node/edge type can infer its own input
    dimensionality on the first forward pass.

    Args:
        hidden_channels: width of the hidden layer.
        out_channels: output dimensionality (e.g. number of classes).
    """

    def __init__(self, hidden_channels, out_channels):
        super().__init__()
        self.conv1 = SAGEConv((-1, -1), hidden_channels)
        self.conv2 = SAGEConv((-1, -1), out_channels)

    def forward(self, x, edge_index):
        # NOTE: removed a leftover debug print of x/edge_index shapes here —
        # after to_hetero() it would fire once per node/edge type on every
        # forward call, flooding stdout during training.
        x = self.conv1(x, edge_index).relu()
        x = self.conv2(x, edge_index)
        return x


class GAT(torch.nn.Module):
    """Two-layer graph attention network with per-layer linear skip paths.

    ``add_self_loops=False`` because after ``to_hetero`` the convolutions run
    on bipartite (source-type, target-type) graphs, where self-loops are not
    well-defined.  Lazy ``-1`` sizes let each type infer its input width on
    the first forward pass.
    """

    def __init__(self, hidden_channels, out_channels):
        super().__init__()
        self.conv1 = GATConv((-1, -1), hidden_channels, add_self_loops=False)
        self.lin1 = Linear(-1, hidden_channels)
        self.conv2 = GATConv((-1, -1), out_channels, add_self_loops=False)
        self.lin2 = Linear(-1, out_channels)

    def forward(self, x, edge_index):
        # Layer 1: attention output plus a linear skip, then nonlinearity.
        h = self.conv1(x, edge_index) + self.lin1(x)
        h = h.relu()
        # Layer 2: same pattern over the hidden representation, no activation.
        return self.conv2(h, edge_index) + self.lin2(h)


# NOTE(review): hidden_channels=1 looks like a debugging leftover (the
# commented-out GNN variant used 64) — confirm before a real training run.
model = GAT(hidden_channels=1, out_channels=dataset.num_classes)
# Convert the homogeneous GAT into a heterogeneous model: to_hetero()
# duplicates the layers per node/edge type and aggregates messages with 'sum'.
model = to_hetero(model, data.metadata(), aggr='sum')

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Fall back to CPU when no GPU is present — the previous hardcoded "cuda"
# crashed outright on CPU-only machines.  Module.to() moves parameters
# in-place, so the optimizer created above still tracks them.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
data = data.to(device)

# Debug: report the feature-matrix shape of every node type and the
# edge-index shape of every edge type (covers all types, instead of a
# hand-maintained list of keys).
for node_type, x in data.x_dict.items():
    print(node_type, tuple(x.shape))
for edge_type, edge_index in data.edge_index_dict.items():
    print(edge_type, tuple(edge_index.shape))


def train():
    """Run one full-batch training step and return the scalar loss.

    Uses the module-level ``model``, ``optimizer`` and ``data``.  The loss is
    computed only on 'paper' nodes selected by the training mask.
    """
    model.train()
    optimizer.zero_grad()

    out = model(data.x_dict, data.edge_index_dict)

    train_mask = data['paper'].train_mask
    logits = out['paper'][train_mask]
    labels = data['paper'].y[train_mask]
    loss = F.cross_entropy(logits, labels)

    loss.backward()
    optimizer.step()
    return loss.item()

# Run four full-batch training steps, printing the loss after each one.
for _ in range(4):
    print(train())