from torch_geometric.loader import NeighborLoader
import torch
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG

from torch_geometric.nn import GATConv, Linear, to_hetero
import torch.nn.functional as F

import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import HGTConv, Linear


class HGT(torch.nn.Module):
    """Heterogeneous Graph Transformer for node classification on OGB-MAG.

    Projects each node type's features to ``hidden_channels`` with a
    per-type linear layer, applies ``num_layers`` :class:`HGTConv` layers,
    and classifies ``'paper'`` nodes with a final linear head.

    NOTE(review): reads the module-level ``data`` object for node types and
    metadata, so the model must be constructed after ``data`` is loaded.

    Args:
        hidden_channels: Width of the hidden node embeddings.
        out_channels: Number of target classes for 'paper' nodes.
        num_heads: Number of attention heads per HGTConv layer.
        num_layers: Number of stacked HGTConv layers.
    """

    def __init__(self, hidden_channels, out_channels, num_heads, num_layers):
        super().__init__()

        # One input projection per node type. Linear(-1, ...) infers the
        # input feature dimension lazily on the first forward pass.
        self.lin_dict = torch.nn.ModuleDict()
        for node_type in data.node_types:
            self.lin_dict[node_type] = Linear(-1, hidden_channels)

        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            conv = HGTConv(hidden_channels, hidden_channels, data.metadata(),
                           num_heads, group='sum')
            self.convs.append(conv)

        self.lin = Linear(hidden_channels, out_channels)

    def forward(self, x_dict, edge_index_dict):
        """Return classification logits for the 'paper' nodes.

        Builds a fresh dict of projected features instead of mutating the
        caller's ``x_dict`` in place (the original overwrote entries of the
        loader batch's feature dict).
        """
        x_dict = {
            node_type: self.lin_dict[node_type](x).relu_()
            for node_type, x in x_dict.items()
        }

        for conv in self.convs:
            x_dict = conv(x_dict, edge_index_dict)

        return self.lin(x_dict['paper'])


# Load OGB-MAG with metapath2vec features and make the graph undirected so
# every edge type has a reverse counterpart for message passing.
dataset = OGB_MAG(root='./data', preprocess='metapath2vec',
                  transform=T.ToUndirected())
data = dataset[0]
data = data.to("cuda")

train_loader = NeighborLoader(
    data,
    # Sample 15 neighbors for each node and each edge type for 2 iterations:
    num_neighbors=[15] * 2,
    # Use a batch size of 64 for sampling training nodes of type "paper":
    batch_size=64,
    input_nodes=('paper', data['paper'].train_mask),
)

model = HGT(hidden_channels=32, out_channels=dataset.num_classes,
            num_heads=2, num_layers=2)
model = model.to("cuda:0")

# Run a single dummy forward pass to materialize the lazily-initialized
# modules (Linear(-1, ...)). The optimizer must be created *after* this
# step so it captures real parameters, not uninitialized placeholders.
with torch.no_grad():
    batch = next(iter(train_loader)).to('cuda:0')
    model(batch.x_dict, batch.edge_index_dict)

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)


def train():
    """Run one training epoch over ``train_loader``.

    Returns:
        The example-weighted mean cross-entropy loss over the epoch.
    """
    model.train()
    total_examples = total_loss = 0
    for index, batch in enumerate(train_loader):
        optimizer.zero_grad()
        batch = batch.to('cuda:0')
        # Only the first `batch_size` 'paper' rows are the sampled seed
        # nodes; the remaining rows are neighbors and carry no supervision.
        batch_size = batch['paper'].batch_size
        out = model(batch.x_dict, batch.edge_index_dict)

        loss = F.cross_entropy(out[:batch_size],
                               batch['paper'].y[:batch_size])
        loss.backward()
        optimizer.step()

        total_examples += batch_size
        total_loss += float(loss) * batch_size

        # Log every 100 batches. (The original used `index += 0`, so the
        # counter never advanced and the loss printed on every batch.)
        if index % 100 == 0:
            print("loss", loss.item())

    return total_loss / total_examples


# Train for 10 epochs, reporting the mean epoch loss after each one.
for epoch in range(10):
    epoch_loss = train()
    print(epoch_loss)