from torch_geometric.data import HeteroData
import torch
import torch_geometric.transforms as T
from torch_geometric.loader import NeighborLoader


# Build a toy heterogeneous graph with two node types and one edge type.
data = HeteroData()

# Two node types, "paper" and "author", each holding a random feature
# matrix of 10 nodes x 5 features.
data['paper'].x = torch.randn(10, 5)
data['author'].x = torch.randn(10, 5)

# Create an edge type "(author, writes, paper)" and build the graph
# connectivity: author i writes paper i (an identity matching).
# NOTE: edge_index must have dtype torch.long (int64) — PyG's indexing,
# RandomLinkSplit and NeighborLoader all rely on it; the previous
# torch.IntTensor (int32) fails in those downstream ops.
data['author', 'writes', 'paper'].edge_index = torch.tensor(
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
    dtype=torch.long)  # shape [2, num_edges]

print(data['paper'])
print(data['author', 'writes', 'paper'].edge_index)


#
# import torch_geometric.transforms as T
# from torch_geometric.datasets import OGB_MAG
# from torch_geometric.nn import SAGEConv, to_hetero
#
#
# dataset = OGB_MAG(root='/persistent/data/metapath2vec', preprocess='metapath2vec', transform=T.ToUndirected())
# print(dataset.num_classes)
# print(dataset.num_features)
# print(dataset.num_edge_features)
# print(dataset.num_node_features)


# Split the (author, writes, paper) edges into train/val/test link sets:
# 70% train / 20% val / 10% test. Negative sampling is disabled for all
# splits, and train supervision edges are shared with message passing
# (disjoint_train_ratio=0.0).
transform = T.RandomLinkSplit(
    num_val=0.2,
    num_test=0.1,
    disjoint_train_ratio=0.0,
    neg_sampling_ratio=0.0,
    add_negative_train_samples=False,
    edge_types=[('author', 'writes', 'paper')],
)
train_data, val_data, test_data = transform(data)

# Inspect each resulting split.
for split in (train_data, val_data, test_data):
    print(split)

# data = dataset[0]
#
#
# train_loader = NeighborLoader(
#     data,
#     # Sample 15 neighbors for each node and each edge type for 2 iterations:
#     num_neighbors=[15] * 2,
#     # Use a batch size of 128 for sampling training nodes of type "paper":
#     batch_size=128,
#     input_nodes=('paper', data['paper'].train_mask),
# )
#
# batch = next(iter(train_loader))