from __future__ import absolute_import

from torch.utils import data

from torch_geometric.datasets import Planetoid, TUDataset
import pandas as pd


class GraphDatasetLoader(data.Dataset):
    '''
    Load a benchmark graph dataset from torch_geometric behind the
    ``torch.utils.data.Dataset`` interface.

    Supported datasets are the Planetoid citation networks ("Cora",
    "CiteSeer", "PubMed") and two TU graph collections ("PROTEINS",
    "ENZYMES").
    '''

    # Dataset names dispatched to each torch_geometric loader.
    _PLANETOID_NAMES = ("Cora", "CiteSeer", "PubMed")
    _TU_NAMES = ("PROTEINS", "ENZYMES")

    def __init__(self, root, name="Cora", split="Public", need_shuffle=True):
        '''
        Parameters
        ----------
        root : str
            Root directory where the dataset should be saved.
        name : str, optional
            The name of the dataset ("Cora", "CiteSeer", "PubMed",
            "PROTEINS", "ENZYMES"), by default "Cora".
        split : str, optional
            The type of dataset split ("public", "full", "geom-gcn", "random");
            only used for the Planetoid datasets, by default "Public"
            (Planetoid treats the value case-insensitively).
                If set to "public", the split will be the public fixed split from the "Revisiting Semi-Supervised Learning with Graph Embeddings" <https://arxiv.org/abs/1603.08861>_ paper.
                If set to "full", all nodes except those in the validation and test sets will be used for training (as in the "FastGCN: Fast Learning with Graph Convolutional Networks via Importance Sampling" <https://arxiv.org/abs/1801.10247>_ paper).
                If set to "geom-gcn", the 10 public fixed splits from the "Geom-GCN: Geometric Graph Convolutional Networks" <https://openreview.net/forum?id=S1e2agrFvS>_ paper are given.
                If set to "random", train, validation, and test sets will be randomly generated, according to num_train_per_class, num_val and num_test.
        need_shuffle : bool, optional
            Randomly shuffles the examples in the dataset, by default True.

        Raises
        ------
        ValueError
            If ``name`` is not one of the supported dataset names.
        '''
        super(GraphDatasetLoader, self).__init__()

        self.root = root
        self.name = name
        self.split = split
        self.need_shuffle = need_shuffle

        # NOTE(review): kept as 'node' for backward compatibility, although
        # the TU datasets ("PROTEINS", "ENZYMES") are graph-level tasks —
        # confirm downstream consumers before changing.
        self.task = 'node'

        # Fail fast on unknown names; previously self.dataset was simply
        # left unset, surfacing later as an opaque AttributeError.
        if name in self._PLANETOID_NAMES:
            self.dataset = Planetoid(root=self.root, name=self.name,
                                     split=self.split)
        elif name in self._TU_NAMES:
            self.dataset = TUDataset(root=self.root, name=self.name)
        else:
            raise ValueError(
                "Unknown dataset name %r; expected one of %s"
                % (name, self._PLANETOID_NAMES + self._TU_NAMES))

        if self.need_shuffle:
            self.dataset = self.dataset.shuffle()

    def __summary__(self):
        '''Return a one-row pandas DataFrame with basic dataset statistics.'''
        return pd.DataFrame(data={
            "name": self.dataset.name,
            "num_classes": [self.dataset.num_classes],
            "num_features": [self.dataset.num_features],
            "num_edge_features": [self.dataset.num_edge_features],
            "num_node_features": [self.dataset.num_node_features]
        })

    def __len__(self):
        '''Delegate ``len()`` to the wrapped torch_geometric dataset.'''
        return len(self.dataset)

    def __getitem__(self, index):
        '''Return the example at ``index`` from the wrapped dataset.'''
        return self.dataset[index]
