# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import os

import datasets
import pandas
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ""
_DESCRIPTION = """\
The CiteSeer dataset consists of 3312 scientific publications classified into one of six classes. The citation network consists of 4732 links. Each publication in the dataset is described by a 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary. The dictionary consists of 3703 unique words. The README file in the dataset provides more details.
"""
_HOMEPAGE = "https://linqs.soe.ucsc.edu/data"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method).
_URLs = {
"nodes": "https://linqs-data.soe.ucsc.edu/public/lbc/citeseer.tgz",
"edges": "https://linqs-data.soe.ucsc.edu/public/lbc/citeseer.tgz"
}
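# Both configurations are built from the same LINQS archive, which bundles the node contents and the citation links.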
_CLASS_LABELS = [
"Agents",
"AI",
"DB",
"IR",
"ML",
"HCI"
]
# The name of the dataset class usually matches the script name with CamelCase instead of snake_case
class CiteseerDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.1")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
    # You will be able to load one configuration or the other in the following list with
    # data = datasets.load_dataset('citeseer', 'nodes')
    # data = datasets.load_dataset('citeseer', 'edges')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="nodes", version=VERSION,
description="The CiteSeer dataset"),
datasets.BuilderConfig(name="edges", version=VERSION,
description="The CiteSeer network")
]
    # It's not mandatory to have a default configuration. Just use one if it makes sense.
DEFAULT_CONFIG_NAME = "nodes"
def _info(self):
if self.config.name == "nodes":
word_features = [f"word{i}" for i in range(3703)]
features_dict = {
w: datasets.Value("bool")
for w in word_features
}
features_dict["node"] = datasets.Value("string")
features_dict["label"] = datasets.ClassLabel(names=_CLASS_LABELS)
features_dict["neighbors"] = datasets.Sequence(
datasets.Value("string")
)
features = datasets.Features(features_dict)
        elif self.config.name == "edges":  # the "edges" configuration only exposes the citation links
features = datasets.Features(
{
"source": datasets.Value("string"),
"target": datasets.Value("string")
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
# Here we define them above because they are different between the two configurations
features=features,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # This method downloads/extracts the data and defines the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default the archives are extracted and the path to a cached folder where they are extracted is returned instead of the archive.
my_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(my_urls)
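        # the downloaded archive extracts into a "citeseer/" folder containing citeseer.content (nodes) and citeseer.cites (edges)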
data_dir = os.path.join(data_dir, "citeseer")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"edges_path": os.path.join(data_dir, "citeseer.cites"),
"nodes_path": os.path.join(data_dir, "citeseer.content"),
"split": "train"
}
)
]
def _generate_examples(
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
self, edges_path, nodes_path, split
):
""" Yields examples as (key, example) tuples. """
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is here for legacy reason (tfds) and is not important in itself.
if self.config.name == "nodes":
neighbors = {}
with open(edges_path, "rt", encoding="UTF-8") as f:
for line in f:
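                    # each line of citeseer.cites is "<ID of cited paper> <ID of citing paper>",
                    # so the citing paper (src) collects the cited paper (target) as a neighbor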
target, src = line.strip().split()
for n in (target, src):
if n not in neighbors:
neighbors[n] = []
neighbors[src].append(target)
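            # citeseer.content is tab-separated: <paper_id>, 3703 binary word indicators, <class_label>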
colnames = ["node"] + \
[f"word{i}" for i in range(3703)] + ["label"]
dtypes = [str] + [bool] * 3703 + [str]
nodes = pandas.read_csv(
nodes_path,
sep="\t",
header=None,
names=colnames,
dtype=dict(zip(colnames, dtypes))
)
            # Some nodes appear in the edge list but not in the content file, so we remove them from the neighbor lists
all_nodes = set(neighbors.keys())
for vv in neighbors.values():
all_nodes.update(set(vv))
nodes_to_rm = all_nodes.difference(nodes["node"])
for n in neighbors.keys():
neighbors[n] = list(set(neighbors[n]).difference(nodes_to_rm))
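            # map column names to positional indices for fast access to the plain tuples yielded by itertuples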
col2idx = {col: i for i, col in enumerate(list(nodes))}
for id, row in enumerate(nodes.itertuples(index=False, name=None)):
n = row[col2idx["node"]]
features = {
"node": n,
"label": row[col2idx["label"]],
"neighbors": neighbors[n]
}
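                # fill in the 3703 bag-of-words indicator features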
for i in range(3703):
feature_name = f"word{i}"
features[feature_name] = row[col2idx[feature_name]]
yield id, features
elif self.config.name == "edges":
with open(edges_path, "rt", encoding="UTF-8") as f:
for id, line in enumerate(f):
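                    # same column order as above: cited paper first, then citing paper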
target, src = line.strip().split()
yield id, {"source": src, "target": target}