# dart.py: loading script for the GEM/dart dataset (Hugging Face `datasets` library).
import json

import datasets


_CITATION = """\
@inproceedings{nan-etal-2021-dart,
    title = "{DART}: Open-Domain Structured Data Record to Text Generation",
    author = "Nan, Linyong and
      Radev, Dragomir and
      Zhang, Rui and
      Rau, Amrit and
      Sivaprasad, Abhinand and
      Hsieh, Chiachun and
      Tang, Xiangru and
      Vyas, Aadit and
      Verma, Neha and
      Krishna, Pranav and
      Liu, Yangxiaokang and
      Irwanto, Nadia and
      Pan, Jessica and
      Rahman, Faiaz and
      Zaidi, Ahmad and
      Mutuma, Mutethia and
      Tarabar, Yasin and
      Gupta, Ankit and
      Yu, Tao and
      Tan, Yi Chern and
      Lin, Xi Victoria and
      Xiong, Caiming and
      Socher, Richard and
      Rajani, Nazneen Fatema",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.37",
    doi = "10.18653/v1/2021.naacl-main.37",
    pages = "432--447",
    abstract = "We present DART, an open domain structured DAta Record to Text generation dataset with over 82k instances (DARTs). Data-to-text annotations can be a costly process, especially when dealing with tables which are the major source of structured data and contain nontrivial structures. To this end, we propose a procedure of extracting semantic triples from tables that encodes their structures by exploiting the semantic dependencies among table headers and the table title. Our dataset construction framework effectively merged heterogeneous sources from open domain semantic parsing and spoken dialogue systems by utilizing techniques including tree ontology annotation, question-answer pair to declarative sentence conversion, and predicate unification, all with minimum post-editing. We present systematic evaluation on DART as well as new state-of-the-art results on WebNLG 2017 to show that DART (1) poses new challenges to existing data-to-text datasets and (2) facilitates out-of-domain generalization. Our data and code can be found at https://github.com/Yale-LILY/dart.",
}
"""
_DESCRIPTION = """\
DART is a large and open-domain structured DAta Record to Text generation corpus
with high-quality sentence annotations with each input being a set of
entity-relation triples following a tree-structured ontology. It consists of
82191 examples across different domains with each input being a semantic RDF
triple set derived from data records in tables and the tree ontology of table
schema, annotated with sentence description that covers all facts in the triple set.
"""

_URLs = {
    "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
    "validation": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json",
    "test": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json",
}


class Dart(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "dart"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "dart_id": datasets.Value("int32"),
                "tripleset": [[datasets.Value("string")]],  # list of (subject, predicate, object) triples
                "subtree_was_extended": datasets.Value("bool"),
                "target_sources": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="tripleset", output="target"
            ),
            homepage="https://github.com/Yale-LILY/dart",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation, and test splits."""
        # Passing a dict to `download_and_extract` returns a dict with the
        # same keys, mapping each split name to a local cached file path.
        dl_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl}
            )
            for spl in ["train", "validation", "test"]
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        id_ = -1  # running index over yielded examples
        dart_id = -1  # running index over source DART records (train only)
        for example in data:
            if split == "train":
                # Train: emit one example per human annotation, so a record
                # with k annotations yields k (tripleset, target) pairs.
                dart_id += 1
                for annotation in example["annotations"]:
                    id_ += 1
                    yield id_, {
                        "gem_id": f"dart-{split}-{id_}",
                        "gem_parent_id": f"dart-{split}-{id_}",
                        "dart_id": dart_id,
                        "tripleset": example["tripleset"],
                        "subtree_was_extended": example.get(
                            "subtree_was_extended", None
                        ),  # some records are missing this field
                        "target_sources": [
                            a["source"] for a in example["annotations"]
                        ],
                        "target": annotation["text"],
                        "references": [],
                    }
            else:
                # Validation/test: emit one example per record; all
                # annotations become references, and the first one is also
                # used as the single target.
                id_ += 1
                yield id_, {
                    "gem_id": f"dart-{split}-{id_}",
                    "gem_parent_id": f"dart-{split}-{id_}",
                    "dart_id": id_,
                    "tripleset": example["tripleset"],
                    "subtree_was_extended": example.get(
                        "subtree_was_extended", None
                    ),  # some records are missing this field
                    "target_sources": [
                        a["source"] for a in example["annotations"]
                    ],
                    "target": example["annotations"][0]["text"]
                    if example["annotations"]
                    else "",
                    "references": [
                        a["text"] for a in example["annotations"]
                    ],
                }
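
# Minimal usage sketch, assuming the script is published on the Hub as
# "GEM/dart" (recent versions of `datasets` require `trust_remote_code=True`
# to run script-based datasets like this one):
#
#   from datasets import load_dataset
#
#   dart = load_dataset("GEM/dart", trust_remote_code=True)
#   sample = dart["validation"][0]
#   print(sample["tripleset"], "->", sample["target"])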