Datasets: GEM / dart
Languages: English
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: none
Source Datasets: original
ArXiv:
Tags: data-to-text
License:
import json
import datasets

_CITATION = """\
@inproceedings{nan-etal-2021-dart,
    title = "{DART}: Open-Domain Structured Data Record to Text Generation",
    author = "Nan, Linyong  and
      Radev, Dragomir  and
      Zhang, Rui  and
      Rau, Amrit  and
      Sivaprasad, Abhinand  and
      Hsieh, Chiachun  and
      Tang, Xiangru  and
      Vyas, Aadit  and
      Verma, Neha  and
      Krishna, Pranav  and
      Liu, Yangxiaokang  and
      Irwanto, Nadia  and
      Pan, Jessica  and
      Rahman, Faiaz  and
      Zaidi, Ahmad  and
      Mutuma, Mutethia  and
      Tarabar, Yasin  and
      Gupta, Ankit  and
      Yu, Tao  and
      Tan, Yi Chern  and
      Lin, Xi Victoria  and
      Xiong, Caiming  and
      Socher, Richard  and
      Rajani, Nazneen Fatema",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.37",
    doi = "10.18653/v1/2021.naacl-main.37",
    pages = "432--447",
    abstract = "We present DART, an open domain structured DAta Record to Text generation dataset with over 82k instances (DARTs). Data-to-text annotations can be a costly process, especially when dealing with tables which are the major source of structured data and contain nontrivial structures. To this end, we propose a procedure of extracting semantic triples from tables that encodes their structures by exploiting the semantic dependencies among table headers and the table title. Our dataset construction framework effectively merged heterogeneous sources from open domain semantic parsing and spoken dialogue systems by utilizing techniques including tree ontology annotation, question-answer pair to declarative sentence conversion, and predicate unification, all with minimum post-editing. We present systematic evaluation on DART as well as new state-of-the-art results on WebNLG 2017 to show that DART (1) poses new challenges to existing data-to-text datasets and (2) facilitates out-of-domain generalization. Our data and code can be found at https://github.com/Yale-LILY/dart.",
}
"""

_DESCRIPTION = """\
DART is a large, open-domain structured DAta Record to Text generation corpus
with high-quality sentence annotations. Each input is a set of entity-relation
triples following a tree-structured ontology. The corpus consists of 82,191
examples across different domains; each input is a semantic RDF triple set
derived from data records in tables and the tree ontology of the table schema,
annotated with a sentence description that covers all facts in the triple set.
"""

_URLs = {
    "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
    "validation": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json",
    "test": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json",
}
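# Each file above is a plain JSON list. As read by _generate_examples below,
# every element provides a "tripleset", an "annotations" list of
# {"source", "text"} dicts, and (for some entries) a "subtree_was_extended" flag.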


class Dart(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "dart"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "dart_id": datasets.Value("int32"),
                "tripleset": [[datasets.Value("string")]],  # list of triples
                "subtree_was_extended": datasets.Value("bool"),
                "target_sources": [datasets.Value("string")],
                "target": datasets.Value("string"),  # single target for train
                "references": [datasets.Value("string")],
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="tripleset", output="target"
            ),
            homepage="https://github.com/Yale-LILY/dart",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl}
            )
            for spl in ["train", "validation", "test"]
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            id_ = -1
            i = -1
            for example in data:
                if split == "train":
                    i += 1
                    for annotation in example["annotations"]:
                        id_ += 1
                        yield id_, {
                            "gem_id": f"dart-{split}-{id_}",
                            "gem_parent_id": f"dart-{split}-{id_}",
                            "dart_id": i,
                            "tripleset": example["tripleset"],
                            "subtree_was_extended": example.get(
                                "subtree_was_extended", None
                            ),  # some are missing
                            "target_sources": [
                                annotation["source"]
                                for annotation in example["annotations"]
                            ],
                            "target": annotation["text"],
                            "references": [],
                        }
                else:
                    id_ += 1
                    yield id_, {
                        "gem_id": f"dart-{split}-{id_}",
                        "gem_parent_id": f"dart-{split}-{id_}",
                        "dart_id": id_,
                        "tripleset": example["tripleset"],
                        "subtree_was_extended": example.get(
                            "subtree_was_extended", None
                        ),  # some are missing
                        "target_sources": [
                            annotation["source"]
                            for annotation in example["annotations"]
                        ],
                        "target": example["annotations"][0]["text"]
                        if len(example["annotations"]) > 0
                        else "",
                        "references": [
                            annotation["text"] for annotation in example["annotations"]
                        ],
                    }
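# A minimal usage sketch (illustrative; assumes this script is saved locally as
# dart.py -- the path below is an assumption, not part of the loader):
#
#   import datasets
#   dset = datasets.load_dataset("./dart.py", split="validation")
#   print(dset[0]["tripleset"])
#   print(dset[0]["references"])
#
# load_dataset accepts a path to a dataset script and yields examples with the
# features declared in _info() above.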