Sebastian Gehrmann committed on
Commit a95be6a
1 Parent(s): b5662c7
Files changed (2)
  1. dart.py +144 -0
  2. dataset_infos.json +107 -0
dart.py ADDED
@@ -0,0 +1,144 @@
+import csv
+import json
+import os
+import datasets
+
+_CITATION = """\
+@inproceedings{nan-etal-2021-dart,
+    title = "{DART}: Open-Domain Structured Data Record to Text Generation",
+    author = "Nan, Linyong and
+      Radev, Dragomir and
+      Zhang, Rui and
+      Rau, Amrit and
+      Sivaprasad, Abhinand and
+      Hsieh, Chiachun and
+      Tang, Xiangru and
+      Vyas, Aadit and
+      Verma, Neha and
+      Krishna, Pranav and
+      Liu, Yangxiaokang and
+      Irwanto, Nadia and
+      Pan, Jessica and
+      Rahman, Faiaz and
+      Zaidi, Ahmad and
+      Mutuma, Mutethia and
+      Tarabar, Yasin and
+      Gupta, Ankit and
+      Yu, Tao and
+      Tan, Yi Chern and
+      Lin, Xi Victoria and
+      Xiong, Caiming and
+      Socher, Richard and
+      Rajani, Nazneen Fatema",
+    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
+    month = jun,
+    year = "2021",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.naacl-main.37",
+    doi = "10.18653/v1/2021.naacl-main.37",
+    pages = "432--447",
+    abstract = "We present DART, an open domain structured DAta Record to Text generation dataset with over 82k instances (DARTs). Data-to-text annotations can be a costly process, especially when dealing with tables which are the major source of structured data and contain nontrivial structures. To this end, we propose a procedure of extracting semantic triples from tables that encodes their structures by exploiting the semantic dependencies among table headers and the table title. Our dataset construction framework effectively merged heterogeneous sources from open domain semantic parsing and spoken dialogue systems by utilizing techniques including tree ontology annotation, question-answer pair to declarative sentence conversion, and predicate unification, all with minimum post-editing. We present systematic evaluation on DART as well as new state-of-the-art results on WebNLG 2017 to show that DART (1) poses new challenges to existing data-to-text datasets and (2) facilitates out-of-domain generalization. Our data and code can be found at https://github.com/Yale-LILY/dart.",
+}
+"""
+
+_DESCRIPTION = """\
+DART is a large and open-domain structured DAta Record to Text generation corpus
+with high-quality sentence annotations with each input being a set of
+entity-relation triples following a tree-structured ontology. It consists of
+82191 examples across different domains with each input being a semantic RDF
+triple set derived from data records in tables and the tree ontology of table
+schema, annotated with sentence description that covers all facts in the triple set.
+"""
+
+_URLs = {
+    "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
+    "validation": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json",
+    "test": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json",
+}
+
+
+class Dart(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+    DEFAULT_CONFIG_NAME = "dart"
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "gem_id": datasets.Value("string"),
+                "gem_parent_id": datasets.Value("string"),
+                "dart_id": datasets.Value("int32"),
+                "tripleset": [[datasets.Value("string")]],  # list of triples
+                "subtree_was_extended": datasets.Value("bool"),
+                "target_sources": [datasets.Value("string")],
+                "target": datasets.Value("string"),  # single target for train
+                "references": [datasets.Value("string")],
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            supervised_keys=datasets.info.SupervisedKeysData(
+                input="tripleset", output="target"
+            ),
+            homepage="",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        dl_dir = dl_manager.download_and_extract(_URLs)
+        return [
+            datasets.SplitGenerator(
+                name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl}
+            )
+            for spl in ["train", "validation", "test"]
+        ]
+
+    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
+        """Yields examples."""
+        with open(filepath, encoding="utf-8") as f:
+            data = json.loads(f.read())
+            id_ = -1
+            i = -1
+            for example in data:
+                if split == "train":
+                    i += 1
+                    for annotation in example["annotations"]:
+                        id_ += 1
+                        yield id_, {
+                            "gem_id": f"dart-{split}-{id_}",
+                            "gem_parent_id": f"dart-{split}-{id_}",
+                            "dart_id": i,
+                            "tripleset": example["tripleset"],
+                            "subtree_was_extended": example.get(
+                                "subtree_was_extended", None
+                            ),  # some are missing
+                            "target_sources": [
+                                annotation["source"]
+                                for annotation in example["annotations"]
+                            ],
+                            "target": annotation["text"],
+                            "references": [],
+                        }
+                else:
+                    id_ += 1
+                    yield id_, {
+                        "gem_id": f"dart-{split}-{id_}",
+                        "gem_parent_id": f"dart-{split}-{id_}",
+                        "dart_id": id_,
+                        "tripleset": example["tripleset"],
+                        "subtree_was_extended": example.get(
+                            "subtree_was_extended", None
+                        ),  # some are missing
+                        "target_sources": [
+                            annotation["source"]
+                            for annotation in example["annotations"]
+                        ],
+                        "target": example["annotations"][0]["text"]
+                        if len(example["annotations"]) > 0
+                        else "",
+                        "references": [
+                            annotation["text"] for annotation in example["annotations"]
+                        ],
+                    }
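
For context, a minimal usage sketch (not part of the commit; it assumes the script above is saved locally as ./dart.py and that the installed datasets version still supports loading scripts):

import datasets

# Load one split through the builder defined above; _split_generators downloads
# the v1.1.1 JSON files from the Yale-LILY/dart repository.
validation = datasets.load_dataset("./dart.py", split="validation")

example = validation[0]
# "tripleset" is a list of [subject, predicate, object] string triples.
print(example["tripleset"])
# For validation/test, "target" is the first annotation and "references" holds all
# annotation texts; for train, each annotation becomes its own example and
# "references" stays empty.
print(example["target"], example["references"])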
dataset_infos.json ADDED
@@ -0,0 +1,107 @@
+{
+    "dart": {
+        "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
+        "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
+        "homepage": "https://gem-benchmark.github.io/",
+        "license": "CC-BY-SA-4.0",
+        "features": {
+            "gem_id": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "gem_parent_id": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "dart_id": {
+                "dtype": "int32",
+                "id": null,
+                "_type": "Value"
+            },
+            "tripleset": [
+                [
+                    {
+                        "dtype": "string",
+                        "id": null,
+                        "_type": "Value"
+                    }
+                ]
+            ],
+            "subtree_was_extended": {
+                "dtype": "bool",
+                "id": null,
+                "_type": "Value"
+            },
+            "target_sources": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ],
+            "target": {
+                "dtype": "string",
+                "id": null,
+                "_type": "Value"
+            },
+            "references": [
+                {
+                    "dtype": "string",
+                    "id": null,
+                    "_type": "Value"
+                }
+            ]
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "builder_name": "gem",
+        "config_name": "dart",
+        "version": {
+            "version_str": "1.1.0",
+            "description": null,
+            "major": 1,
+            "minor": 1,
+            "patch": 0
+        },
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 23047610,
+                "num_examples": 62659,
+                "dataset_name": "gem"
+            },
+            "validation": {
+                "name": "validation",
+                "num_bytes": 1934054,
+                "num_examples": 2768,
+                "dataset_name": "gem"
+            },
+            "test": {
+                "name": "test",
+                "num_bytes": 3476953,
+                "num_examples": 5097,
+                "dataset_name": "gem"
+            }
+        },
+        "download_checksums": {
+            "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json": {
+                "num_bytes": 22969160,
+                "checksum": "92c8594979c05f508f5739047079ec2ffe5a244e58bfa2b50a9cb8b9c65f5a2b"
+            },
+            "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-dev.json": {
+                "num_bytes": 2468789,
+                "checksum": "56606eac12baa7f0ddb81c61890f9f1a95bace4df8f8989852786358fe5d2b88"
+            },
+            "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-test.json": {
+                "num_bytes": 4501417,
+                "checksum": "984be50fa46d0dbfce1ecfdad4a5c5a5cf82f1be0b124fe94f9f9b175d2a5045"
+            }
+        },
+        "download_size": 29939366,
+        "post_processing_size": null,
+        "dataset_size": 28458617,
+        "size_in_bytes": 58397983
+    }
+}
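
As a sanity check, the recorded file sizes and checksums above can be re-verified by hand. A minimal sketch (an illustration only, not part of the commit; it assumes the checksums are plain SHA-256 digests of the raw downloaded files, which is how the datasets library records them, and that dataset_infos.json is in the working directory):

import hashlib
import json
import urllib.request

with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["dart"]

for url, expected in info["download_checksums"].items():
    payload = urllib.request.urlopen(url).read()
    # Compare size and SHA-256 digest against the values stored in the metadata.
    assert len(payload) == expected["num_bytes"]
    assert hashlib.sha256(payload).hexdigest() == expected["checksum"]
    print("verified", url)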