Datasets: GEM /

Languages: English
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: unknown
Source Datasets: original
Tags: data-to-text
License: cc-by-sa-4.0

Sebastian Gehrmann committed on
Commit 6d41ecf
1 Parent(s): 0708081

initial data loader

Files changed (2):
  1. dataset_infos.json +244 -0
  2. web_nlg.py +165 -0
dataset_infos.json ADDED
@@ -0,0 +1,244 @@
{
  "en": {
    "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
    "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
    "homepage": "https://gem-benchmark.github.io/",
    "license": "CC-BY-SA-4.0",
    "features": {
      "gem_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "gem_parent_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "input": [
        {
          "dtype": "string",
          "id": null,
          "_type": "Value"
        }
      ],
      "target": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "references": [
        {
          "dtype": "string",
          "id": null,
          "_type": "Value"
        }
      ],
      "category": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "webnlg_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "gem",
    "config_name": "en",
    "version": {
      "version_str": "1.1.0",
      "description": null,
      "major": 1,
      "minor": 1,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 13067615,
        "num_examples": 35426,
        "dataset_name": "gem"
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1153995,
        "num_examples": 1667,
        "dataset_name": "gem"
      },
      "test": {
        "name": "test",
        "num_bytes": 1403601,
        "num_examples": 1779,
        "dataset_name": "gem"
      },
      "challenge_train_sample": {
        "name": "challenge_train_sample",
        "num_bytes": 193198,
        "num_examples": 502,
        "dataset_name": "gem"
      },
      "challenge_validation_sample": {
        "name": "challenge_validation_sample",
        "num_bytes": 359868,
        "num_examples": 499,
        "dataset_name": "gem"
      },
      "challenge_test_scramble": {
        "name": "challenge_test_scramble",
        "num_bytes": 402407,
        "num_examples": 500,
        "dataset_name": "gem"
      },
      "challenge_test_numbers": {
        "name": "challenge_test_numbers",
        "num_bytes": 409213,
        "num_examples": 500,
        "dataset_name": "gem"
      }
    },
    "download_checksums": {
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json": {
        "num_bytes": 10135450,
        "checksum": "959646a986465c436362dfc44bb4966d5a2d39f2725b39fe32701981daf666d0"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json": {
        "num_bytes": 1273018,
        "checksum": "8214bf87ff0369e505ba5c11cdbbaa1127f7908ad77a75a2f1d1a76730c3a954"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json": {
        "num_bytes": 1537460,
        "checksum": "68a4a919a9b805e17959a52f7d5c14a6083bba1459645b4189824fca468e362d"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_en.zip": {
        "num_bytes": 236041,
        "checksum": "42740fc1010cbc490a023b5ec13c55b95acc848b7b459a9a586242b444b1ba40"
      }
    },
    "download_size": 13181969,
    "post_processing_size": null,
    "dataset_size": 16989897,
    "size_in_bytes": 30171866
  },
  "ru": {
    "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
    "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
    "homepage": "https://gem-benchmark.github.io/",
    "license": "CC-BY-SA-4.0",
    "features": {
      "gem_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "gem_parent_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "input": [
        {
          "dtype": "string",
          "id": null,
          "_type": "Value"
        }
      ],
      "target": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "references": [
        {
          "dtype": "string",
          "id": null,
          "_type": "Value"
        }
      ],
      "category": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      },
      "webnlg_id": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "builder_name": "gem",
    "config_name": "ru",
    "version": {
      "version_str": "1.1.0",
      "description": null,
      "major": 1,
      "minor": 1,
      "patch": 0
    },
    "splits": {
      "train": {
        "name": "train",
        "num_bytes": 6888009,
        "num_examples": 14630,
        "dataset_name": "gem"
      },
      "validation": {
        "name": "validation",
        "num_bytes": 795998,
        "num_examples": 790,
        "dataset_name": "gem"
      },
      "test": {
        "name": "test",
        "num_bytes": 1145282,
        "num_examples": 1102,
        "dataset_name": "gem"
      },
      "challenge_train_sample": {
        "name": "challenge_train_sample",
        "num_bytes": 247089,
        "num_examples": 501,
        "dataset_name": "gem"
      },
      "challenge_validation_sample": {
        "name": "challenge_validation_sample",
        "num_bytes": 514117,
        "num_examples": 500,
        "dataset_name": "gem"
      },
      "challenge_test_scramble": {
        "name": "challenge_test_scramble",
        "num_bytes": 521625,
        "num_examples": 500,
        "dataset_name": "gem"
      }
    },
    "download_checksums": {
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json": {
        "num_bytes": 5724246,
        "checksum": "bfaa20bd792a34fda25cff766fbabaf12c56c60b898865a2f976cfaad9c04d2e"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json": {
        "num_bytes": 783342,
        "checksum": "ac2e74d8618196ccf44be695dbdf4960e1f15dc9a39ebd754a808e793327aafd"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json": {
        "num_bytes": 1123674,
        "checksum": "24f4282eb6aa8dc424b6b676e1531a730b508e999b2c55d52215e72e4c7ec524"
      },
      "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_ru.zip": {
        "num_bytes": 223583,
        "checksum": "3d8c6db0a5f941fe897674fd404352fc31b98d50d9492e7cf7f8aed61c69cc21"
      }
    },
    "download_size": 7854845,
    "post_processing_size": null,
    "dataset_size": 10112120,
    "size_in_bytes": 17966965
  }
}
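
For orientation, the feature schema above corresponds to records of roughly the following shape. This is a hypothetical record for illustration only: the field names and types come from the "features" block, while the values (the triple string, the sentence, the ids) are invented placeholders, not taken from the dataset.

# Hypothetical record matching the schema declared in dataset_infos.json
# (field names/types from the "features" block; all values are placeholders).
example = {
    "gem_id": "en-train-0",           # id assigned by the GEM loader
    "gem_parent_id": "en-train-0",    # id of the originating example
    "input": [
        "Subject_Entity | property | Object_Entity",  # one string per RDF triple
    ],
    "target": "A short sentence verbalising the input triples.",
    "references": [],                 # empty on train; all reference texts on validation/test
    "category": "Airport",            # DBpedia category of the triple set
    "webnlg_id": "...",               # identifier from the original WebNLG release
}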
web_nlg.py ADDED
@@ -0,0 +1,165 @@
import json
import os

import datasets

_CITATION = """\
@inproceedings{castro-ferreira20:bilin-bi-direc-webnl-shared,
title={The 2020 Bilingual, Bi-Directional WebNLG+ Shared Task Overview and Evaluation Results (WebNLG+ 2020)},
author={Castro Ferreira, Thiago and
Gardent, Claire and
Ilinykh, Nikolai and
van der Lee, Chris and
Mille, Simon and
Moussallem, Diego and
Shimorina, Anastasia},
booktitle = {Proceedings of the 3rd WebNLG Workshop on Natural Language Generation from the Semantic Web (WebNLG+ 2020)},
pages = "55--76",
year = 2020,
address = {Dublin, Ireland (Virtual)},
publisher = {Association for Computational Linguistics}}
"""

_DESCRIPTION = """\
WebNLG is a bi-lingual dataset (English, Russian) of parallel DBpedia triple sets
and short texts that cover about 450 different DBpedia properties. The WebNLG data
was originally created to promote the development of RDF verbalisers able to
generate short text and to handle micro-planning (i.e., sentence segmentation and
ordering, referring expression generation, aggregation); the goal of the task is
to generate texts starting from 1 to 7 input triples which have entities in common
(so the input is actually a connected Knowledge Graph). The dataset contains about
17,000 triple sets and 45,000 crowdsourced texts in English, and 7,000 triple sets
and 19,000 crowdsourced texts in Russian. A challenging test set section with
entities and/or properties that have not been seen at training time is available.
"""

_LANG = ["en", "ru"]

# Per-language data files: train/validation/test JSON plus a zip with the challenge sets.
_URLs = {
    "en": {
        "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json",
        "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json",
        "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json",
        "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_en.zip",
    },
    "ru": {
        "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json",
        "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json",
        "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json",
        "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_ru.zip",
    },
}


class WebNLG(datasets.GeneratorBasedBuilder):
    """WebNLG data-to-text loader for the GEM benchmark, with one config per language."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
            description="",
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "gem_parent_id": datasets.Value("string"),
                    "input": [datasets.Value("string")],
                    "target": datasets.Value("string"),  # single target for train
                    "references": [datasets.Value("string")],
                    "category": datasets.Value("string"),
                    "webnlg_id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://webnlg-challenge.loria.fr/challenge_2020/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
        lang = str(self.config.name)
        # Challenge sets shared by both languages; English additionally gets a
        # number-replacement set below.
        challenge_sets = [
            ("challenge_train_sample", f"train_web_nlg_{lang}_RandomSample500.json"),
            (
                "challenge_validation_sample",
                f"validation_web_nlg_{lang}_RandomSample500.json",
            ),
            (
                "challenge_test_scramble",
                f"test_web_nlg_{lang}_ScrambleInputStructure500.json",
            ),
        ]
        if lang == "en":
            challenge_sets += [
                (
                    "challenge_test_numbers",
                    f"test_web_nlg_{lang}_replace_numbers_500.json",
                )
            ]
        return [
            datasets.SplitGenerator(
                name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl}
            )
            for spl in ["train", "validation", "test"]
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], self.config.name, filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if "challenge" in split:
            # Challenge files already contain fully formed examples; only the GEM ids
            # are rewritten to point at the challenge split.
            with open(filepath, encoding="utf-8") as f:
                exples = json.load(f)
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                if len(exple) == 0:
                    continue
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
                yield id_, exple
        else:
            with open(filepath, encoding="utf-8") as f:
                examples = json.load(f)
                id_ = -1
                for example in examples["values"]:
                    if split == "train":
                        # Training data is flattened: one example per (input, target)
                        # pair, with an empty reference list.
                        for target in example["target"]:
                            id_ += 1
                            yield id_, {
                                "gem_id": f"{self.config.name}-{split}-{id_}",
                                "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                                "input": example["input"],
                                "target": target,
                                "references": []
                                if split == "train"
                                else example["target"],
                                "category": example["category"],
                                "webnlg_id": example["webnlg-id"],
                            }
                    else:
                        # Validation/test keep all targets as references; the first
                        # reference doubles as the single target.
                        id_ += 1
                        yield id_, {
                            "gem_id": f"{self.config.name}-{split}-{id_}",
                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
                            "input": example["input"],
                            "target": example["target"][0]
                            if len(example["target"]) > 0
                            else "",
                            "references": example["target"],
                            "category": example["category"],
                            "webnlg_id": example["webnlg-id"],
                        }
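
As a quick sanity check, the loader can be exercised with the standard datasets API. The sketch below is a minimal example assuming the datasets library is installed and the script above is saved locally as web_nlg.py; since the dataset's Hub path is not shown on this page, the local script path is used instead.

# Minimal usage sketch (assumes the `datasets` library is installed and the
# script is saved locally as web_nlg.py; the Hub repository path is not assumed).
from datasets import load_dataset

web_nlg_en = load_dataset("web_nlg.py", "en")   # config name: "en" or "ru"
print(web_nlg_en)                               # all splits, including the challenge sets

sample = web_nlg_en["validation"][0]
print(sample["input"])        # list of triple strings
print(sample["target"])       # first reference text (empty string if none)
print(sample["references"])   # all reference texts; empty list on the train split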