Datasets: GEM
Languages: English
Multilinguality: unknown
Size Categories: unknown
Language Creators: unknown
Annotations Creators: none
Source Datasets: original
Tags: data-to-text
License:
Files changed (2)
  1. dataset_infos.json +220 -106
  2. e2e_nlg.py +22 -4
dataset_infos.json CHANGED
@@ -1,108 +1,222 @@
  {
- "e2e_nlg": {
- "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
- "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
- "homepage": "https://gem-benchmark.github.io/",
- "license": "CC-BY-SA-4.0",
- "features": {
- "gem_id": {
- "dtype": "string",
- "id": null,
- "_type": "Value"
- },
- "gem_parent_id": {
- "dtype": "string",
- "id": null,
- "_type": "Value"
- },
- "meaning_representation": {
- "dtype": "string",
- "id": null,
- "_type": "Value"
- },
- "target": {
- "dtype": "string",
- "id": null,
- "_type": "Value"
- },
- "references": [
- {
- "dtype": "string",
- "id": null,
- "_type": "Value"
- }
- ]
- },
- "post_processed": null,
- "supervised_keys": null,
- "builder_name": "gem",
- "config_name": "e2e_nlg",
- "version": {
- "version_str": "1.1.0",
- "description": null,
- "major": 1,
- "minor": 1,
- "patch": 0
- },
- "splits": {
- "train": {
- "name": "train",
- "num_bytes": 9129030,
- "num_examples": 33525,
- "dataset_name": "gem"
- },
- "validation": {
- "name": "validation",
- "num_bytes": 1856097,
- "num_examples": 4299,
- "dataset_name": "gem"
- },
- "test": {
- "name": "test",
- "num_bytes": 2133695,
- "num_examples": 4693,
- "dataset_name": "gem"
- },
- "challenge_train_sample": {
- "name": "challenge_train_sample",
- "num_bytes": 145319,
- "num_examples": 500,
- "dataset_name": "gem"
- },
- "challenge_validation_sample": {
- "name": "challenge_validation_sample",
- "num_bytes": 226525,
- "num_examples": 500,
- "dataset_name": "gem"
- },
- "challenge_test_scramble": {
- "name": "challenge_test_scramble",
- "num_bytes": 236199,
- "num_examples": 500,
- "dataset_name": "gem"
- }
- },
- "download_checksums": {
- "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv": {
- "num_bytes": 11100744,
- "checksum": "12a4f59ec85ddd2586244aaf166f65d1b8cd468b6227e6620108baf118d5b325"
- },
- "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv": {
- "num_bytes": 1581285,
- "checksum": "bb88df2565826a463f96e93a5ab69a8c6460de54f2e68179eb94f0019f430d4d"
- },
- "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv": {
- "num_bytes": 1915378,
- "checksum": "99b43c2769a09d62fc5d37dcffaa59d4092bcffdc611f226258681df61269b17"
- },
- "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip": {
- "num_bytes": 70641,
- "checksum": "5d9db67219c984f778dda42e718bc8199945bde609f0b313153de2894e33a883"
- }
- },
- "download_size": 14668048,
- "post_processing_size": null,
- "dataset_size": 13726865,
- "size_in_bytes": 28394913
- }
+ "e2e_nlg": {
+ "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
+ "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
+ "homepage": "https://gem-benchmark.github.io/",
+ "license": "CC-BY-SA-4.0",
+ "features": {
+ "gem_id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "gem_parent_id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "meaning_representation": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "target": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "references": [
+ {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ }
+ ]
+ },
+ "post_processed": null,
+ "supervised_keys": {
+ "input": "meaning_representation",
+ "output": "target"
+ },
+ "task_templates": null,
+ "builder_name": "new_e2e",
+ "config_name": "default",
+ "version": {
+ "version_str": "1.0.1",
+ "description": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 1
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 9128934,
+ "num_examples": 33525,
+ "dataset_name": "new_e2e"
+ },
+ "validation": {
+ "name": "validation",
+ "num_bytes": 1373692,
+ "num_examples": 1484,
+ "dataset_name": "new_e2e"
+ },
+ "test": {
+ "name": "test",
+ "num_bytes": 1642884,
+ "num_examples": 1847,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_train_sample": {
+ "name": "challenge_train_sample",
+ "num_bytes": 145295,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_validation_sample": {
+ "name": "challenge_validation_sample",
+ "num_bytes": 226501,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_test_scramble": {
+ "name": "challenge_test_scramble",
+ "num_bytes": 236175,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ }
+ },
+ "download_checksums": {
+ "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv": {
+ "num_bytes": 11100744,
+ "checksum": "12a4f59ec85ddd2586244aaf166f65d1b8cd468b6227e6620108baf118d5b325"
+ },
+ "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/validation.json": {
+ "num_bytes": 880752,
+ "checksum": "92206cb272e2cd2a146b0e9255d04f596c8864a328303b09f819c4bba926981d"
+ },
+ "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/test.json": {
+ "num_bytes": 1081450,
+ "checksum": "e52a3cfc76fced9546c8362eb7de4c65dc64c2b935b496916c7ddfa1170b9aaa"
+ },
+ "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip": {
+ "num_bytes": 70641,
+ "checksum": "5d9db67219c984f778dda42e718bc8199945bde609f0b313153de2894e33a883"
+ }
+ },
+ "download_size": 13133587,
+ "post_processing_size": null,
+ "dataset_size": 12753481,
+ "size_in_bytes": 25887068
+ },
+ "default": {
+ "description": "The E2E dataset is designed for a limited-domain data-to-text task --\ngeneration of restaurant descriptions/recommendations based on up to 8 different\nattributes (name, area, price range etc.).\n",
+ "citation": "@inproceedings{e2e_cleaned,\n\taddress = {Tokyo, Japan},\n\ttitle = {Semantic {Noise} {Matters} for {Neural} {Natural} {Language} {Generation}},\n\turl = {https://www.aclweb.org/anthology/W19-8652/},\n\tbooktitle = {Proceedings of the 12th {International} {Conference} on {Natural} {Language} {Generation} ({INLG} 2019)},\n\tauthor = {Du\u0161ek, Ond\u0159ej and Howcroft, David M and Rieser, Verena},\n\tyear = {2019},\n\tpages = {421--426},\n}\n",
+ "homepage": "http://www.macs.hw.ac.uk/InteractionLab/E2E/",
+ "license": "",
+ "features": {
+ "gem_id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "gem_parent_id": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "meaning_representation": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "target": {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ },
+ "references": [
+ {
+ "dtype": "string",
+ "id": null,
+ "_type": "Value"
+ }
+ ]
+ },
+ "post_processed": null,
+ "supervised_keys": {
+ "input": "meaning_representation",
+ "output": "target"
+ },
+ "task_templates": null,
+ "builder_name": "new_e2e",
+ "config_name": "default",
+ "version": {
+ "version_str": "1.0.1",
+ "description": null,
+ "major": 1,
+ "minor": 0,
+ "patch": 1
+ },
+ "splits": {
+ "train": {
+ "name": "train",
+ "num_bytes": 9128934,
+ "num_examples": 33525,
+ "dataset_name": "new_e2e"
+ },
+ "validation": {
+ "name": "validation",
+ "num_bytes": 1013046,
+ "num_examples": 1484,
+ "dataset_name": "new_e2e"
+ },
+ "test": {
+ "name": "test",
+ "num_bytes": 1241649,
+ "num_examples": 1847,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_train_sample": {
+ "name": "challenge_train_sample",
+ "num_bytes": 145295,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_validation_sample": {
+ "name": "challenge_validation_sample",
+ "num_bytes": 226501,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ },
+ "challenge_test_scramble": {
+ "name": "challenge_test_scramble",
+ "num_bytes": 236175,
+ "num_examples": 500,
+ "dataset_name": "new_e2e"
+ }
+ },
+ "download_checksums": {
+ "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv": {
+ "num_bytes": 11100744,
+ "checksum": "12a4f59ec85ddd2586244aaf166f65d1b8cd468b6227e6620108baf118d5b325"
+ },
+ "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/validation.json": {
+ "num_bytes": 880752,
+ "checksum": "92206cb272e2cd2a146b0e9255d04f596c8864a328303b09f819c4bba926981d"
+ },
+ "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/test.json": {
+ "num_bytes": 1081450,
+ "checksum": "e52a3cfc76fced9546c8362eb7de4c65dc64c2b935b496916c7ddfa1170b9aaa"
+ },
+ "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip": {
+ "num_bytes": 70641,
+ "checksum": "5d9db67219c984f778dda42e718bc8199945bde609f0b313153de2894e33a883"
+ }
+ },
+ "download_size": 13133587,
+ "post_processing_size": null,
+ "dataset_size": 11991600,
+ "size_in_bytes": 25125187
+ }
  }
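
The quickest way to see what these metadata changes mean in practice is to summarize the updated dataset_infos.json per config. The snippet below is a reviewer-side sketch, not part of the PR: it assumes the file has been checked out locally under its usual name and relies only on keys visible in the diff above (builder_name, version, splits, download_size, dataset_size). With this PR, both configs should report 33525 train, 1484 validation, and 1847 test examples.

```python
import json

# Hypothetical local check: summarize the dataset_infos.json from this PR.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    version = info["version"]["version_str"]
    print(f"{config_name} (builder={info['builder_name']}, version={version})")
    for split_name, split in info["splits"].items():
        print(f"  {split_name:<30} {split['num_examples']:>6} examples  {split['num_bytes']:>10} bytes")
    print(f"  download_size={info['download_size']}  dataset_size={info['dataset_size']}")
```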
e2e_nlg.py CHANGED
@@ -23,14 +23,14 @@ attributes (name, area, price range etc.).
  
  _URLs = {
      "train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
-     "validation": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv",
-     "test": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv",
+     "validation": "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/validation.json",
+     "test": "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/test.json",
      "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip",
  }
  
  
  class E2ENlg(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
+     VERSION = datasets.Version("1.0.1")
      DEFAULT_CONFIG_NAME = "e2e_nlg"
  
      def _info(self):
@@ -92,6 +92,21 @@ class E2ENlg(datasets.GeneratorBasedBuilder):
                  exple["gem_parent_id"] = exple["gem_id"]
                  exple["gem_id"] = f"e2e_nlg-{split}-{id_}"
                  yield id_, exple
+         if split.startswith("test") or split.startswith("validation"):
+             exples = json.load(open(filepath, encoding="utf-8"))
+             if isinstance(exples, dict):
+                 assert len(exples) == 1, "multiple entries found"
+                 exples = list(exples.values())[0]
+             for id_, exple in enumerate(exples):
+                 if len(exple) == 0:
+                     continue
+                 yield id_, {
+                     "gem_id": f"e2e_nlg-{split}-{id_}",
+                     "gem_parent_id": f"e2e_nlg-{split}-{id_}",
+                     "meaning_representation": exple["meaning_representation"],
+                     "target": exple["references"][0],
+                     "references": exple["references"],
+                 }
          else:
              with open(filepath, encoding="utf-8") as f:
                  reader = csv.DictReader(f)
@@ -101,5 +116,8 @@ class E2ENlg(datasets.GeneratorBasedBuilder):
                          "gem_parent_id": f"e2e_nlg-{split}-{id_}",
                          "meaning_representation": example["mr"],
                          "target": example["ref"],
-                         "references": [] if split == "train" else [example["ref"]],
+                         "references": []
                      }
+ 
+ 
+ 
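
The new `_generate_examples` branch assumes the hosted validation/test JSON is either a plain list of records or a single-key dict wrapping that list, with `meaning_representation` and `references` fields (the first reference doubles as `target`). Below is a small standalone sketch of the same unwrapping logic, useful for spot-checking one of the new files outside the builder; the expected count of 1484 comes from the split metadata above, and everything else mirrors the diff.

```python
import json
import urllib.request

# Spot-check the new validation file with the same unwrapping logic as the
# updated _generate_examples branch (list, or single-key dict around a list).
URL = "https://raw.githubusercontent.com/jordiclive/GEM_datasets/main/e2e/validation.json"

with urllib.request.urlopen(URL) as resp:
    exples = json.load(resp)

if isinstance(exples, dict):
    assert len(exples) == 1, "multiple entries found"
    exples = list(exples.values())[0]

print(len(exples))  # expected: 1484, matching the validation split metadata

first = next(e for e in exples if e)  # skip empty records, as the builder does
print(sorted(first.keys()))           # should include meaning_representation and references
print(first["meaning_representation"])
print(first["references"][0])         # what the builder emits as `target`
```

Emitting `target` as `references[0]` keeps the JSON-backed splits on the same schema as the CSV-backed train split, so code that reads `target` keeps working while multi-reference evaluation can use the full `references` list.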