Sebastian Gehrmann committed
Commit 59b1700
1 Parent(s): 64a5798

merge split and rephrase into wiki_auto_asset_turk

benchmarks/README.txt ADDED
@@ -0,0 +1,14 @@
+ # IBM Split and Rephrase 2019
+
+ ## Benchmarks
+ This folder includes the two sources for the Split and Rephrase dataset.
+
+ `contract-benchmark.tsv`: Contract Benchmark dataset. Contains hundreds of rows of sample text from legal contracts.
+
+ `wiki-benchmark.tsv`: Wikipedia Benchmark dataset. Contains hundreds of rows of sample text from Wikipedia.
+
+ The `.tsv` files have two columns.
+
+ `complex`: The complex sentence given to the crowdworkers.
+
+ `simple`: The Split and Rephrase rewrites the crowdworkers wrote.
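A quick way to inspect the new benchmark files is to read them with Python's csv module. This is a minimal sketch, not part of the commit, assuming it is run from the repository root and using the `complex`/`simple` columns documented in the README above:

import csv

# Illustrative only: print the first complex/simple pair from the Wikipedia benchmark.
# File path and column names are taken from benchmarks/README.txt above.
with open("benchmarks/wiki-benchmark.tsv", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t")
    first = next(reader)
    print("complex:", first["complex"])
    print("simple: ", first["simple"])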
benchmarks/contract-benchmark.tsv ADDED
The diff for this file is too large to render. See raw diff
benchmarks/wiki-benchmark.tsv ADDED
The diff for this file is too large to render. See raw diff
dataset_infos.json CHANGED
@@ -1,200 +1 @@
1
- {
2
- "wiki_auto_asset_turk": {
3
- "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
4
- "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
5
- "homepage": "https://gem-benchmark.github.io/",
6
- "license": "CC-BY-SA-4.0",
7
- "features": {
8
- "gem_id": {
9
- "dtype": "string",
10
- "id": null,
11
- "_type": "Value"
12
- },
13
- "gem_parent_id": {
14
- "dtype": "string",
15
- "id": null,
16
- "_type": "Value"
17
- },
18
- "source": {
19
- "dtype": "string",
20
- "id": null,
21
- "_type": "Value"
22
- },
23
- "target": {
24
- "dtype": "string",
25
- "id": null,
26
- "_type": "Value"
27
- },
28
- "references": [
29
- {
30
- "dtype": "string",
31
- "id": null,
32
- "_type": "Value"
33
- }
34
- ]
35
- },
36
- "post_processed": null,
37
- "supervised_keys": null,
38
- "builder_name": "gem",
39
- "config_name": "wiki_auto_asset_turk",
40
- "version": {
41
- "version_str": "1.1.0",
42
- "description": null,
43
- "major": 1,
44
- "minor": 1,
45
- "patch": 0
46
- },
47
- "splits": {
48
- "train": {
49
- "name": "train",
50
- "num_bytes": 161096555,
51
- "num_examples": 483801,
52
- "dataset_name": "gem"
53
- },
54
- "validation": {
55
- "name": "validation",
56
- "num_bytes": 8211356,
57
- "num_examples": 20000,
58
- "dataset_name": "gem"
59
- },
60
- "test_asset": {
61
- "name": "test_asset",
62
- "num_bytes": 475360,
63
- "num_examples": 359,
64
- "dataset_name": "gem"
65
- },
66
- "test_turk": {
67
- "name": "test_turk",
68
- "num_bytes": 406866,
69
- "num_examples": 359,
70
- "dataset_name": "gem"
71
- },
72
- "challenge_train_sample": {
73
- "name": "challenge_train_sample",
74
- "num_bytes": 219566,
75
- "num_examples": 500,
76
- "dataset_name": "gem"
77
- },
78
- "challenge_validation_sample": {
79
- "name": "challenge_validation_sample",
80
- "num_bytes": 213072,
81
- "num_examples": 500,
82
- "dataset_name": "gem"
83
- },
84
- "challenge_test_asset_backtranslation": {
85
- "name": "challenge_test_asset_backtranslation",
86
- "num_bytes": 436844,
87
- "num_examples": 359,
88
- "dataset_name": "gem"
89
- },
90
- "challenge_test_asset_bfp02": {
91
- "name": "challenge_test_asset_bfp02",
92
- "num_bytes": 432766,
93
- "num_examples": 359,
94
- "dataset_name": "gem"
95
- },
96
- "challenge_test_asset_bfp05": {
97
- "name": "challenge_test_asset_bfp05",
98
- "num_bytes": 432766,
99
- "num_examples": 359,
100
- "dataset_name": "gem"
101
- },
102
- "challenge_test_asset_nopunc": {
103
- "name": "challenge_test_asset_nopunc",
104
- "num_bytes": 432759,
105
- "num_examples": 359,
106
- "dataset_name": "gem"
107
- },
108
- "challenge_test_turk_backtranslation": {
109
- "name": "challenge_test_turk_backtranslation",
110
- "num_bytes": 417228,
111
- "num_examples": 359,
112
- "dataset_name": "gem"
113
- },
114
- "challenge_test_turk_bfp02": {
115
- "name": "challenge_test_turk_bfp02",
116
- "num_bytes": 414405,
117
- "num_examples": 359,
118
- "dataset_name": "gem"
119
- },
120
- "challenge_test_turk_bfp05": {
121
- "name": "challenge_test_turk_bfp05",
122
- "num_bytes": 414407,
123
- "num_examples": 359,
124
- "dataset_name": "gem"
125
- },
126
- "challenge_test_turk_nopunc": {
127
- "name": "challenge_test_turk_nopunc",
128
- "num_bytes": 414412,
129
- "num_examples": 359,
130
- "dataset_name": "gem"
131
- }
132
- },
133
- "download_checksums": {
134
- "train.tsv": {
135
- "num_bytes": 120678315,
136
- "checksum": "0ed9ea351922ba39a9a2a5a15293619af5f2a94b9ead86b7ef2007bfcb76aadd"
137
- },
138
- "valid.tsv": {
139
- "num_bytes": 4338364,
140
- "checksum": "6be79b5d014a27facc0f3e892cef35774f48f6e08e4d6eefafb801bcf2ab7b09"
141
- },
142
- "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json": {
143
- "num_bytes": 452091,
144
- "checksum": "5a1c82b5b0ca1891efc2d1465045f4866a8794e6322bc7386b5501aaac41ac57"
145
- },
146
- "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip": {
147
- "num_bytes": 1061032,
148
- "checksum": "3dc8e070c8afabde606366bf49fa81b0b62f95933035cc9ea0381d149948f52d"
149
- },
150
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.orig": {
151
- "num_bytes": 43745,
152
- "checksum": "673ceb2672a37168a52040d75e16f9ffd1e3777b9f68e19207f2adf6542723f1"
153
- },
154
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.0": {
155
- "num_bytes": 35457,
156
- "checksum": "66f36029d0c732eb92886021faefe531c6cfd0a32bdbe7ae4aa97fd45bd1b046"
157
- },
158
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.1": {
159
- "num_bytes": 34096,
160
- "checksum": "d323ceb364abbe84c79b14b028aa1ff563cd94955fbab19049612548dbb0f83f"
161
- },
162
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.2": {
163
- "num_bytes": 34348,
164
- "checksum": "786b55f8425ce4a993e98be5e2bea9ef87bf536b96dc13f7a57c4733fdb63e06"
165
- },
166
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.3": {
167
- "num_bytes": 37292,
168
- "checksum": "e211c9e2ede1dfe315097132dbe4feda76b309bdc636a5394cb5d2664ba5bf52"
169
- },
170
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.4": {
171
- "num_bytes": 35887,
172
- "checksum": "37be9cf0592c0f68d87848dc9c442fe62f344518c1993896c00788bf943b755d"
173
- },
174
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.5": {
175
- "num_bytes": 35351,
176
- "checksum": "8485210573a3bd76116de8e978b227677c6c207111a4938729397c4e603dfa46"
177
- },
178
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.6": {
179
- "num_bytes": 35846,
180
- "checksum": "f0cb3ab823d23203ea044f81bd7e67cc823db0632095e43b78a54a9891a0b0a8"
181
- },
182
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.7": {
183
- "num_bytes": 34560,
184
- "checksum": "35cbb8b9964252a1470607634f19ad946c6bc2951b3e500eedd826baf12bd3c8"
185
- },
186
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.8": {
187
- "num_bytes": 35830,
188
- "checksum": "047b6419590b88f93b435d3177bba1883dc9c0dc178676e48470b408236446f4"
189
- },
190
- "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.9": {
191
- "num_bytes": 35313,
192
- "checksum": "3f5745e4f2743563b88ea4284ec35fa4ddb68d62de80b63ffb87751b998fe6b8"
193
- }
194
- },
195
- "download_size": 126927527,
196
- "post_processing_size": null,
197
- "dataset_size": 174018362,
198
- "size_in_bytes": 300945889
199
- }
200
- }
1
+ {"wiki_auto_asset_turk": {"description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n", "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n", "homepage": "https://gem-benchmark.github.io/", "license": "CC-BY-SA-4.0", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "gem_parent_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "gem", "config_name": "wiki_auto_asset_turk", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 161096555, "num_examples": 483801, "dataset_name": "gem"}, "validation": {"name": "validation", "num_bytes": 8211356, "num_examples": 20000, "dataset_name": "gem"}, "test_asset": {"name": "test_asset", "num_bytes": 475360, "num_examples": 359, "dataset_name": "gem"}, "test_turk": {"name": "test_turk", "num_bytes": 406866, "num_examples": 359, "dataset_name": "gem"}, "challenge_train_sample": {"name": "challenge_train_sample", "num_bytes": 219566, "num_examples": 500, "dataset_name": "gem"}, "challenge_validation_sample": {"name": "challenge_validation_sample", "num_bytes": 213072, "num_examples": 500, "dataset_name": 
"gem"}, "challenge_test_asset_backtranslation": {"name": "challenge_test_asset_backtranslation", "num_bytes": 436844, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_asset_bfp02": {"name": "challenge_test_asset_bfp02", "num_bytes": 432766, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_asset_bfp05": {"name": "challenge_test_asset_bfp05", "num_bytes": 432766, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_asset_nopunc": {"name": "challenge_test_asset_nopunc", "num_bytes": 432759, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_turk_backtranslation": {"name": "challenge_test_turk_backtranslation", "num_bytes": 417228, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_turk_bfp02": {"name": "challenge_test_turk_bfp02", "num_bytes": 414405, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_turk_bfp05": {"name": "challenge_test_turk_bfp05", "num_bytes": 414407, "num_examples": 359, "dataset_name": "gem"}, "challenge_test_turk_nopunc": {"name": "challenge_test_turk_nopunc", "num_bytes": 414412, "num_examples": 359, "dataset_name": "gem"}}, "download_checksums": {"train.tsv": {"num_bytes": 120678315, "checksum": "0ed9ea351922ba39a9a2a5a15293619af5f2a94b9ead86b7ef2007bfcb76aadd"}, "valid.tsv": {"num_bytes": 4338364, "checksum": "6be79b5d014a27facc0f3e892cef35774f48f6e08e4d6eefafb801bcf2ab7b09"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json": {"num_bytes": 452091, "checksum": "5a1c82b5b0ca1891efc2d1465045f4866a8794e6322bc7386b5501aaac41ac57"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip": {"num_bytes": 1061032, "checksum": "3dc8e070c8afabde606366bf49fa81b0b62f95933035cc9ea0381d149948f52d"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.orig": {"num_bytes": 43745, "checksum": "673ceb2672a37168a52040d75e16f9ffd1e3777b9f68e19207f2adf6542723f1"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.0": {"num_bytes": 35457, "checksum": "66f36029d0c732eb92886021faefe531c6cfd0a32bdbe7ae4aa97fd45bd1b046"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.1": {"num_bytes": 34096, "checksum": "d323ceb364abbe84c79b14b028aa1ff563cd94955fbab19049612548dbb0f83f"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.2": {"num_bytes": 34348, "checksum": "786b55f8425ce4a993e98be5e2bea9ef87bf536b96dc13f7a57c4733fdb63e06"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.3": {"num_bytes": 37292, "checksum": "e211c9e2ede1dfe315097132dbe4feda76b309bdc636a5394cb5d2664ba5bf52"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.4": {"num_bytes": 35887, "checksum": "37be9cf0592c0f68d87848dc9c442fe62f344518c1993896c00788bf943b755d"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.5": {"num_bytes": 35351, "checksum": "8485210573a3bd76116de8e978b227677c6c207111a4938729397c4e603dfa46"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.6": {"num_bytes": 35846, "checksum": "f0cb3ab823d23203ea044f81bd7e67cc823db0632095e43b78a54a9891a0b0a8"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.7": {"num_bytes": 34560, "checksum": "35cbb8b9964252a1470607634f19ad946c6bc2951b3e500eedd826baf12bd3c8"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.8": {"num_bytes": 
35830, "checksum": "047b6419590b88f93b435d3177bba1883dc9c0dc178676e48470b408236446f4"}, "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.9": {"num_bytes": 35313, "checksum": "3f5745e4f2743563b88ea4284ec35fa4ddb68d62de80b63ffb87751b998fe6b8"}}, "download_size": 126927527, "post_processing_size": null, "dataset_size": 174018362, "size_in_bytes": 300945889}, "default": {"description": "WikiAuto provides a set of aligned sentences from English Wikipedia and Simple\nEnglish Wikipedia as a resource to train sentence simplification systems.\n\nThe authors first crowd-sourced a set of manual alignments between sentences in\na subset of the Simple English Wikipedia and their corresponding versions in\nEnglish Wikipedia (this corresponds to the manual config in this version of the\ndataset), then trained a neural CRF system to predict these alignments.\n\nThe trained alignment prediction model was then applied to the other articles in\nSimple English Wikipedia with an English counterpart to create a larger corpus\nof aligned sentences (corresponding to the auto and auto_acl configs here).\n", "citation": "@inproceedings{jiang-etal-2020-neural,\n title = \"Neural {CRF} Model for Sentence Alignment in Text Simplification\",\n author = \"Jiang, Chao and\n Maddela, Mounica and\n Lan, Wuwei and\n Zhong, Yang and\n Xu, Wei\",\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n month = jul,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.acl-main.709\",\n doi = \"10.18653/v1/2020.acl-main.709\",\n pages = \"7943--7960\",\n}\n", "homepage": "", "license": "", "features": {"gem_id": {"dtype": "string", "id": null, "_type": "Value"}, "gem_parent_id": {"dtype": "string", "id": null, "_type": "Value"}, "source": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"dtype": "string", "id": null, "_type": "Value"}, "references": [{"dtype": "string", "id": null, "_type": "Value"}]}, "post_processed": null, "supervised_keys": {"input": "source", "output": "target"}, "task_templates": null, "builder_name": "wiki_auto", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 161096751, "num_examples": 483801, "dataset_name": "wiki_auto"}, "validation": {"name": "validation", "num_bytes": 8211356, "num_examples": 20000, "dataset_name": "wiki_auto"}, "test_asset": {"name": "test_asset", "num_bytes": 475360, "num_examples": 359, "dataset_name": "wiki_auto"}, "test_turk": {"name": "test_turk", "num_bytes": 406866, "num_examples": 359, "dataset_name": "wiki_auto"}, "test_contract": {"name": "test_contract", "num_bytes": 567023, "num_examples": 659, "dataset_name": "wiki_auto"}, "test_wiki": {"name": "test_wiki", "num_bytes": 423035, "num_examples": 720, "dataset_name": "wiki_auto"}, "challenge_train_sample": {"name": "challenge_train_sample", "num_bytes": 219566, "num_examples": 500, "dataset_name": "wiki_auto"}, "challenge_validation_sample": {"name": "challenge_validation_sample", "num_bytes": 213072, "num_examples": 500, "dataset_name": "wiki_auto"}, "challenge_test_asset_backtranslation": {"name": "challenge_test_asset_backtranslation", "num_bytes": 436844, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_asset_bfp02": {"name": "challenge_test_asset_bfp02", "num_bytes": 432766, 
"num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_asset_bfp05": {"name": "challenge_test_asset_bfp05", "num_bytes": 432766, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_asset_nopunc": {"name": "challenge_test_asset_nopunc", "num_bytes": 432759, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_turk_backtranslation": {"name": "challenge_test_turk_backtranslation", "num_bytes": 417228, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_turk_bfp02": {"name": "challenge_test_turk_bfp02", "num_bytes": 414405, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_turk_bfp05": {"name": "challenge_test_turk_bfp05", "num_bytes": 414407, "num_examples": 359, "dataset_name": "wiki_auto"}, "challenge_test_turk_nopunc": {"name": "challenge_test_turk_nopunc", "num_bytes": 414412, "num_examples": 359, "dataset_name": "wiki_auto"}}, "download_checksums": {"train.tsv": {"num_bytes": 120678315, "checksum": "0ed9ea351922ba39a9a2a5a15293619af5f2a94b9ead86b7ef2007bfcb76aadd"}, "valid.tsv": {"num_bytes": 4338364, "checksum": "6be79b5d014a27facc0f3e892cef35774f48f6e08e4d6eefafb801bcf2ab7b09"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json": {"num_bytes": 452091, "checksum": "5a1c82b5b0ca1891efc2d1465045f4866a8794e6322bc7386b5501aaac41ac57"}, "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip": {"num_bytes": 1061032, "checksum": "3dc8e070c8afabde606366bf49fa81b0b62f95933035cc9ea0381d149948f52d"}, "benchmarks/contract-benchmark.tsv": {"num_bytes": 332032, "checksum": "c82249c44a37e3c2d93bb02377e065bedab2b335806e199998289bc66bb5a38b"}, "benchmarks/wiki-benchmark.tsv": {"num_bytes": 239301, "checksum": "3df841c8643791774575cd2fa655e5247de8cb4ae226eb3b6b84885a53e041f2"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.orig": {"num_bytes": 43745, "checksum": "673ceb2672a37168a52040d75e16f9ffd1e3777b9f68e19207f2adf6542723f1"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.0": {"num_bytes": 35457, "checksum": "66f36029d0c732eb92886021faefe531c6cfd0a32bdbe7ae4aa97fd45bd1b046"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.1": {"num_bytes": 34096, "checksum": "d323ceb364abbe84c79b14b028aa1ff563cd94955fbab19049612548dbb0f83f"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.2": {"num_bytes": 34348, "checksum": "786b55f8425ce4a993e98be5e2bea9ef87bf536b96dc13f7a57c4733fdb63e06"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.3": {"num_bytes": 37292, "checksum": "e211c9e2ede1dfe315097132dbe4feda76b309bdc636a5394cb5d2664ba5bf52"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.4": {"num_bytes": 35887, "checksum": "37be9cf0592c0f68d87848dc9c442fe62f344518c1993896c00788bf943b755d"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.5": {"num_bytes": 35351, "checksum": "8485210573a3bd76116de8e978b227677c6c207111a4938729397c4e603dfa46"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.6": {"num_bytes": 35846, "checksum": "f0cb3ab823d23203ea044f81bd7e67cc823db0632095e43b78a54a9891a0b0a8"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.7": {"num_bytes": 34560, "checksum": 
"35cbb8b9964252a1470607634f19ad946c6bc2951b3e500eedd826baf12bd3c8"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.8": {"num_bytes": 35830, "checksum": "047b6419590b88f93b435d3177bba1883dc9c0dc178676e48470b408236446f4"}, "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.9": {"num_bytes": 35313, "checksum": "3f5745e4f2743563b88ea4284ec35fa4ddb68d62de80b63ffb87751b998fe6b8"}}, "download_size": 127498860, "post_processing_size": null, "dataset_size": 175008616, "size_in_bytes": 302507476}}
wiki_auto_asset_turk.py CHANGED
@@ -41,6 +41,8 @@ _URLs = {
     "validation": "valid.tsv",
     "test_turk": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json",
     "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip",
+    "test_contract": "benchmarks/contract-benchmark.tsv",
+    "test_wiki": "benchmarks/wiki-benchmark.tsv",
 }
 
 # Add Asset files.
@@ -154,6 +156,20 @@ class WikiAuto(datasets.GeneratorBasedBuilder):
                     "split": "test_turk",
                 },
             ),
+            datasets.SplitGenerator(
+                name="test_contract",
+                gen_kwargs={
+                    "filepath": dl_dir["test_contract"],
+                    "split": "test_contract",
+                },
+            ),
+            datasets.SplitGenerator(
+                name="test_wiki",
+                gen_kwargs={
+                    "filepath": dl_dir["test_wiki"],
+                    "split": "test_wiki",
+                },
+            ),
         ] + [
             datasets.SplitGenerator(
                 name=challenge_split,
@@ -205,6 +221,17 @@ class WikiAuto(datasets.GeneratorBasedBuilder):
                     "source": lines[0].strip(),
                     "references": [line.strip() for line in lines[1:]],
                 }
+        elif split == "test_wiki" or split == "test_contract":
+            with open(filepath, 'r') as f:
+                reader = csv.DictReader(f, delimiter="\t")
+                for id_, entry in enumerate(reader):
+                    yield id_, {
+                        "gem_id": f"wiki_auto_asset_turk-{split}-{id_}",
+                        "gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}",
+                        "target": entry["simple"],
+                        "source": entry["complex"],
+                        "references": [entry["simple"]],
+                    }
         else:
             exples = json.load(open(filepath, encoding="utf-8"))
             if isinstance(exples, dict):
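For a quick local sanity check of what the new branch yields, the added TSV logic can be mirrored outside the builder. This is a sketch under the assumption that it is run from the repository root; it is not a replacement for the loader itself:

import csv

def preview_tsv_split(filepath, split):
    """Mirror the new test_wiki/test_contract branch of _generate_examples (illustrative)."""
    with open(filepath, encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t")
        for id_, entry in enumerate(reader):
            yield id_, {
                "gem_id": f"wiki_auto_asset_turk-{split}-{id_}",
                "gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}",
                "target": entry["simple"],
                "source": entry["complex"],
                "references": [entry["simple"]],
            }

# Example: look at the first contract-benchmark entry.
first_id, example = next(preview_tsv_split("benchmarks/contract-benchmark.tsv", "test_contract"))
print(first_id, example["gem_id"], example["source"])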