system (HF staff) committed
Commit 53bc265
1 parent: e3ade7b

Update files from the datasets library (from 1.6.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.0
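This update regenerates the dummy data and reworks gem.py: every config version moves from 1.0.0 to 1.1.0, the four original WikiLingua configs are kept under a `_v0` suffix, per-language WikiLingua configs and challenge sets are added, and a `gem_parent_id` field joins `gem_id` in every schema. A minimal loading sketch (assuming `datasets>=1.6.0` is installed and that this repository resolves as the `gem` dataset on the Hub):

```python
from datasets import load_dataset

# Assumption: "gem" is the dataset name this loading script is published under.
xsum_val = load_dataset("gem", "xsum", split="validation")
wiki_en = load_dataset("gem", "wiki_lingua_english_en", split="train")
print(xsum_val[0]["gem_id"], wiki_en[0]["target"][:80])
```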

Files changed (42)
  1. dataset_infos.json +0 -0
  2. dummy/common_gen/{1.0.0 → 1.1.0}/dummy_data.zip +2 -2
  3. dummy/cs_restaurants/{1.0.0 → 1.1.0}/dummy_data.zip +2 -2
  4. dummy/dart/{1.0.0 → 1.1.0}/dummy_data.zip +0 -0
  5. dummy/e2e_nlg/{1.0.0 → 1.1.0}/dummy_data.zip +2 -2
  6. dummy/mlsum_de/{1.0.0 → 1.1.0}/dummy_data.zip +2 -2
  7. dummy/mlsum_es/1.0.0/dummy_data.zip +0 -3
  8. dummy/mlsum_es/1.1.0/dummy_data.zip +3 -0
  9. dummy/schema_guided_dialog/1.1.0/dummy_data.zip +3 -0
  10. dummy/totto/1.0.0/dummy_data.zip +0 -3
  11. dummy/totto/1.1.0/dummy_data.zip +3 -0
  12. dummy/web_nlg_en/1.0.0/dummy_data.zip +0 -3
  13. dummy/{schema_guided_dialog/1.0.0 → web_nlg_en/1.1.0}/dummy_data.zip +2 -2
  14. dummy/web_nlg_ru/1.0.0/dummy_data.zip +0 -3
  15. dummy/web_nlg_ru/1.1.0/dummy_data.zip +3 -0
  16. dummy/wiki_auto_asset_turk/1.0.0/dummy_data.zip +0 -3
  17. dummy/wiki_auto_asset_turk/1.1.0/dummy_data.zip +3 -0
  18. dummy/wiki_lingua_arabic_ar/1.1.0/dummy_data.zip +3 -0
  19. dummy/wiki_lingua_chinese_zh/1.1.0/dummy_data.zip +3 -0
  20. dummy/wiki_lingua_czech_cs/1.1.0/dummy_data.zip +3 -0
  21. dummy/wiki_lingua_dutch_nl/1.1.0/dummy_data.zip +3 -0
  22. dummy/wiki_lingua_english_en/1.1.0/dummy_data.zip +3 -0
  23. dummy/{wiki_lingua_es_en/1.0.0 → wiki_lingua_es_en_v0/1.1.0}/dummy_data.zip +0 -0
  24. dummy/wiki_lingua_french_fr/1.1.0/dummy_data.zip +3 -0
  25. dummy/wiki_lingua_german_de/1.1.0/dummy_data.zip +3 -0
  26. dummy/wiki_lingua_hindi_hi/1.1.0/dummy_data.zip +3 -0
  27. dummy/wiki_lingua_indonesian_id/1.1.0/dummy_data.zip +3 -0
  28. dummy/wiki_lingua_italian_it/1.1.0/dummy_data.zip +3 -0
  29. dummy/wiki_lingua_japanese_ja/1.1.0/dummy_data.zip +3 -0
  30. dummy/wiki_lingua_korean_ko/1.1.0/dummy_data.zip +3 -0
  31. dummy/wiki_lingua_portuguese_pt/1.1.0/dummy_data.zip +3 -0
  32. dummy/{wiki_lingua_ru_en/1.0.0 → wiki_lingua_ru_en_v0/1.1.0}/dummy_data.zip +0 -0
  33. dummy/wiki_lingua_russian_ru/1.1.0/dummy_data.zip +3 -0
  34. dummy/wiki_lingua_spanish_es/1.1.0/dummy_data.zip +3 -0
  35. dummy/wiki_lingua_thai_th/1.1.0/dummy_data.zip +3 -0
  36. dummy/{wiki_lingua_tr_en/1.0.0 → wiki_lingua_tr_en_v0/1.1.0}/dummy_data.zip +0 -0
  37. dummy/wiki_lingua_turkish_tr/1.1.0/dummy_data.zip +3 -0
  38. dummy/{wiki_lingua_vi_en/1.0.0 → wiki_lingua_vi_en_v0/1.1.0}/dummy_data.zip +0 -0
  39. dummy/wiki_lingua_vietnamese_vi/1.1.0/dummy_data.zip +3 -0
  40. dummy/xsum/1.0.0/dummy_data.zip +0 -3
  41. dummy/xsum/1.1.0/dummy_data.zip +3 -0
  42. gem.py +742 -215
dataset_infos.json CHANGED
The diff for this file is too large to render. See raw diff
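dataset_infos.json is regenerated for the 1.1.0 configs; a quick way to peek at it locally (a sketch, assuming the usual layout in which the top-level keys are config names):

```python
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

# Print a few config names and the splits recorded for one of them.
print(sorted(infos)[:5])
print(list(infos["xsum"]["splits"]))
```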
 
dummy/common_gen/{1.0.0 → 1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:59e6e129316e63c1a54d6ef7bd9a0c540c62ff4c1a36df33327a9e3facd3b4e3
- size 2333
+ oid sha256:686315482fae8bbd0d847372d76c07aa9119c374ab780aaed5b9f41979349a92
+ size 4735
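The dummy_data.zip files are tracked with Git LFS, so each diff here only touches the pointer file: `oid` is the SHA-256 digest of the real archive and `size` is its byte count. A small sketch for checking a local archive against a pointer (hypothetical helper, not part of this repository):

```python
import hashlib
import os


def lfs_pointer_fields(path):
    """Return the (oid, size) pair a Git LFS pointer records for `path`."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest(), os.path.getsize(path)


oid, size = lfs_pointer_fields("dummy_data.zip")
print(f"oid sha256:{oid}")
print(f"size {size}")
```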
dummy/cs_restaurants/{1.0.0 → 1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:25cab7c6d4e34d5fb08d476ce278d5e01ee6211c1b99629795bef0156e6aa785
- size 1841
+ oid sha256:0fbd30e7d0d1d211ea2da944c800ec76b8bdeebebc4f696b792237069e8ae1d9
+ size 4230
dummy/dart/{1.0.0 → 1.1.0}/dummy_data.zip RENAMED
File without changes
dummy/e2e_nlg/{1.0.0 → 1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02c353e0079daa9fc1b1d2e63f4f94a389ab340ad15f78e874c226dc355836ae
- size 1338
+ oid sha256:2d266d483e50599c7b4eedce57d5df1f92f000aa90cbd7fa31eb57f7959a94f1
+ size 3689
dummy/mlsum_de/{1.0.0 → 1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66edd328604c4aa3a9967d81537b438d22202f9830dbda85056fd561b3ae6d4b
- size 17325
+ oid sha256:df4ed9c1975aff72e507da3a8edc236321945612a835a54ca93b5ea2ed0d4c61
+ size 34048
dummy/mlsum_es/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:587408dc43119abcf6d3000266a916233ac5ccabfb5f01e87da55539df303597
- size 23066
dummy/mlsum_es/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:816565f2e923373a93c639d308ab17ca2faae27d226c8186cb391e22db46bc36
+ size 40918
dummy/schema_guided_dialog/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b27ba315658a1cabdfc16ef83eee6bc525347183906bad9dddf4d33b5c48c11a
+ size 12875
dummy/totto/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a730949a9fa8a9d5affcd9ec6069470a531903856f97f73971d5a3ef2f8a8801
- size 24427
dummy/totto/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3aa6b2a296ad9a2c6f52066352132f297fd0eb833c106fbd76fe387c6772a19
+ size 32908
dummy/web_nlg_en/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:11e43d5dc953eae0070317b95ad533a46b8f2dc0c5751d33234d29b1e832bc75
- size 2623
dummy/{schema_guided_dialog/1.0.0 → web_nlg_en/1.1.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:73573be9eb634941d2daa888cfcf504cc3bbabab7a8e0d1712a55e7037b230b0
- size 1899
+ oid sha256:ca1d503751ebb251b1b9315e03d222ba85a6f70d69a80c42259ed0b83a307854
+ size 5754
dummy/web_nlg_ru/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:428efef997ade4b3c7f9b110a681d2a24abe57f40c4f342826f57f85f8fb9ca7
- size 3822
dummy/web_nlg_ru/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64caee03808e0724f6abe03cd8d438305520b99a8d4c8016b3757ed9d40ac5e4
+ size 6279
dummy/wiki_auto_asset_turk/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:80352624751ac6f5a3cb44439470ec3ffec0a901e9eafe83bcf14c61372dbfa0
- size 10318
dummy/wiki_auto_asset_turk/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1344de60da0c4ca84f918e8c587d3fed5326a1deed5924566efe9525f7645843
+ size 23815
dummy/wiki_lingua_arabic_ar/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9bb7f2baf7423770d9f44d84084850c23e36cbf6462b94e5943a49a35d29282
+ size 17747
dummy/wiki_lingua_chinese_zh/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5647262bf23f33dcc884c5674b5f43eca71fc25bddbb9eed291efc9feb7bf05c
+ size 18261
dummy/wiki_lingua_czech_cs/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e829391b38736a189bcaff05356983c52d500fca4bd86b186b26501989e260dd
+ size 21235
dummy/wiki_lingua_dutch_nl/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b567d06578f0a7793a7435058601533b4d279ed9a86879fe7eaa76ed048157e
+ size 17063
dummy/wiki_lingua_english_en/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:472a8592c0bf412172670a1fafd23a54e4bb42ab58c45fae69927420db31a4d5
+ size 9106
dummy/{wiki_lingua_es_en/1.0.0 → wiki_lingua_es_en_v0/1.1.0}/dummy_data.zip RENAMED
File without changes
dummy/wiki_lingua_french_fr/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b23ebb87a54b58bfea9ac012e6a894f381ae560df51218f25f2fe6c30dde0bb
+ size 19014
dummy/wiki_lingua_german_de/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8bcfa7beb23d687c91be4ded92b92df8eddaccad78c88ecce7995206d95df5e
+ size 17761
dummy/wiki_lingua_hindi_hi/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:208ebb495ce596e6c6f089c0e56c3dde89bb9fa1c33f8aa761c3c3f13388806e
+ size 19685
dummy/wiki_lingua_indonesian_id/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88233d0425c7dfc79c1b8d391362aac9c9187be46510ce945f0dab7c5f9eab69
+ size 17529
dummy/wiki_lingua_italian_it/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc45ca716a30d44aa48e471ca2323903f8a6c74c1f77cefdb1d76ed2f46415c7
+ size 19783
dummy/wiki_lingua_japanese_ja/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ce04ea92ab7b9ac1ab1521df2e31c1eeb4cf62d72fda5a4d18c02797c919c07
+ size 17113
dummy/wiki_lingua_korean_ko/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb813186e3e1470817745f88f16e801cd7cdeb529a7a4660b71e885139298a77
+ size 18429
dummy/wiki_lingua_portuguese_pt/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9445b917df8e18396338b11c0d8593d6069166449ef7ef8bc51d3c06711449b
+ size 19252
dummy/{wiki_lingua_ru_en/1.0.0 → wiki_lingua_ru_en_v0/1.1.0}/dummy_data.zip RENAMED
File without changes
dummy/wiki_lingua_russian_ru/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13450e16cec76a371afde6da6ad11b2eb60a39f7eb99dd4b8d7165483b6fcbc3
+ size 18047
dummy/wiki_lingua_spanish_es/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1052e008149de5507c6006ec31ff3bd9f94f0d3756cc2c3742d15c4eca9b417b
+ size 18129
dummy/wiki_lingua_thai_th/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56e58e66d2e99394206f05f8e4cc6d5d488b3339c7c23cf59e6ce6f4cc346230
+ size 17239
dummy/{wiki_lingua_tr_en/1.0.0 → wiki_lingua_tr_en_v0/1.1.0}/dummy_data.zip RENAMED
File without changes
dummy/wiki_lingua_turkish_tr/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3aa9612fd7f32c5d741b6a260ea8eae4c898c66a738d44de6b9df7911aceca7c
+ size 17698
dummy/{wiki_lingua_vi_en/1.0.0 → wiki_lingua_vi_en_v0/1.1.0}/dummy_data.zip RENAMED
File without changes
dummy/wiki_lingua_vietnamese_vi/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25756c0fa718689d2e7f6948d58afc055431a1338c4c6e4de0d9b59f40269d5d
+ size 21258
dummy/xsum/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c5f62f61f9fdb8eed99b3368c890cfc148e950665e53957f575d4c2b65d9fc48
- size 2919
dummy/xsum/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1f81d5669e596bf21e4438bf909d134dc474c3e489bcff6e64434dff67b5427
+ size 22590
gem.py CHANGED
@@ -14,7 +14,6 @@
14
  # limitations under the License.
15
  """GEM: Generation Evaluation Metrics supporting datasets"""
16
 
17
- from __future__ import absolute_import, division, print_function
18
 
19
  import csv
20
  import json
@@ -23,13 +22,71 @@ import os
23
  import datasets
24
 
25
 
26
- # TODO: Add BibTeX citation
27
  _CITATION = """\
28
- @InProceedings{huggingface:dataset,
29
- title = {A great new dataset},
30
- authors={huggingface, Inc.
31
- },
32
- year={2020}
 
33
  }
34
  """
35
 
@@ -53,7 +110,30 @@ _LICENSE = "CC-BY-SA-4.0"
53
  _TASKS = {
54
  "summarization": {
55
  "mlsum": ["mlsum_de", "mlsum_es"],
56
- "wiki_lingua": ["wiki_lingua_es_en", "wiki_lingua_ru_en", "wiki_lingua_tr_en", "wiki_lingua_vi_en"],
 
57
  "xsum": ["xsum"],
58
  },
59
  "struct2text": {
@@ -75,11 +155,13 @@ _TASKS = {
75
  _URLs = {
76
  "common_gen": {
77
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
 
78
  },
79
  "cs_restaurants": {
80
  "train": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/train.json",
81
  "validation": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/devel.json",
82
  "test": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/test.json",
 
83
  },
84
  "dart": {
85
  "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
@@ -90,68 +172,130 @@ _URLs = {
90
  "train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
91
  "validation": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv",
92
  "test": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv",
 
93
  },
94
  "mlsum_de": {
95
  "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip",
96
  "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip",
97
  "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip",
98
  "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
 
99
  },
100
  "mlsum_es": {
101
  "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip",
102
  "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip",
103
  "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip",
104
  "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
 
105
  },
106
  "schema_guided_dialog": {
107
- "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd.json.zip",
 
108
  },
109
  "totto": {
110
  "data": "https://storage.googleapis.com/totto/totto_data.zip",
 
111
  },
112
  "web_nlg_en": {
113
  "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json",
114
  "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json",
115
  "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json",
 
116
  },
117
  "web_nlg_ru": {
118
  "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json",
119
  "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json",
120
  "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json",
 
121
  },
122
  "wiki_auto_asset_turk": {
123
- "train": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/train.tsv",
124
- "validation": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv",
 
 
125
  },
126
- "wiki_lingua_es_en": {
127
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
128
  },
129
- "wiki_lingua_ru_en": {
130
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
131
  },
132
- "wiki_lingua_tr_en": {
133
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
134
  },
135
- "wiki_lingua_vi_en": {
136
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
137
  },
 
138
  "xsum": {
139
  "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
140
  "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
 
141
  },
142
  }
143
 
144
- # Add Turk and Asset files
 
145
  for i in range(10):
146
  _URLs["wiki_auto_asset_turk"][
147
  f"test_asset_{i}"
148
  ] = f"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.{i}"
149
 
150
- for i in range(8):
151
- _URLs["wiki_auto_asset_turk"][
152
- f"test_turk_{i}"
153
- ] = f"https://raw.githubusercontent.com/cocoxu/simplification/master/data/turkcorpus/GEM/test.8turkers.tok.turk.{i}"
154
-
155
  _SGD_ACTS = [
156
  "AFFIRM",
157
  "AFFIRM_INTENT",
@@ -196,7 +340,7 @@ class Gem(datasets.GeneratorBasedBuilder):
196
  BUILDER_CONFIGS = [
197
  datasets.BuilderConfig(
198
  name=conf,
199
- version=datasets.Version("1.0.0"),
200
  description=f"GEM benchmark: {task} task, {conf} subset",
201
  )
202
  for task, dset_confs in _TASKS.items()
@@ -211,6 +355,7 @@ class Gem(datasets.GeneratorBasedBuilder):
211
  features = datasets.Features(
212
  {
213
  "gem_id": datasets.Value("string"),
 
214
  "concept_set_id": datasets.Value("int32"),
215
  "concepts": [datasets.Value("string")],
216
  "target": datasets.Value("string"), # single target for train
@@ -221,6 +366,7 @@ class Gem(datasets.GeneratorBasedBuilder):
221
  features = datasets.Features(
222
  {
223
  "gem_id": datasets.Value("string"),
 
224
  "dialog_act": datasets.Value("string"),
225
  "dialog_act_delexicalized": datasets.Value("string"),
226
  "target_delexicalized": datasets.Value("string"),
@@ -232,6 +378,7 @@ class Gem(datasets.GeneratorBasedBuilder):
232
  features = datasets.Features(
233
  {
234
  "gem_id": datasets.Value("string"),
 
235
  "dart_id": datasets.Value("int32"),
236
  "tripleset": [[datasets.Value("string")]], # list of triples
237
  "subtree_was_extended": datasets.Value("bool"),
@@ -244,6 +391,7 @@ class Gem(datasets.GeneratorBasedBuilder):
244
  features = datasets.Features(
245
  {
246
  "gem_id": datasets.Value("string"),
 
247
  "meaning_representation": datasets.Value("string"),
248
  "target": datasets.Value("string"),
249
  "references": [datasets.Value("string")],
@@ -253,6 +401,7 @@ class Gem(datasets.GeneratorBasedBuilder):
253
  features = datasets.Features(
254
  {
255
  "gem_id": datasets.Value("string"),
 
256
  "text": datasets.Value("string"),
257
  "topic": datasets.Value("string"),
258
  "url": datasets.Value("string"),
@@ -266,6 +415,7 @@ class Gem(datasets.GeneratorBasedBuilder):
266
  features = datasets.Features(
267
  {
268
  "gem_id": datasets.Value("string"),
 
269
  "dialog_acts": [
270
  {
271
  "act": datasets.ClassLabel(names=_SGD_ACTS),
@@ -273,7 +423,9 @@ class Gem(datasets.GeneratorBasedBuilder):
273
  "values": [datasets.Value("string")],
274
  }
275
  ],
 
276
  "dialog_id": datasets.Value("string"),
 
277
  "turn_id": datasets.Value("int32"),
278
  "prompt": datasets.Value("string"),
279
  "target": datasets.Value("string"),
@@ -284,6 +436,7 @@ class Gem(datasets.GeneratorBasedBuilder):
284
  features = datasets.Features(
285
  {
286
  "gem_id": datasets.Value("string"),
 
287
  "totto_id": datasets.Value("int32"),
288
  "table_page_title": datasets.Value("string"),
289
  "table_webpage_url": datasets.Value("string"),
@@ -318,6 +471,7 @@ class Gem(datasets.GeneratorBasedBuilder):
318
  features = datasets.Features(
319
  {
320
  "gem_id": datasets.Value("string"),
 
321
  "input": [datasets.Value("string")],
322
  "target": datasets.Value("string"), # single target for train
323
  "references": [datasets.Value("string")],
@@ -329,26 +483,41 @@ class Gem(datasets.GeneratorBasedBuilder):
329
  features = datasets.Features(
330
  {
331
  "gem_id": datasets.Value("string"),
332
- "source_id": datasets.Value("string"),
333
- "target_id": datasets.Value("string"),
334
  "source": datasets.Value("string"),
335
  "target": datasets.Value("string"),
336
  "references": [datasets.Value("string")],
337
  }
338
  )
339
  elif self.config.name.startswith("wiki_lingua"):
340
- features = datasets.Features(
341
- {
342
- "gem_id": datasets.Value("string"),
343
- "source": datasets.Value("string"),
344
- "target": datasets.Value("string"),
345
- "references": [datasets.Value("string")],
346
- }
347
- )
 
348
  elif self.config.name == "xsum":
349
  features = datasets.Features(
350
  {
351
  "gem_id": datasets.Value("string"),
 
352
  "xsum_id": datasets.Value("string"),
353
  "document": datasets.Value("string"),
354
  "target": datasets.Value("string"),
@@ -368,6 +537,11 @@ class Gem(datasets.GeneratorBasedBuilder):
368
  """Returns SplitGenerators."""
369
  dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
370
  if self.config.name == "common_gen":
 
371
  return [
372
  datasets.SplitGenerator(
373
  name=datasets.Split.TRAIN,
@@ -390,11 +564,34 @@ class Gem(datasets.GeneratorBasedBuilder):
390
  "split": "test",
391
  },
392
  ),
 
393
  ]
394
  elif self.config.name == "cs_restaurants":
 
395
  return [
396
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
397
  for spl in ["train", "validation", "test"]
 
398
  ]
399
  elif self.config.name == "dart":
400
  return [
@@ -402,12 +599,31 @@ class Gem(datasets.GeneratorBasedBuilder):
402
  for spl in ["train", "validation", "test"]
403
  ]
404
  elif self.config.name == "e2e_nlg":
 
405
  return [
406
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
407
  for spl in ["train", "validation", "test"]
 
408
  ]
409
  elif self.config.name.startswith("mlsum"):
410
  lang = self.config.name.split("_")[1]
 
411
  return [
412
  datasets.SplitGenerator(
413
  name=datasets.Split.TRAIN,
@@ -436,15 +652,53 @@ class Gem(datasets.GeneratorBasedBuilder):
436
  "filepaths": dl_dir["bad_ids"],
437
  },
438
  ),
 
439
  ]
440
  elif self.config.name == "schema_guided_dialog":
 
441
  return [
442
  datasets.SplitGenerator(
443
  name=spl, gen_kwargs={"filepath": os.path.join(dl_dir["data"], "gem_sgd.json"), "split": spl}
444
  )
445
  for spl in ["train", "validation", "test"]
 
446
  ]
447
  elif self.config.name == "totto":
 
448
  return [
449
  datasets.SplitGenerator(
450
  name=datasets.Split.TRAIN,
@@ -467,13 +721,63 @@ class Gem(datasets.GeneratorBasedBuilder):
467
  "split": "test",
468
  },
469
  ),
 
470
  ]
471
  elif self.config.name.startswith("web_nlg"):
 
472
  return [
473
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
474
  for spl in ["train", "validation", "test"]
 
475
  ]
476
  elif self.config.name == "wiki_auto_asset_turk":
 
477
  return [
478
  datasets.SplitGenerator(
479
  name=datasets.Split.TRAIN,
@@ -493,46 +797,94 @@ class Gem(datasets.GeneratorBasedBuilder):
493
  name="test_asset",
494
  gen_kwargs={
495
  "filepath": "",
496
- "split": "test",
497
- "filepaths": [dl_dir[f"test_asset_{i}"] for i in range(10)],
498
  },
499
  ),
500
  datasets.SplitGenerator(
501
  name="test_turk",
502
  gen_kwargs={
503
- "filepath": "",
504
- "split": "test",
505
- "filepaths": [dl_dir[f"test_turk_{i}"] for i in range(8)],
506
  },
507
  ),
508
- ]
509
- elif self.config.name.startswith("wiki_lingua"):
510
- lang = self.config.name.split("_")[-2]
511
- base_dir = os.path.join(dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en")
512
- return [
513
  datasets.SplitGenerator(
514
- name=datasets.Split.TRAIN,
515
  gen_kwargs={
516
- "filepath": base_dir,
517
- "split": "train",
518
  },
519
- ),
520
- datasets.SplitGenerator(
521
- name=datasets.Split.VALIDATION,
522
- gen_kwargs={
523
- "filepath": base_dir,
524
- "split": "val",
525
- },
526
- ),
527
- datasets.SplitGenerator(
528
- name=datasets.Split.TEST,
529
- gen_kwargs={
530
- "filepath": base_dir,
531
- "split": "test",
532
- },
533
- ),
534
  ]
 
535
  elif self.config.name == "xsum":
 
536
  return [
537
  datasets.SplitGenerator(
538
  name=datasets.Split.TRAIN,
@@ -558,50 +910,86 @@ class Gem(datasets.GeneratorBasedBuilder):
558
  "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
559
  },
560
  ),
 
561
  ]
562
 
563
  def _generate_examples(self, filepath, split, filepaths=None, lang=None):
564
  """ Yields examples. """
565
  if self.config.name == "common_gen":
566
- with open(filepath, encoding="utf-8") as f:
567
- id_ = -1
568
- i = -1
569
- for row in f:
570
- row = row.replace(", }", "}") # Fix possible JSON format error
571
- data = json.loads(row)
572
- concepts = [word for word in data["concept_set"].split("#")]
573
- if split == "train":
574
- i += 1
575
- for scene in data["scene"]:
 
576
  id_ += 1
577
  yield id_, {
578
  "gem_id": f"{self.config.name}-{split}-{id_}",
579
- "concept_set_id": i,
 
580
  "concepts": concepts,
581
- "target": scene,
582
- "references": [],
583
  }
584
- else:
585
- id_ += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
586
  yield id_, {
587
  "gem_id": f"{self.config.name}-{split}-{id_}",
588
- "concept_set_id": id_,
589
- "concepts": concepts,
590
- "target": "" if split == "test" else data["scene"][0],
591
- "references": [] if split == "test" else data["scene"],
 
 
592
  }
593
- elif self.config.name == "cs_restaurants":
594
- with open(filepath, encoding="utf8") as f:
595
- data = json.load(f)
596
- for id_, instance in enumerate(data):
597
- yield id_, {
598
- "gem_id": f"{self.config.name}-{split}-{id_}",
599
- "dialog_act": instance["da"],
600
- "dialog_act_delexicalized": instance["delex_da"],
601
- "target": instance["text"],
602
- "target_delexicalized": instance["delex_text"],
603
- "references": [] if split == "train" else [instance["text"]],
604
- }
605
  elif self.config.name == "dart":
606
  with open(filepath, encoding="utf-8") as f:
607
  data = json.loads(f.read())
@@ -614,6 +1002,7 @@ class Gem(datasets.GeneratorBasedBuilder):
614
  id_ += 1
615
  yield id_, {
616
  "gem_id": f"{self.config.name}-{split}-{id_}",
 
617
  "dart_id": i,
618
  "tripleset": example["tripleset"],
619
  "subtree_was_extended": example.get("subtree_was_extended", None), # some are missing
@@ -625,6 +1014,7 @@ class Gem(datasets.GeneratorBasedBuilder):
625
  id_ += 1
626
  yield id_, {
627
  "gem_id": f"{self.config.name}-{split}-{id_}",
 
628
  "dart_id": id_,
629
  "tripleset": example["tripleset"],
630
  "subtree_was_extended": example.get("subtree_was_extended", None), # some are missing
@@ -633,69 +1023,145 @@ class Gem(datasets.GeneratorBasedBuilder):
633
  "references": [annotation["text"] for annotation in example["annotations"]],
634
  }
635
  elif self.config.name == "e2e_nlg":
636
- with open(filepath, encoding="utf-8") as f:
637
- reader = csv.DictReader(f)
638
- for id_, example in enumerate(reader):
639
- yield id_, {
640
- "gem_id": f"{self.config.name}-{split}-{id_}",
641
- "meaning_representation": example["mr"],
642
- "target": example["ref"],
643
- "references": [] if split == "train" else [example["ref"]],
644
- }
645
- elif self.config.name.startswith("mlsum"):
646
- bad_ids_dct = json.load(open(filepaths, encoding="utf-8"))
647
- bad_ids = dict((bad_url, True) for _, bad_url in bad_ids_dct[f"{lang}-{split}"])
648
- with open(filepath, encoding="utf-8") as f:
649
- id_ = -1
650
- for line in f:
651
- data = json.loads(line)
652
- if data["url"] in bad_ids: # TODO : check | i or i-1?
653
  continue
654
- else:
655
- id_ += 1
 
656
  yield id_, {
657
  "gem_id": f"{self.config.name}-{split}-{id_}",
658
- "text": data["text"],
659
- "target": data["summary"],
660
- "references": [] if split == "train" else [data["summary"]],
661
- "topic": data["topic"],
662
- "url": data["url"],
663
- "title": data["title"],
664
- "date": data["date"],
665
  }
 
666
  elif self.config.name == "schema_guided_dialog":
667
- examples = json.load(open(filepath, encoding="utf-8"))[split]
668
- for id_, example in enumerate(examples):
669
- yield id_, {
670
- "gem_id": f"{self.config.name}-{split}-{id_}",
671
- "dialog_acts": [
672
- {
673
- "act": act_id,
674
- "slot": slot,
675
- "values": values,
676
- }
677
- for act_id, slot, values in example["da"]
678
- ],
679
- "dialog_id": example["dialog_id"],
680
- "turn_id": example["turn_ix"],
681
- "prompt": example["prompt"],
682
- "target": example["target"],
683
- "references": [] if split == "train" else [example["target"]],
684
- }
 
685
  elif self.config.name == "totto":
686
- with open(filepath, "r", encoding="utf-8") as json_file:
687
- json_list = list(json_file)
688
- id_ = -1
689
- i = -1
690
- for json_str in json_list:
691
- result = json.loads(json_str)
692
- if split == "train":
693
- i += 1
694
- for sentence in result["sentence_annotations"]:
 
695
  id_ += 1
696
  response = {
697
  "gem_id": f"{self.config.name}-{split}-{id_}",
698
- "totto_id": i,
 
699
  "table_page_title": result["table_page_title"],
700
  "table_webpage_url": result["table_webpage_url"],
701
  "table_section_title": result["table_section_title"],
@@ -703,106 +1169,167 @@ class Gem(datasets.GeneratorBasedBuilder):
703
  "table": result["table"],
704
  "highlighted_cells": result["highlighted_cells"],
705
  "example_id": str(result["example_id"]),
706
- "overlap_subset": "none",
707
- "sentence_annotations": [sentence],
708
- "references": [],
709
- "target": sentence["final_sentence"],
710
  }
 
711
  yield id_, response
712
- else:
713
- id_ += 1
714
- response = {
715
- "gem_id": f"{self.config.name}-{split}-{id_}",
716
- "totto_id": id_,
717
- "table_page_title": result["table_page_title"],
718
- "table_webpage_url": result["table_webpage_url"],
719
- "table_section_title": result["table_section_title"],
720
- "table_section_text": result["table_section_text"],
721
- "table": result["table"],
722
- "highlighted_cells": result["highlighted_cells"],
723
- "example_id": str(result["example_id"]),
724
- "overlap_subset": str(result["overlap_subset"]),
725
- }
726
- response["sentence_annotations"] = [] if split == "test" else result["sentence_annotations"]
727
- response["references"] = [
728
- sentence["final_sentence"] for sentence in response["sentence_annotations"]
729
- ]
730
- response["target"] = response["references"][0] if len(response["references"]) > 0 else ""
731
- yield id_, response
732
  elif self.config.name.startswith("web_nlg"):
733
- with open(filepath, encoding="utf-8") as f:
734
- examples = json.load(f)
735
- id_ = -1
736
- for example in examples["values"]:
737
- if split == "train":
738
- for target in example["target"]:
 
739
  id_ += 1
740
  yield id_, {
741
  "gem_id": f"{self.config.name}-{split}-{id_}",
 
742
  "input": example["input"],
743
- "target": target,
744
- "references": [] if split == "train" else example["target"],
745
  "category": example["category"],
746
  "webnlg_id": example["webnlg-id"],
747
  }
748
- else:
749
- id_ += 1
750
- yield id_, {
751
- "gem_id": f"{self.config.name}-{split}-{id_}",
752
- "input": example["input"],
753
- "target": example["target"][0] if len(example["target"]) > 0 else "",
754
- "references": example["target"],
755
- "category": example["category"],
756
- "webnlg_id": example["webnlg-id"],
757
- }
758
  elif self.config.name == "wiki_auto_asset_turk":
759
  if split in ["train", "validation"]:
760
  keys = [
761
- "target_id",
762
- "source_id",
763
- "target",
764
  "source",
 
765
  ]
766
  with open(filepath, encoding="utf-8") as f:
767
  for id_, line in enumerate(f):
768
  values = line.strip().split("\t")
769
- assert len(values) == 5, f"Not enough fields in ---- {line} --- {values}"
770
- example = dict([(k, val) for k, val in zip(keys, values[1:])])
771
  example["gem_id"] = f"{self.config.name}-{split}-{id_}"
 
772
  example["references"] = [] if split == "train" else [example["target"]]
773
  yield id_, example
774
- elif split.startswith("test"):
 
775
  files = [open(f_name, encoding="utf-8") for f_name in filepaths]
776
  for id_, lines in enumerate(zip(*files)):
777
  yield id_, {
778
  "gem_id": f"{self.config.name}-{split}-{id_}",
779
- "source_id": "",
780
- "target_id": "",
781
  "target": lines[1].strip(),
782
  "source": lines[0].strip(),
783
  "references": [line.strip() for line in lines[1:]],
784
  }
 
785
  elif self.config.name.startswith("wiki_lingua"):
786
- with open(os.path.join(filepath, f"{split}.src"), encoding="utf-8") as f_in:
787
- with open(os.path.join(filepath, f"{split}.tgt"), encoding="utf-8") as f_out:
788
- for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
 
789
  yield id_, {
790
  "gem_id": f"{self.config.name}-{split}-{id_}",
791
- "source": src.strip(),
792
- "target": tgt.strip(),
793
- "references": [] if split == "train" else [tgt.strip()],
 
 
794
  }
795
- elif self.config.name == "xsum":
796
- with open(filepath, "r", encoding="utf-8") as f:
797
- split_ids = json.load(f)
798
- for id_, i in enumerate(split_ids[split]):
799
- with open(os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8") as f:
800
- text = "".join([line for line in f.readlines() if line not in _XSUM_REMOVE_LINES and line.strip()])
801
- segs = text.split("[SN]")
802
- yield id_, {
803
- "gem_id": f"{self.config.name}-{split}-{id_}",
804
- "xsum_id": i,
805
- "document": segs[8].strip(),
806
- "target": segs[6].strip(),
807
- "references": [] if split == "train" else [segs[6].strip()],
808
- }
 
14
  # limitations under the License.
15
  """GEM: Generation Evaluation Metrics supporting datasets"""
16
 
 
17
 
18
  import csv
19
  import json
 
22
  import datasets
23
 
24
 
 
25
  _CITATION = """\
26
+ @article{gem_benchmark,
27
+ author = {Sebastian Gehrmann and
28
+ Tosin P. Adewumi and
29
+ Karmanya Aggarwal and
30
+ Pawan Sasanka Ammanamanchi and
31
+ Aremu Anuoluwapo and
32
+ Antoine Bosselut and
33
+ Khyathi Raghavi Chandu and
34
+ Miruna{-}Adriana Clinciu and
35
+ Dipanjan Das and
36
+ Kaustubh D. Dhole and
37
+ Wanyu Du and
38
+ Esin Durmus and
39
+ Ondrej Dusek and
40
+ Chris Emezue and
41
+ Varun Gangal and
42
+ Cristina Garbacea and
43
+ Tatsunori Hashimoto and
44
+ Yufang Hou and
45
+ Yacine Jernite and
46
+ Harsh Jhamtani and
47
+ Yangfeng Ji and
48
+ Shailza Jolly and
49
+ Dhruv Kumar and
50
+ Faisal Ladhak and
51
+ Aman Madaan and
52
+ Mounica Maddela and
53
+ Khyati Mahajan and
54
+ Saad Mahamood and
55
+ Bodhisattwa Prasad Majumder and
56
+ Pedro Henrique Martins and
57
+ Angelina McMillan{-}Major and
58
+ Simon Mille and
59
+ Emiel van Miltenburg and
60
+ Moin Nadeem and
61
+ Shashi Narayan and
62
+ Vitaly Nikolaev and
63
+ Rubungo Andre Niyongabo and
64
+ Salomey Osei and
65
+ Ankur P. Parikh and
66
+ Laura Perez{-}Beltrachini and
67
+ Niranjan Ramesh Rao and
68
+ Vikas Raunak and
69
+ Juan Diego Rodriguez and
70
+ Sashank Santhanam and
71
+ Joao Sedoc and
72
+ Thibault Sellam and
73
+ Samira Shaikh and
74
+ Anastasia Shimorina and
75
+ Marco Antonio Sobrevilla Cabezudo and
76
+ Hendrik Strobelt and
77
+ Nishant Subramani and
78
+ Wei Xu and
79
+ Diyi Yang and
80
+ Akhila Yerukola and
81
+ Jiawei Zhou},
82
+ title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and
83
+ Metrics},
84
+ journal = {CoRR},
85
+ volume = {abs/2102.01672},
86
+ year = {2021},
87
+ url = {https://arxiv.org/abs/2102.01672},
88
+ archivePrefix = {arXiv},
89
+ eprint = {2102.01672}
90
  }
91
  """
92
 
 
110
  _TASKS = {
111
  "summarization": {
112
  "mlsum": ["mlsum_de", "mlsum_es"],
113
+ "wiki_lingua": [
114
+ "wiki_lingua_es_en_v0",
115
+ "wiki_lingua_ru_en_v0",
116
+ "wiki_lingua_tr_en_v0",
117
+ "wiki_lingua_vi_en_v0",
118
+ "wiki_lingua_arabic_ar",
119
+ "wiki_lingua_chinese_zh",
120
+ "wiki_lingua_czech_cs",
121
+ "wiki_lingua_dutch_nl",
122
+ "wiki_lingua_english_en",
123
+ "wiki_lingua_french_fr",
124
+ "wiki_lingua_german_de",
125
+ "wiki_lingua_hindi_hi",
126
+ "wiki_lingua_indonesian_id",
127
+ "wiki_lingua_italian_it",
128
+ "wiki_lingua_japanese_ja",
129
+ "wiki_lingua_korean_ko",
130
+ "wiki_lingua_portuguese_pt",
131
+ "wiki_lingua_russian_ru",
132
+ "wiki_lingua_spanish_es",
133
+ "wiki_lingua_thai_th",
134
+ "wiki_lingua_turkish_tr",
135
+ "wiki_lingua_vietnamese_vi",
136
+ ],
137
  "xsum": ["xsum"],
138
  },
139
  "struct2text": {
 
155
  _URLs = {
156
  "common_gen": {
157
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/common_gen/commongen_data.zip",
158
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/common_gen.zip",
159
  },
160
  "cs_restaurants": {
161
  "train": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/train.json",
162
  "validation": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/devel.json",
163
  "test": "https://raw.githubusercontent.com/UFAL-DSG/cs_restaurant_dataset/master/test.json",
164
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/cs_restaurants.zip",
165
  },
166
  "dart": {
167
  "train": "https://raw.githubusercontent.com/Yale-LILY/dart/master/data/v1.1.1/dart-v1.1.1-full-train.json",
 
172
  "train": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/train-fixed.no-ol.csv",
173
  "validation": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/devel-fixed.no-ol.csv",
174
  "test": "https://github.com/tuetschek/e2e-cleaning/raw/master/cleaned-data/test-fixed.csv",
175
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/e2e_nlg.zip",
176
  },
177
  "mlsum_de": {
178
  "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_train.zip",
179
  "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_val.zip",
180
  "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/de_test.zip",
181
  "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
182
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_de.zip",
183
  },
184
  "mlsum_es": {
185
  "train": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_train.zip",
186
  "validation": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_val.zip",
187
  "test": "https://gitlab.lip6.fr/scialom/mlsum_data/-/raw/master/MLSUM/es_test.zip",
188
  "bad_ids": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_mlsum_bad_ids_fixed.json",
189
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/mlsum_es.zip",
190
  },
191
  "schema_guided_dialog": {
192
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd_context.zip",
193
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/schema_guided_dialog.zip",
194
  },
195
  "totto": {
196
  "data": "https://storage.googleapis.com/totto/totto_data.zip",
197
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/totto.zip",
198
  },
199
  "web_nlg_en": {
200
  "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_train.json",
201
  "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_val.json",
202
  "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_en_test.json",
203
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_en.zip",
204
  },
205
  "web_nlg_ru": {
206
  "train": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_train.json",
207
  "validation": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_val.json",
208
  "test": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_web_nlg/webnlg_ru_test.json",
209
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/web_nlg_ru.zip",
210
  },
211
  "wiki_auto_asset_turk": {
212
+ "train": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.tsv",
213
+ "validation": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/valid.tsv",
214
+ "test_turk": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json",
215
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip",
216
  },
217
+ "wiki_lingua_es_en_v0": {
218
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
219
  },
220
+ "wiki_lingua_ru_en_v0": {
221
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
222
  },
223
+ "wiki_lingua_tr_en_v0": {
224
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
225
  },
226
+ "wiki_lingua_vi_en_v0": {
227
  "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
228
  },
229
+ "wiki_lingua_arabic_ar": {
230
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/arabic.zip",
231
+ },
232
+ "wiki_lingua_chinese_zh": {
233
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/chinese.zip",
234
+ },
235
+ "wiki_lingua_czech_cs": {
236
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/czech.zip",
237
+ },
238
+ "wiki_lingua_dutch_nl": {
239
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/dutch.zip",
240
+ },
241
+ "wiki_lingua_english_en": {
242
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/english.zip",
243
+ },
244
+ "wiki_lingua_french_fr": {
245
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/french.zip",
246
+ },
247
+ "wiki_lingua_german_de": {
248
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/german.zip",
249
+ },
250
+ "wiki_lingua_hindi_hi": {
251
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/hindi.zip",
252
+ },
253
+ "wiki_lingua_indonesian_id": {
254
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/indonesian.zip",
255
+ },
256
+ "wiki_lingua_italian_it": {
257
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/italian.zip",
258
+ },
259
+ "wiki_lingua_japanese_ja": {
260
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/japanese.zip",
261
+ },
262
+ "wiki_lingua_korean_ko": {
263
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/korean.zip",
264
+ },
265
+ "wiki_lingua_portuguese_pt": {
266
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/portuguese.zip",
267
+ },
268
+ "wiki_lingua_russian_ru": {
269
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/russian.zip",
270
+ },
271
+ "wiki_lingua_spanish_es": {
272
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/spanish.zip",
273
+ },
274
+ "wiki_lingua_thai_th": {
275
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/thai.zip",
276
+ },
277
+ "wiki_lingua_turkish_tr": {
278
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/turkish.zip",
279
+ },
280
+ "wiki_lingua_vietnamese_vi": {
281
+ "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/vietnamese.zip",
282
+ },
283
  "xsum": {
284
  "data": "http://bollin.inf.ed.ac.uk/public/direct/XSUM-EMNLP18-Summary-Data-Original.tar.gz",
285
  "splits": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_xsum_confidence_0.8.json",
286
+ "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/xsum.zip",
287
  },
288
  }
289
 
290
+ # Add Asset files
291
+ _URLs["wiki_auto_asset_turk"][
292
+ "test_asset_orig"
293
+ ] = "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.orig"
294
  for i in range(10):
295
  _URLs["wiki_auto_asset_turk"][
296
  f"test_asset_{i}"
297
  ] = f"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.{i}"
298
 
 
 
 
 
 
299
  _SGD_ACTS = [
300
  "AFFIRM",
301
  "AFFIRM_INTENT",
 
340
  BUILDER_CONFIGS = [
341
  datasets.BuilderConfig(
342
  name=conf,
343
+ version=datasets.Version("1.1.0"),
344
  description=f"GEM benchmark: {task} task, {conf} subset",
345
  )
346
  for task, dset_confs in _TASKS.items()
 
355
  features = datasets.Features(
356
  {
357
  "gem_id": datasets.Value("string"),
358
+ "gem_parent_id": datasets.Value("string"),
359
  "concept_set_id": datasets.Value("int32"),
360
  "concepts": [datasets.Value("string")],
361
  "target": datasets.Value("string"), # single target for train
 
366
  features = datasets.Features(
367
  {
368
  "gem_id": datasets.Value("string"),
369
+ "gem_parent_id": datasets.Value("string"),
370
  "dialog_act": datasets.Value("string"),
371
  "dialog_act_delexicalized": datasets.Value("string"),
372
  "target_delexicalized": datasets.Value("string"),
 
378
  features = datasets.Features(
379
  {
380
  "gem_id": datasets.Value("string"),
381
+ "gem_parent_id": datasets.Value("string"),
382
  "dart_id": datasets.Value("int32"),
383
  "tripleset": [[datasets.Value("string")]], # list of triples
384
  "subtree_was_extended": datasets.Value("bool"),
 
391
  features = datasets.Features(
392
  {
393
  "gem_id": datasets.Value("string"),
394
+ "gem_parent_id": datasets.Value("string"),
395
  "meaning_representation": datasets.Value("string"),
396
  "target": datasets.Value("string"),
397
  "references": [datasets.Value("string")],
 
401
  features = datasets.Features(
402
  {
403
  "gem_id": datasets.Value("string"),
404
+ "gem_parent_id": datasets.Value("string"),
405
  "text": datasets.Value("string"),
406
  "topic": datasets.Value("string"),
407
  "url": datasets.Value("string"),
 
415
  features = datasets.Features(
416
  {
417
  "gem_id": datasets.Value("string"),
418
+ "gem_parent_id": datasets.Value("string"),
419
  "dialog_acts": [
420
  {
421
  "act": datasets.ClassLabel(names=_SGD_ACTS),
 
423
  "values": [datasets.Value("string")],
424
  }
425
  ],
426
+ "context": [datasets.Value("string")],
427
  "dialog_id": datasets.Value("string"),
428
+ "service": datasets.Value("string"),
429
  "turn_id": datasets.Value("int32"),
430
  "prompt": datasets.Value("string"),
431
  "target": datasets.Value("string"),
 
436
  features = datasets.Features(
437
  {
438
  "gem_id": datasets.Value("string"),
439
+ "gem_parent_id": datasets.Value("string"),
440
  "totto_id": datasets.Value("int32"),
441
  "table_page_title": datasets.Value("string"),
442
  "table_webpage_url": datasets.Value("string"),
 
471
  features = datasets.Features(
472
  {
473
  "gem_id": datasets.Value("string"),
474
+ "gem_parent_id": datasets.Value("string"),
475
  "input": [datasets.Value("string")],
476
  "target": datasets.Value("string"), # single target for train
477
  "references": [datasets.Value("string")],
 
483
  features = datasets.Features(
484
  {
485
  "gem_id": datasets.Value("string"),
486
+ "gem_parent_id": datasets.Value("string"),
 
487
  "source": datasets.Value("string"),
488
  "target": datasets.Value("string"),
489
  "references": [datasets.Value("string")],
490
  }
491
  )
492
  elif self.config.name.startswith("wiki_lingua"):
493
+ if "v0" in self.config.name:
494
+ features = datasets.Features(
495
+ {
496
+ "gem_id": datasets.Value("string"),
497
+ "gem_parent_id": datasets.Value("string"),
498
+ "source": datasets.Value("string"),
499
+ "target": datasets.Value("string"),
500
+ "references": [datasets.Value("string")],
501
+ }
502
+ )
503
+ else:
504
+ ln = self.config.name.split("_")[-1]
505
+ features = datasets.Features(
506
+ {
507
+ "gem_id": datasets.Value("string"),
508
+ "gem_parent_id": datasets.Value("string"),
509
+ "source_aligned": datasets.Translation(languages=[ln, "en"]),
510
+ "target_aligned": datasets.Translation(languages=[ln, "en"]),
511
+ "source": datasets.Value("string"),
512
+ "target": datasets.Value("string"),
513
+ "references": [datasets.Value("string")],
514
+ }
515
+ )
516
  elif self.config.name == "xsum":
517
  features = datasets.Features(
518
  {
519
  "gem_id": datasets.Value("string"),
520
+ "gem_parent_id": datasets.Value("string"),
521
  "xsum_id": datasets.Value("string"),
522
  "document": datasets.Value("string"),
523
  "target": datasets.Value("string"),
 
537
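Every schema above now carries `gem_parent_id` next to `gem_id`, and the per-language WikiLingua configs additionally expose `source_aligned`/`target_aligned` Translation fields keyed by the language code and `en`. A sketch of reading those fields (assuming the `gem` dataset name; field names as defined above):

```python
from datasets import load_dataset

wiki_de = load_dataset("gem", "wiki_lingua_german_de", split="validation")
example = wiki_de[0]
print(example["gem_id"], example["gem_parent_id"])
# Translation features decode to plain dicts keyed by language code.
print(example["source_aligned"]["de"][:60], "->", example["target_aligned"]["en"][:60])
```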
  """Returns SplitGenerators."""
538
  dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
539
  if self.config.name == "common_gen":
540
+ challenge_sets = [
541
+ ("challenge_train_sample", "train_common_gen_RandomSample500.json"),
542
+ ("challenge_validation_sample", "validation_common_gen_RandomSample500.json"),
543
+ ("challenge_test_scramble", "test_common_gen_ScrambleInputStructure500.json"),
544
+ ]
545
  return [
546
  datasets.SplitGenerator(
547
  name=datasets.Split.TRAIN,
 
564
  "split": "test",
565
  },
566
  ),
567
+ ] + [
568
+ datasets.SplitGenerator(
569
+ name=challenge_split,
570
+ gen_kwargs={
571
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
572
+ "split": challenge_split,
573
+ },
574
+ )
575
+ for challenge_split, filename in challenge_sets
576
  ]
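Because the challenge sets are registered as additional SplitGenerators, each one can be requested by name like a regular split; a sketch for common_gen (split names taken from the challenge_sets list above, dataset name assumed to be "gem"):

```python
from datasets import load_dataset

scramble = load_dataset("gem", "common_gen", split="challenge_test_scramble")
print(len(scramble), scramble[0]["gem_id"])
```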
577
  elif self.config.name == "cs_restaurants":
578
+ challenge_sets = [
579
+ ("challenge_train_sample", "train_cs_restaurants_RandomSample500.json"),
580
+ ("challenge_validation_sample", "validation_cs_restaurants_RandomSample500.json"),
581
+ ("challenge_test_scramble", "test_cs_restaurants_ScrambleInputStructure500.json"),
582
+ ]
583
  return [
584
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
585
  for spl in ["train", "validation", "test"]
586
+ ] + [
587
+ datasets.SplitGenerator(
588
+ name=challenge_split,
589
+ gen_kwargs={
590
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
591
+ "split": challenge_split,
592
+ },
593
+ )
594
+ for challenge_split, filename in challenge_sets
595
  ]
596
  elif self.config.name == "dart":
597
  return [
 
599
  for spl in ["train", "validation", "test"]
600
  ]
601
  elif self.config.name == "e2e_nlg":
602
+ challenge_sets = [
603
+ ("challenge_train_sample", "train_e2e_nlg_RandomSample500.json"),
604
+ ("challenge_validation_sample", "validation_e2e_nlg_RandomSample500.json"),
605
+ ("challenge_test_scramble", "test_e2e_nlg_ScrambleInputStructure500.json"),
606
+ ]
607
  return [
608
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
609
  for spl in ["train", "validation", "test"]
610
+ ] + [
611
+ datasets.SplitGenerator(
612
+ name=challenge_split,
613
+ gen_kwargs={
614
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
615
+ "split": challenge_split,
616
+ },
617
+ )
618
+ for challenge_split, filename in challenge_sets
619
  ]
620
  elif self.config.name.startswith("mlsum"):
621
  lang = self.config.name.split("_")[1]
622
+ challenge_sets = [
623
+ ("challenge_train_sample", f"train_mlsum_{lang}_RandomSample500.json"),
624
+ ("challenge_validation_sample", f"validation_mlsum_{lang}_RandomSample500.json"),
625
+ ("challenge_test_covid", f"{lang}_test_covid19_cleaned.jsonl"),
626
+ ]
627
  return [
628
  datasets.SplitGenerator(
629
  name=datasets.Split.TRAIN,
 
652
  "filepaths": dl_dir["bad_ids"],
653
  },
654
  ),
655
+ ] + [
656
+ datasets.SplitGenerator(
657
+ name=challenge_split,
658
+ gen_kwargs={
659
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
660
+ "split": challenge_split,
661
+ },
662
+ )
663
+ for challenge_split, filename in challenge_sets
664
  ]
665
  elif self.config.name == "schema_guided_dialog":
666
+ challenge_sets = [
667
+ ("challenge_train_sample", "train_schema_guided_dialog_RandomSample500_reformatted.json"),
668
+ ("challenge_validation_sample", "validation_schema_guided_dialog_RandomSample500_reformatted.json"),
669
+ ("challenge_test_backtranslation", "test_schema_guided_dialog_BackTranslation500_reformatted.json"),
670
+ (
671
+ "challenge_test_bfp02",
672
+ "test_schema_guided_dialog_ButterFingersPerturbation_p=0.02_500_reformatted.json",
673
+ ),
674
+ (
675
+ "challenge_test_bfp05",
676
+ "test_schema_guided_dialog_ButterFingersPerturbation_p=0.05_500_reformatted.json",
677
+ ),
678
+ ("challenge_test_nopunc", "test_schema_guided_dialog_WithoutPunctuation500_reformatted.json"),
679
+ ("challenge_test_scramble", "test_schema_guided_dialog_ScrambleInputStructure500_reformatted.json"),
680
+ ]
681
  return [
682
  datasets.SplitGenerator(
683
  name=spl, gen_kwargs={"filepath": os.path.join(dl_dir["data"], "gem_sgd.json"), "split": spl}
684
  )
685
  for spl in ["train", "validation", "test"]
686
+ ] + [
687
+ datasets.SplitGenerator(
688
+ name=challenge_split,
689
+ gen_kwargs={
690
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
691
+ "split": challenge_split,
692
+ },
693
+ )
694
+ for challenge_split, filename in challenge_sets
695
  ]
696
  elif self.config.name == "totto":
697
+ challenge_sets = [
698
+ ("challenge_train_sample", "train_totto_RandomSample500.json"),
699
+ ("challenge_validation_sample", "validation_totto_RandomSample500.json"),
700
+ ("challenge_test_scramble", "test_totto_ScrambleInputStructure500.json"),
701
+ ]
702
  return [
703
  datasets.SplitGenerator(
704
  name=datasets.Split.TRAIN,
 
721
  "split": "test",
722
  },
723
  ),
724
+ ] + [
725
+ datasets.SplitGenerator(
726
+ name=challenge_split,
727
+ gen_kwargs={
728
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
729
+ "split": challenge_split,
730
+ },
731
+ )
732
+ for challenge_split, filename in challenge_sets
733
  ]
734
  elif self.config.name.startswith("web_nlg"):
735
+ ln = self.config.name.split("_")[-1]
736
+ challenge_sets = [
737
+ ("challenge_train_sample", f"train_web_nlg_{ln}_RandomSample500.json"),
738
+ ("challenge_validation_sample", f"validation_web_nlg_{ln}_RandomSample500.json"),
739
+ ("challenge_test_scramble", f"test_web_nlg_{ln}_ScrambleInputStructure500.json"),
740
+ ]
741
+ if ln == "en":
742
+ challenge_sets += [("challenge_test_numbers", f"test_web_nlg_{ln}_replace_numbers_500.json")]
743
  return [
744
  datasets.SplitGenerator(name=spl, gen_kwargs={"filepath": dl_dir[spl], "split": spl})
745
  for spl in ["train", "validation", "test"]
746
+ ] + [
747
+ datasets.SplitGenerator(
748
+ name=challenge_split,
749
+ gen_kwargs={
750
+ "filepath": os.path.join(dl_dir["challenge_set"], self.config.name, filename),
751
+ "split": challenge_split,
752
+ },
753
+ )
754
+ for challenge_split, filename in challenge_sets
755
  ]
756
  elif self.config.name == "wiki_auto_asset_turk":
757
+ challenge_sets = [
758
+ ("challenge_train_sample", "train_wiki_auto_asset_turk_RandomSample500.json"),
759
+ ("challenge_validation_sample", "validation_wiki_auto_asset_turk_RandomSample500.json"),
760
+ ("challenge_test_asset_backtranslation", "test_asset_wiki_auto_asset_turk_BackTranslation.json"),
761
+ (
762
+ "challenge_test_asset_bfp02",
763
+ "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
764
+ ),
765
+ (
766
+ "challenge_test_asset_bfp05",
767
+ "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
768
+ ),
769
+ ("challenge_test_asset_nopunc", "test_asset_wiki_auto_asset_turk_WithoutPunctuation.json"),
770
+ ("challenge_test_turk_backtranslation", "detok_test_turk_wiki_auto_asset_turk_BackTranslation.json"),
771
+ (
772
+ "challenge_test_turk_bfp02",
773
+ "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
774
+ ),
775
+ (
776
+ "challenge_test_turk_bfp05",
777
+ "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
778
+ ),
779
+ ("challenge_test_turk_nopunc", "detok_test_turk_wiki_auto_asset_turk_WithoutPunctuation.json"),
780
+ ]
781
  return [
782
  datasets.SplitGenerator(
783
  name=datasets.Split.TRAIN,
 
797
  name="test_asset",
798
  gen_kwargs={
799
  "filepath": "",
800
+ "split": "test_asset",
801
+ "filepaths": [dl_dir["test_asset_orig"]] + [dl_dir[f"test_asset_{i}"] for i in range(10)],
802
  },
803
  ),
804
  datasets.SplitGenerator(
805
  name="test_turk",
806
  gen_kwargs={
807
+ "filepath": dl_dir["test_turk"],
808
+ "split": "test_turk",
 
809
  },
810
  ),
811
+ ] + [
 
 
 
 
812
  datasets.SplitGenerator(
813
+ name=challenge_split,
814
  gen_kwargs={
815
+ "filepath": os.path.join(dl_dir["challenge_set"], "wiki_auto_asset_turk", filename),
816
+ "split": challenge_split,
817
  },
818
+ )
819
+ for challenge_split, filename in challenge_sets
 
 
 
 
 
 
 
 
 
 
 
 
 
820
  ]
821
+ elif self.config.name.startswith("wiki_lingua"):
822
+ if "v0" in self.config.name:
823
+ lang = self.config.name.split("_")[-3]
824
+ base_dir = os.path.join(dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en")
825
+ return [
826
+ datasets.SplitGenerator(
827
+ name=datasets.Split.TRAIN,
828
+ gen_kwargs={
829
+ "filepath": base_dir,
830
+ "split": "train",
831
+ },
832
+ ),
833
+ datasets.SplitGenerator(
834
+ name=datasets.Split.VALIDATION,
835
+ gen_kwargs={
836
+ "filepath": base_dir,
837
+ "split": "val",
838
+ },
839
+ ),
840
+ datasets.SplitGenerator(
841
+ name=datasets.Split.TEST,
842
+ gen_kwargs={
843
+ "filepath": base_dir,
844
+ "split": "test",
845
+ },
846
+ ),
847
+ ]
848
+ else:
849
+ lang_name = self.config.name.split("_")[-2]
850
+ lang = self.config.name.split("_")[-1]
851
+ base_dir = os.path.join(dl_dir["data"], lang_name)
852
+ return [
853
+ datasets.SplitGenerator(
854
+ name=datasets.Split.TRAIN,
855
+ gen_kwargs={
856
+ "filepath": base_dir,
857
+ "split": "train",
858
+ "lang": lang,
859
+ },
860
+ ),
861
+ datasets.SplitGenerator(
862
+ name=datasets.Split.VALIDATION,
863
+ gen_kwargs={
864
+ "filepath": base_dir,
865
+ "split": "val",
866
+ "lang": lang,
867
+ },
868
+ ),
869
+ datasets.SplitGenerator(
870
+ name=datasets.Split.TEST,
871
+ gen_kwargs={
872
+ "filepath": base_dir,
873
+ "split": "test",
874
+ "lang": lang,
875
+ },
876
+ ),
877
+ ]
878
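The new wiki_lingua config names encode both the WikiLingua language name and its ISO code, which the two branches above split apart; a purely illustrative sketch of that parsing:

    config_name = "wiki_lingua_spanish_es"   # new-style config name
    lang_name = config_name.split("_")[-2]   # "spanish" -> data sub-directory
    lang = config_name.split("_")[-1]        # "es" -> file suffix, e.g. train.src.es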
  elif self.config.name == "xsum":
879
+ challenge_sets = [
880
+ ("challenge_train_sample", "train_xsum_RandomSample500.json"),
881
+ ("challenge_validation_sample", "validation_xsum_RandomSample500.json"),
882
+ ("challenge_test_backtranslation", "test_xsum_BackTranslation500.json"),
883
+ ("challenge_test_bfp_02", "test_xsum_ButterFingersPerturbation_p=0.02_500.json"),
884
+ ("challenge_test_bfp_05", "test_xsum_ButterFingersPerturbation_p=0.05_500.json"),
885
+ ("challenge_test_nopunc", "test_xsum_WithoutPunctuation500.json"),
886
+ ("challenge_test_covid", f"en_test_covid19.jsonl"),
887
+ ]
888
  return [
889
  datasets.SplitGenerator(
890
  name=datasets.Split.TRAIN,
 
910
  "filepaths": os.path.join(dl_dir["data"], "bbc-summary-data"),
911
  },
912
  ),
913
+ ] + [
914
+ datasets.SplitGenerator(
915
+ name=challenge_split,
916
+ gen_kwargs={
917
+ "filepath": os.path.join(dl_dir["challenge_set"], "xsum", filename),
918
+ "split": challenge_split,
919
+ },
920
+ )
921
+ for challenge_split, filename in challenge_sets
922
  ]
923
 
924
  def _generate_examples(self, filepath, split, filepaths=None, lang=None):
  """ Yields examples. """
  if self.config.name == "common_gen":
+ if split.startswith("challenge"):
+ exples = json.load(open(filepath, encoding="utf-8"))
+ if isinstance(exples, dict):
+ assert len(exples) == 1, "multiple entries found"
+ exples = list(exples.values())[0]
+ for id_, exple in enumerate(exples):
+ if len(exple) == 0:
+ continue
+ exple["gem_parent_id"] = exple["gem_id"]
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
+ yield id_, exple
+ else:
+ with open(filepath, encoding="utf-8") as f:
+ id_ = -1
+ i = -1
+ for row in f:
+ row = row.replace(", }", "}")  # Fix possible JSON format error
+ data = json.loads(row)
+ concepts = [word for word in data["concept_set"].split("#")]
+ if split == "train":
+ i += 1
+ for scene in data["scene"]:
+ id_ += 1
+ yield id_, {
+ "gem_id": f"{self.config.name}-{split}-{id_}",
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
+ "concept_set_id": i,
+ "concepts": concepts,
+ "target": scene,
+ "references": [],
+ }
+ else:
  id_ += 1
  yield id_, {
  "gem_id": f"{self.config.name}-{split}-{id_}",
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
+ "concept_set_id": id_,
  "concepts": concepts,
+ "target": "" if split == "test" else data["scene"][0],
+ "references": [] if split == "test" else data["scene"],
  }
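For orientation, each non-challenge common_gen line is a JSON object with a "#"-joined concept set and one or more reference scenes; a hedged, hand-made example of what the parsing above expects (not copied from the data):

    import json

    row = '{"concept_set": "dog#frisbee#catch#throw", "scene": ["A dog leaps to catch a thrown frisbee."]}'
    data = json.loads(row)
    concepts = data["concept_set"].split("#")  # ["dog", "frisbee", "catch", "throw"]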
+ elif self.config.name == "cs_restaurants":
969
+ if split.startswith("challenge"):
970
+ exples = json.load(open(filepath, encoding="utf-8"))
971
+ if isinstance(exples, dict):
972
+ assert len(exples) == 1, "multiple entries found"
973
+ exples = list(exples.values())[0]
974
+ for id_, exple in enumerate(exples):
975
+ if len(exple) == 0:
976
+ continue
977
+ exple["gem_parent_id"] = exple["gem_id"]
978
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
979
+ yield id_, exple
980
+ else:
981
+ with open(filepath, encoding="utf8") as f:
982
+ data = json.load(f)
983
+ for id_, instance in enumerate(data):
984
  yield id_, {
985
  "gem_id": f"{self.config.name}-{split}-{id_}",
986
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
987
+ "dialog_act": instance["da"],
988
+ "dialog_act_delexicalized": instance["delex_da"],
989
+ "target": instance["text"],
990
+ "target_delexicalized": instance["delex_text"],
991
+ "references": [] if split == "train" else [instance["text"]],
992
  }
 
 
 
 
 
 
 
 
 
 
 
 
993
  elif self.config.name == "dart":
994
  with open(filepath, encoding="utf-8") as f:
995
  data = json.loads(f.read())
 
1002
  id_ += 1
1003
  yield id_, {
1004
  "gem_id": f"{self.config.name}-{split}-{id_}",
1005
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1006
  "dart_id": i,
1007
  "tripleset": example["tripleset"],
1008
  "subtree_was_extended": example.get("subtree_was_extended", None), # some are missing
 
1014
  id_ += 1
1015
  yield id_, {
1016
  "gem_id": f"{self.config.name}-{split}-{id_}",
1017
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1018
  "dart_id": id_,
1019
  "tripleset": example["tripleset"],
1020
  "subtree_was_extended": example.get("subtree_was_extended", None), # some are missing
 
1023
  "references": [annotation["text"] for annotation in example["annotations"]],
1024
  }
1025
  elif self.config.name == "e2e_nlg":
1026
+ if split.startswith("challenge"):
1027
+ exples = json.load(open(filepath, encoding="utf-8"))
1028
+ if isinstance(exples, dict):
1029
+ assert len(exples) == 1, "multiple entries found"
1030
+ exples = list(exples.values())[0]
1031
+ for id_, exple in enumerate(exples):
1032
+ if len(exple) == 0:
 
 
 
 
 
 
 
 
 
 
1033
  continue
1034
+ exple["gem_parent_id"] = exple["gem_id"]
1035
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1036
+ yield id_, exple
1037
+ else:
1038
+ with open(filepath, encoding="utf-8") as f:
1039
+ reader = csv.DictReader(f)
1040
+ for id_, example in enumerate(reader):
1041
  yield id_, {
1042
  "gem_id": f"{self.config.name}-{split}-{id_}",
1043
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1044
+ "meaning_representation": example["mr"],
1045
+ "target": example["ref"],
1046
+ "references": [] if split == "train" else [example["ref"]],
 
 
 
1047
  }
1048
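The non-challenge e2e_nlg files are parsed with csv.DictReader, which implies an "mr" and a "ref" column; a hedged, self-contained illustration (the MR value is invented, in the spirit of the E2E format):

    import csv, io

    sample = 'mr,ref\n"name[Alimentum], area[city centre]",Alimentum can be found in the city centre.\n'
    row = next(csv.DictReader(io.StringIO(sample)))
    assert row["mr"] == "name[Alimentum], area[city centre]"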
+ elif self.config.name.startswith("mlsum"):
1049
+ if split in ["train", "validation", "test", "challenge_test_covid"]:
1050
+ if split == "challenge_test_covid":
1051
+ bad_ids = {}
1052
+ else:
1053
+ bad_ids_dct = json.load(open(filepaths, encoding="utf-8"))
1054
+ bad_ids = dict((bad_url, True) for _, bad_url in bad_ids_dct[f"{lang}-{split}"])
1055
+ with open(filepath, encoding="utf-8") as f:
1056
+ id_ = -1
1057
+ for line in f:
1058
+ data = json.loads(line)
1059
+ if data["url"] in bad_ids:
1060
+ continue
1061
+ else:
1062
+ id_ += 1
1063
+ yield id_, {
1064
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1065
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1066
+ "text": data["text"],
1067
+ "target": data["summary"],
1068
+ "references": [] if split == "train" else [data["summary"]],
1069
+ "topic": data["topic"],
1070
+ "url": data["url"],
1071
+ "title": data["title"],
1072
+ "date": data["date"],
1073
+ }
1074
+ else:
1075
+ exples = json.load(open(filepath, encoding="utf-8"))
1076
+ if isinstance(exples, dict):
1077
+ assert len(exples) == 1, "multiple entries found"
1078
+ exples = list(exples.values())[0]
1079
+ for id_, exple in enumerate(exples):
1080
+ if len(exple) == 0:
1081
+ continue
1082
+ exple["gem_parent_id"] = exple["gem_id"]
1083
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1084
+ yield id_, exple
1085
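The bad-ids file passed in through `filepaths` is assumed here to map "{lang}-{split}" keys to (id, url) pairs, since only the URL ends up in the filter; a minimal sketch with invented values:

    # hypothetical contents of the bad-ids JSON shipped with the GEM download
    bad_ids_dct = {"de-validation": [["id-0", "https://example.com/bad-article"]]}
    bad_ids = dict((bad_url, True) for _, bad_url in bad_ids_dct["de-validation"])
    assert "https://example.com/bad-article" in bad_ids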
  elif self.config.name == "schema_guided_dialog":
1086
+ if "challenge" in split:
1087
+ exples = json.load(open(filepath, encoding="utf-8"))
1088
+ if isinstance(exples, dict):
1089
+ assert len(exples) == 1, "multiple entries found"
1090
+ exples = list(exples.values())[0]
1091
+ for id_, exple in enumerate(exples):
1092
+ if len(exple) == 0:
1093
+ continue
1094
+ exple["gem_parent_id"] = exple["gem_id"]
1095
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1096
+ yield id_, exple
1097
+ else:
1098
+ examples = json.load(open(filepath, encoding="utf-8"))[split]
1099
+ for id_, example in enumerate(examples):
1100
+ yield id_, {
1101
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1102
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1103
+ "dialog_acts": [
1104
+ {
1105
+ "act": act_id,
1106
+ "slot": slot,
1107
+ "values": values,
1108
+ }
1109
+ for act_id, slot, values in example["da"]
1110
+ ],
1111
+ "context": example["context"],
1112
+ "dialog_id": example["dialog_id"],
1113
+ "service": example["service"],
1114
+ "turn_id": example["turn_ix"],
1115
+ "prompt": example["prompt"],
1116
+ "target": example["target"],
1117
+ "references": [] if split == "train" else [example["target"]],
1118
+ }
1119
  elif self.config.name == "totto":
1120
+ if "challenge" in split:
1121
+ exples = json.load(open(filepath, encoding="utf-8"))
1122
+ if isinstance(exples, dict):
1123
+ assert len(exples) == 1, "multiple entries found"
1124
+ exples = list(exples.values())[0]
1125
+ for id_, exple in enumerate(exples):
1126
+ if len(exple) == 0:
1127
+ continue
1128
+ exple["gem_parent_id"] = exple["gem_id"]
1129
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1130
+ yield id_, exple
1131
+ else:
1132
+ with open(filepath, "r", encoding="utf-8") as json_file:
1133
+ json_list = list(json_file)
1134
+ id_ = -1
1135
+ i = -1
1136
+ for json_str in json_list:
1137
+ result = json.loads(json_str)
1138
+ if split == "train":
1139
+ i += 1
1140
+ for sentence in result["sentence_annotations"]:
1141
+ id_ += 1
1142
+ response = {
1143
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1144
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1145
+ "totto_id": i,
1146
+ "table_page_title": result["table_page_title"],
1147
+ "table_webpage_url": result["table_webpage_url"],
1148
+ "table_section_title": result["table_section_title"],
1149
+ "table_section_text": result["table_section_text"],
1150
+ "table": result["table"],
1151
+ "highlighted_cells": result["highlighted_cells"],
1152
+ "example_id": str(result["example_id"]),
1153
+ "overlap_subset": "none",
1154
+ "sentence_annotations": [sentence],
1155
+ "references": [],
1156
+ "target": sentence["final_sentence"],
1157
+ }
1158
+ yield id_, response
1159
+ else:
1160
  id_ += 1
1161
  response = {
1162
  "gem_id": f"{self.config.name}-{split}-{id_}",
1163
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1164
+ "totto_id": id_,
1165
  "table_page_title": result["table_page_title"],
1166
  "table_webpage_url": result["table_webpage_url"],
1167
  "table_section_title": result["table_section_title"],
 
1169
  "table": result["table"],
1170
  "highlighted_cells": result["highlighted_cells"],
1171
  "example_id": str(result["example_id"]),
1172
+ "overlap_subset": str(result["overlap_subset"]),
 
 
 
1173
  }
1174
+ response["sentence_annotations"] = [] if split == "test" else result["sentence_annotations"]
1175
+ response["references"] = [
1176
+ sentence["final_sentence"] for sentence in response["sentence_annotations"]
1177
+ ]
1178
+ response["target"] = response["references"][0] if len(response["references"]) > 0 else ""
1179
  yield id_, response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1180
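Outside of train, the ToTTo branch above keeps every sentence annotation and derives references and target from them; a small sketch of that logic in isolation:

    # hypothetical annotations for one table
    sentence_annotations = [
        {"final_sentence": "First verbalisation."},
        {"final_sentence": "Second verbalisation."},
    ]
    references = [sentence["final_sentence"] for sentence in sentence_annotations]
    target = references[0] if len(references) > 0 else ""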
  elif self.config.name.startswith("web_nlg"):
1181
+ if "challenge" in split:
1182
+ exples = json.load(open(filepath, encoding="utf-8"))
1183
+ if isinstance(exples, dict):
1184
+ assert len(exples) == 1, "multiple entries found"
1185
+ exples = list(exples.values())[0]
1186
+ for id_, exple in enumerate(exples):
1187
+ if len(exple) == 0:
1188
+ continue
1189
+ exple["gem_parent_id"] = exple["gem_id"]
1190
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1191
+ yield id_, exple
1192
+ else:
1193
+ with open(filepath, encoding="utf-8") as f:
1194
+ examples = json.load(f)
1195
+ id_ = -1
1196
+ for example in examples["values"]:
1197
+ if split == "train":
1198
+ for target in example["target"]:
1199
+ id_ += 1
1200
+ yield id_, {
1201
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1202
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1203
+ "input": example["input"],
1204
+ "target": target,
1205
+ "references": [] if split == "train" else example["target"],
1206
+ "category": example["category"],
1207
+ "webnlg_id": example["webnlg-id"],
1208
+ }
1209
+ else:
1210
  id_ += 1
1211
  yield id_, {
1212
  "gem_id": f"{self.config.name}-{split}-{id_}",
1213
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1214
  "input": example["input"],
1215
+ "target": example["target"][0] if len(example["target"]) > 0 else "",
1216
+ "references": example["target"],
1217
  "category": example["category"],
1218
  "webnlg_id": example["webnlg-id"],
1219
  }
 
 
 
 
 
 
 
 
 
 
1220
  elif self.config.name == "wiki_auto_asset_turk":
1221
  if split in ["train", "validation"]:
1222
  keys = [
 
 
 
1223
  "source",
1224
+ "target",
1225
  ]
1226
  with open(filepath, encoding="utf-8") as f:
1227
  for id_, line in enumerate(f):
1228
  values = line.strip().split("\t")
1229
+ assert len(values) == 2, f"Not enough fields in ---- {line} --- {values}"
1230
+ example = dict([(k, val) for k, val in zip(keys, values)])
1231
  example["gem_id"] = f"{self.config.name}-{split}-{id_}"
1232
+ example["gem_parent_id"] = example["gem_id"]
1233
  example["references"] = [] if split == "train" else [example["target"]]
1234
  yield id_, example
1235
+ elif split == "test_turk":
1236
+ examples = json.load(open(filepath, encoding="utf-8"))
1237
+ for id_, example in enumerate(examples):
1238
+ example["gem_parent_id"] = example["gem_id"]
1239
+ for k in ["source_id", "target_id"]:
1240
+ if k in example:
1241
+ del example[k]
1242
+ yield id_, example
1243
+ elif split == "test_asset":
1244
  files = [open(f_name, encoding="utf-8") for f_name in filepaths]
1245
  for id_, lines in enumerate(zip(*files)):
1246
  yield id_, {
1247
  "gem_id": f"{self.config.name}-{split}-{id_}",
1248
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
 
1249
  "target": lines[1].strip(),
1250
  "source": lines[0].strip(),
1251
  "references": [line.strip() for line in lines[1:]],
1252
  }
1253
+ else:
1254
+ exples = json.load(open(filepath, encoding="utf-8"))
1255
+ if isinstance(exples, dict):
1256
+ assert len(exples) == 1, "multiple entries found"
1257
+ exples = list(exples.values())[0]
1258
+ for id_, exple in enumerate(exples):
1259
+ exple["gem_parent_id"] = exple["gem_id"]
1260
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1261
+ for k in ["source_id", "target_id"]:
1262
+ if k in exple:
1263
+ del exple[k]
1264
+ yield id_, exple
1265
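The test_asset branch reads the original sentences and the ten ASSET simplification files in parallel, so each example ends up with ten references; a cut-down illustration of the zip(*files) pattern with hypothetical file names:

    # three stand-in files instead of the eleven real ones
    paths = ["asset.orig.txt", "asset.simp0.txt", "asset.simp1.txt"]
    files = [open(p, encoding="utf-8") for p in paths]
    for source, *simplifications in zip(*files):
        references = [line.strip() for line in simplifications]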
  elif self.config.name.startswith("wiki_lingua"):
1266
+ if "v0" in self.config.name:
1267
+ with open(os.path.join(filepath, f"{split}.src"), encoding="utf-8") as f_in:
1268
+ with open(os.path.join(filepath, f"{split}.tgt"), encoding="utf-8") as f_out:
1269
+ for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
1270
+ yield id_, {
1271
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1272
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1273
+ "source": src.strip(),
1274
+ "target": tgt.strip(),
1275
+ "references": [] if split == "train" else [tgt.strip()],
1276
+ }
1277
+ else:
1278
+ with open(os.path.join(filepath, f"{split}.src.{lang}"), encoding="utf-8") as f_in_ln:
1279
+ with open(os.path.join(filepath, f"{split}.src.en"), encoding="utf-8") as f_in_en:
1280
+ with open(os.path.join(filepath, f"{split}.tgt.{lang}"), encoding="utf-8") as f_out_ln:
1281
+ with open(os.path.join(filepath, f"{split}.tgt.en"), encoding="utf-8") as f_out_en:
1282
+ for id_, (src_ln, src_en, tgt_ln, tgt_en) in enumerate(
1283
+ zip(f_in_ln, f_in_en, f_out_ln, f_out_en)
1284
+ ):
1285
+ yield id_, {
1286
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1287
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1288
+ "source_aligned": {lang: src_ln.strip(), "en": src_en.strip()},
1289
+ "target_aligned": {lang: tgt_ln.strip(), "en": tgt_en.strip()},
1290
+ "source": src_ln.strip(),
1291
+ "target": tgt_en.strip(),
1292
+ "references": [] if split == "train" else [tgt_en.strip()],
1293
+ }
1294
+ elif self.config.name == "xsum":
1295
+ if "challenge" in split:
1296
+ if "covid" in split:
1297
+ with open(filepath, encoding="utf-8") as f:
1298
+ id_ = -1
1299
+ for line in f:
1300
+ data = json.loads(line)
1301
+ id_ += 1
1302
+ yield id_, {
1303
+ "gem_id": f"{self.config.name}-{split}-{id_}",
1304
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1305
+ "xsum_id": data["url"],
1306
+ "document": data["text"],
1307
+ "target": data["summary"],
1308
+ "references": [] if split == "train" else [data["summary"]],
1309
+ }
1310
+ else:
1311
+ exples = json.load(open(filepath, encoding="utf-8"))
1312
+ if isinstance(exples, dict):
1313
+ assert len(exples) == 1, "multiple entries found"
1314
+ exples = list(exples.values())[0]
1315
+ for id_, exple in enumerate(exples):
1316
+ exple["gem_parent_id"] = exple["gem_id"]
1317
+ exple["gem_id"] = f"{self.config.name}-{split}-{id_}"
1318
+ yield id_, exple
1319
+ else:
1320
+ with open(filepath, "r", encoding="utf-8") as f:
1321
+ split_ids = json.load(f)
1322
+ for id_, i in enumerate(split_ids[split]):
1323
+ with open(os.path.join(filepaths, i + ".summary"), "r", encoding="utf-8") as f:
1324
+ text = "".join(
1325
+ [line for line in f.readlines() if line not in _XSUM_REMOVE_LINES and line.strip()]
1326
+ )
1327
+ segs = text.split("[SN]")
1328
  yield id_, {
1329
  "gem_id": f"{self.config.name}-{split}-{id_}",
1330
+ "gem_parent_id": f"{self.config.name}-{split}-{id_}",
1331
+ "xsum_id": i,
1332
+ "document": segs[8].strip(),
1333
+ "target": segs[6].strip(),
1334
+ "references": [] if split == "train" else [segs[6].strip()],
1335
  }
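For context on the magic indices above: XSum's .summary files are, to the best of my knowledge, organised into [SN]-delimited sections, so splitting on "[SN]" puts the first-sentence summary at index 6 and the article body at index 8; a self-contained check with a made-up article:

    text = (
        "[SN]URL[SN]\nhttp://example.com/story\n"
        "[SN]TITLE[SN]\nSome headline\n"
        "[SN]FIRST-SENTENCE[SN]\nA one-sentence summary.\n"
        "[SN]RESTBODY[SN]\nThe full article text.\n"
    )
    segs = text.split("[SN]")
    assert segs[6].strip() == "A one-sentence summary."
    assert segs[8].strip() == "The full article text."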