Datasets: GEM /

Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
albertvillanova (HF staff) committed
Commit 3fea61a
1 Parent(s): 44256ca

Delete loading script
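
With the dataset-specific loading script removed, loading presumably falls back to the repository's data files (the parquet format listed above) instead of executing this script. A minimal sketch of what loading looks like after this commit, assuming the Hub repo id GEM/wiki_auto_asset_turk and that the conversion kept the split and field names defined in the deleted script:

from datasets import load_dataset

# Assumed repo id; after this commit the data is read from the repo's data files
# rather than by running wiki_auto_asset_turk.py.
ds = load_dataset("GEM/wiki_auto_asset_turk")
print(ds)                   # expected splits: train, validation, test_asset, test_turk, ...
print(ds["validation"][0])  # expected fields: gem_id, gem_parent_id, source, target, references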

Files changed (1)
  1. wiki_auto_asset_turk.py +0 -246
wiki_auto_asset_turk.py DELETED
@@ -1,246 +0,0 @@
- import csv
- import json
- import os
- import datasets
-
- _CITATION = """\
- @inproceedings{jiang-etal-2020-neural,
-     title = "Neural {CRF} Model for Sentence Alignment in Text Simplification",
-     author = "Jiang, Chao  and
-       Maddela, Mounica  and
-       Lan, Wuwei  and
-       Zhong, Yang  and
-       Xu, Wei",
-     booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-     month = jul,
-     year = "2020",
-     address = "Online",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/2020.acl-main.709",
-     doi = "10.18653/v1/2020.acl-main.709",
-     pages = "7943--7960",
- }
- """
-
- _DESCRIPTION = """\
- WikiAuto provides a set of aligned sentences from English Wikipedia and Simple
- English Wikipedia as a resource to train sentence simplification systems.
-
- The authors first crowd-sourced a set of manual alignments between sentences in
- a subset of the Simple English Wikipedia and their corresponding versions in
- English Wikipedia (this corresponds to the manual config in this version of the
- dataset), then trained a neural CRF system to predict these alignments.
-
- The trained alignment prediction model was then applied to the other articles in
- Simple English Wikipedia with an English counterpart to create a larger corpus
- of aligned sentences (corresponding to the auto and auto_acl configs here).
- """
-
- _URLs = {
-     "train": "train.tsv",
-     "validation": "valid.tsv",
-     "test_turk": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json",
-     "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip",
-     "test_contract": "benchmarks/contract-benchmark.tsv",
-     "test_wiki": "benchmarks/wiki-benchmark.tsv",
- }
-
- # Add Asset files.
- _URLs[
-     "test_asset_orig"
- ] = "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.orig"
- for i in range(10):
-     _URLs[
-         f"test_asset_{i}"
-     ] = f"https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.simp.{i}"
-
-
- class WikiAuto(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-     DEFAULT_CONFIG_NAME = "wiki_auto_asset_turk"
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "gem_id": datasets.Value("string"),
-                 "gem_parent_id": datasets.Value("string"),
-                 "source": datasets.Value("string"),
-                 "target": datasets.Value("string"),
-                 "references": [datasets.Value("string")],
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=datasets.info.SupervisedKeysData(
-                 input="source", output="target"
-             ),
-             homepage="",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         dl_dir = dl_manager.download_and_extract(_URLs)
-         challenge_sets = [
-             (
-                 "challenge_train_sample",
-                 "train_wiki_auto_asset_turk_RandomSample500.json",
-             ),
-             (
-                 "challenge_validation_sample",
-                 "validation_wiki_auto_asset_turk_RandomSample500.json",
-             ),
-             (
-                 "challenge_test_asset_backtranslation",
-                 "test_asset_wiki_auto_asset_turk_BackTranslation.json",
-             ),
-             (
-                 "challenge_test_asset_bfp02",
-                 "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
-             ),
-             (
-                 "challenge_test_asset_bfp05",
-                 "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
-             ),
-             (
-                 "challenge_test_asset_nopunc",
-                 "test_asset_wiki_auto_asset_turk_WithoutPunctuation.json",
-             ),
-             (
-                 "challenge_test_turk_backtranslation",
-                 "detok_test_turk_wiki_auto_asset_turk_BackTranslation.json",
-             ),
-             (
-                 "challenge_test_turk_bfp02",
-                 "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
-             ),
-             (
-                 "challenge_test_turk_bfp05",
-                 "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
-             ),
-             (
-                 "challenge_test_turk_nopunc",
-                 "detok_test_turk_wiki_auto_asset_turk_WithoutPunctuation.json",
-             ),
-         ]
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": dl_dir["train"],
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": dl_dir["validation"],
-                     "split": "validation",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="test_asset",
-                 gen_kwargs={
-                     "filepath": "",
-                     "split": "test_asset",
-                     "filepaths": [dl_dir["test_asset_orig"]]
-                     + [dl_dir[f"test_asset_{i}"] for i in range(10)],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="test_turk",
-                 gen_kwargs={
-                     "filepath": dl_dir["test_turk"],
-                     "split": "test_turk",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="test_contract",
-                 gen_kwargs={
-                     "filepath": dl_dir["test_contract"],
-                     "split": "test_contract",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="test_wiki",
-                 gen_kwargs={
-                     "filepath": dl_dir["test_wiki"],
-                     "split": "test_wiki",
-                 },
-             ),
-         ] + [
-             datasets.SplitGenerator(
-                 name=challenge_split,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         dl_dir["challenge_set"], "wiki_auto_asset_turk", filename
-                     ),
-                     "split": challenge_split,
-                 },
-             )
-             for challenge_split, filename in challenge_sets
-         ]
-
-     def _generate_examples(self, filepath, split, filepaths=None, lang=None):
-         """Yields examples."""
-         if split in ["train", "validation"]:
-             keys = [
-                 "source",
-                 "target",
-             ]
-             with open(filepath, encoding="utf-8") as f:
-                 for id_, line in enumerate(f):
-                     values = line.strip().split("\t")
-                     assert (
-                         len(values) == 2
-                     ), f"Not enough fields in ---- {line} --- {values}"
-                     example = dict([(k, val) for k, val in zip(keys, values)])
-                     example["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
-                     example["gem_parent_id"] = example["gem_id"]
-                     example["references"] = (
-                         [] if split == "train" else [example["target"]]
-                     )
-                     yield id_, example
-         elif split == "test_turk":
-             examples = json.load(open(filepath, encoding="utf-8"))
-             for id_, example in enumerate(examples):
-                 example["gem_parent_id"] = example["gem_id"]
-                 for k in ["source_id", "target_id"]:
-                     if k in example:
-                         del example[k]
-                 yield id_, example
-         elif split == "test_asset":
-             files = [open(f_name, encoding="utf-8") for f_name in filepaths]
-             for id_, lines in enumerate(zip(*files)):
-                 yield id_, {
-                     "gem_id": f"wiki_auto_asset_turk-{split}-{id_}",
-                     "gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}",
-                     "target": lines[1].strip(),
-                     "source": lines[0].strip(),
-                     "references": [line.strip() for line in lines[1:]],
-                 }
-         elif split == "test_wiki" or split == "test_contract":
-             with open(filepath, 'r') as f:
-                 reader = csv.DictReader(f, delimiter="\t")
-                 for id_, entry in enumerate(reader):
-                     yield id_, {
-                         "gem_id": f"wiki_auto_asset_turk-{split}-{id_}",
-                         "gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}",
-                         "target": entry["simple"],
-                         "source": entry["complex"],
-                         "references": [entry["simple"]],
-                     }
-         else:
-             exples = json.load(open(filepath, encoding="utf-8"))
-             if isinstance(exples, dict):
-                 assert len(exples) == 1, "multiple entries found"
-                 exples = list(exples.values())[0]
-             for id_, exple in enumerate(exples):
-                 exple["gem_parent_id"] = exple["gem_id"]
-                 exple["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
-                 for k in ["source_id", "target_id"]:
-                     if k in exple:
-                         del exple[k]
-                 yield id_, exple
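
For reference, the test_asset branch of the deleted _generate_examples simply zipped the original ASSET sentences with the ten crowd-sourced simplification files and treated the first simplification as the target. A standalone sketch of that logic for anyone who still wants the raw files (URLs copied from _URLs above, field names from _info; the _fetch_lines helper is illustrative and not part of the original script):

import urllib.request

_BASE = "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset"

def _fetch_lines(url):
    # Download one plain-text file and split it into lines (helper added for this sketch).
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8").splitlines()

orig = _fetch_lines(f"{_BASE}/asset.test.orig")
simps = [_fetch_lines(f"{_BASE}/asset.test.simp.{i}") for i in range(10)]

examples = []
for id_, lines in enumerate(zip(orig, *simps)):
    # lines[0] is the original sentence; lines[1:] are the ten reference simplifications.
    examples.append(
        {
            "gem_id": f"wiki_auto_asset_turk-test_asset-{id_}",
            "gem_parent_id": f"wiki_auto_asset_turk-test_asset-{id_}",
            "source": lines[0].strip(),
            "target": lines[1].strip(),
            "references": [line.strip() for line in lines[1:]],
        }
    )
print(len(examples))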