albertvillanova HF staff committed on
Commit
4169d92
1 Parent(s): 39f7d3f

Delete loading script

Browse files
Files changed (1) hide show
  1. asset.py +0 -174
asset.py DELETED
@@ -1,174 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ASSET: a dataset for sentence simplification evaluation"""
16
-
17
-
18
- import csv
19
-
20
- import datasets
21
-
22
-
23
# BibTeX entry for the ACL 2020 paper that introduced the ASSET dataset.
_CITATION = """\
@inproceedings{alva-manchego-etal-2020-asset,
    title = "{ASSET}: {A} Dataset for Tuning and Evaluation of Sentence Simplification Models with Multiple Rewriting Transformations",
    author = "Alva-Manchego, Fernando  and
      Martin, Louis  and
      Bordes, Antoine  and
      Scarton, Carolina  and
      Sagot, Benoit  and
      Specia, Lucia",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.424",
    pages = "4668--4679",
}
"""
41
-
42
# Human-readable dataset summary surfaced through datasets.DatasetInfo.
_DESCRIPTION = """\
ASSET is a dataset for evaluating Sentence Simplification systems with multiple rewriting transformations,
as described in "ASSET: A Dataset for Tuning and Evaluation of Sentence Simplification Models with Multiple Rewriting Transformations".
The corpus is composed of 2000 validation and 359 test original sentences that were each simplified 10 times by different annotators.
The corpus also contains human judgments of meaning preservation, fluency and simplicity for the outputs of several automatic text simplification systems.
"""
48
-
49
- _HOMEPAGE = "https://github.com/facebookresearch/asset"
50
-
51
- _LICENSE = "Creative Common Attribution-NonCommercial 4.0 International"
52
-
53
- _URL_LIST = [
54
- (
55
- "human_ratings.csv",
56
- "https://raw.githubusercontent.com/facebookresearch/asset/main/human_ratings/human_ratings.csv",
57
- ),
58
- (
59
- "asset.valid.orig",
60
- "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.valid.orig",
61
- ),
62
- (
63
- "asset.test.orig",
64
- "https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.test.orig",
65
- ),
66
- ]
67
- _URL_LIST += [
68
- (
69
- f"asset.{spl}.simp.{i}",
70
- f"https://raw.githubusercontent.com/facebookresearch/asset/main/dataset/asset.{spl}.simp.{i}",
71
- )
72
- for spl in ["valid", "test"]
73
- for i in range(10)
74
- ]
75
-
76
- _URLs = dict(_URL_LIST)
77
-
78
-
79
class Asset(datasets.GeneratorBasedBuilder):
    """Builder for ASSET, a sentence simplification evaluation corpus.

    Two configurations are exposed:
      * "simplification" (default): original sentences, each aligned with
        10 human-written simplifications (validation and test splits).
      * "ratings": human judgments of meaning preservation, fluency and
        simplicity for automatic simplification outputs (single "full" split).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="simplification",
            version=VERSION,
            description="A set of original sentences aligned with 10 possible simplifications for each.",
        ),
        datasets.BuilderConfig(
            name="ratings",
            version=VERSION,
            # Fixed typo: "implification" -> "simplification".
            description="Human ratings of automatically produced text simplification.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "simplification"

    def _info(self):
        """Return the DatasetInfo, with features depending on the config."""
        if self.config.name == "simplification":
            features = datasets.Features(
                {
                    "original": datasets.Value("string"),
                    "simplifications": datasets.Sequence(datasets.Value("string")),
                }
            )
        else:  # "ratings"
            features = datasets.Features(
                {
                    "original": datasets.Value("string"),
                    "simplification": datasets.Value("string"),
                    "original_sentence_id": datasets.Value("int32"),
                    "aspect": datasets.ClassLabel(names=["meaning", "fluency", "simplicity"]),
                    "worker_id": datasets.Value("int32"),
                    "rating": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every corpus file and declare the splits per config."""
        data_dir = dl_manager.download_and_extract(_URLs)
        if self.config.name == "simplification":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepaths": data_dir, "split": "valid"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepaths": data_dir, "split": "test"},
                ),
            ]
        # The ratings CSV is a single file with no train/valid/test split.
        return [
            datasets.SplitGenerator(
                name="full",
                gen_kwargs={"filepaths": data_dir, "split": "full"},
            ),
        ]

    def _generate_examples(self, filepaths, split):
        """Yields examples.

        Args:
            filepaths: mapping of local file name -> downloaded path (keys of _URLs).
            split: "valid" or "test" for the simplification config, "full" for ratings.
        """
        if self.config.name == "simplification":
            # The .orig file and the 10 .simp.N files are line-aligned:
            # line i of every file refers to the same original sentence.
            names = [f"asset.{split}.orig"] + [f"asset.{split}.simp.{i}" for i in range(10)]
            files = [open(filepaths[name], encoding="utf-8") for name in names]
            try:
                for id_, lines in enumerate(zip(*files)):
                    yield id_, {
                        "original": lines[0].strip(),
                        "simplifications": [line.strip() for line in lines[1:]],
                    }
            finally:
                # Bug fix: the original implementation never closed these
                # 11 file handles (resource leak).
                for f in files:
                    f.close()
        else:
            with open(filepaths["human_ratings.csv"], encoding="utf-8") as f:
                # DictReader consumes the header row and keys each record by it,
                # matching the original manual zip(header, row) logic; ids are
                # 0-based over the data rows, as before.
                for id_, res in enumerate(csv.DictReader(f)):
                    for k in ["original_sentence_id", "worker_id", "rating"]:
                        res[k] = int(res[k])
                    yield id_, res