albertvillanova (HF staff) committed
Commit beaba5b
1 Parent(s): 4972d53

Delete loading script
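The script-based builder goes away; the dataset is presumably served from the repository's data files from here on, so datasets.load_dataset should keep working unchanged. A minimal sketch, assuming the config and split names defined in the deleted script below (configs en, de, es, fr, it, nl, pl, pt, ru, zh; splits train, dev, test):

import datasets

# Any config from the old BUILDER_CONFIGS, e.g. the German translation.
train = datasets.load_dataset("stsb_multi_mt", "de", split="train")
dev = datasets.load_dataset("stsb_multi_mt", "de", split="dev")  # the script registered the validation split as "dev"

# Each example carries the features declared in _info().
print(train[0])  # {'sentence1': ..., 'sentence2': ..., 'similarity_score': ...}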

Files changed (1):
  1. stsb_multi_mt.py +0 -196
stsb_multi_mt.py DELETED
@@ -1,196 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """These are different multilingual translations and the English original of the STSbenchmark dataset."""
-
-
- import csv
-
- import datasets
-
-
- _CITATION = """\
- @InProceedings{huggingface:dataset:stsb_multi_mt,
- title = {Machine translated multilingual STS benchmark dataset.},
- author={Philip May},
- year={2021},
- url={https://github.com/PhilipMay/stsb-multi-mt}
- }
- """
-
- _DESCRIPTION = """\
- These are different multilingual translations and the English original of the STSbenchmark dataset. \
- Translation has been done with deepl.com.
- """
-
- _HOMEPAGE = "https://github.com/PhilipMay/stsb-multi-mt"
-
- _LICENSE = "custom license - see project page"
-
- _BASE_URL = "https://raw.githubusercontent.com/PhilipMay/stsb-multi-mt/main/data"
-
-
- class StsbMultiMt(datasets.GeneratorBasedBuilder):
-     """These are different multilingual translations and the English original of the STSbenchmark dataset.
-
-     Translation has been done with deepl.com.
-     """
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="en",
-             version=VERSION,
-             description="This is the original English STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="de",
-             version=VERSION,
-             description="This is the German STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="es",
-             version=VERSION,
-             description="This is the Spanish STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="fr",
-             version=VERSION,
-             description="This is the French STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="it",
-             version=VERSION,
-             description="This is the Italian STS benchmark data set.",
-         ),
-         # here seems to be an issue - see https://github.com/PhilipMay/stsb-multi-mt/issues/1
-         # datasets.BuilderConfig(name="ja", version=VERSION, description="This is the Japanese STS benchmark data set."),
-         datasets.BuilderConfig(
-             name="nl",
-             version=VERSION,
-             description="This is the Dutch STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="pl",
-             version=VERSION,
-             description="This is the Polish STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="pt",
-             version=VERSION,
-             description="This is the Portuguese STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="ru",
-             version=VERSION,
-             description="This is the Russian STS benchmark data set.",
-         ),
-         datasets.BuilderConfig(
-             name="zh",
-             version=VERSION,
-             description="This is the Chinese (simplified) STS benchmark data set.",
-         ),
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "sentence1": datasets.Value("string"),
-                 "sentence2": datasets.Value("string"),
-                 "similarity_score": datasets.Value("float"),
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_BASE_URL}/stsb-{self.config.name}-train.csv",
-             "dev": f"{_BASE_URL}/stsb-{self.config.name}-dev.csv",
-             "test": f"{_BASE_URL}/stsb-{self.config.name}-test.csv",
-         }
-         downloaded_files = dl_manager.download(urls_to_download)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["train"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["test"],
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.NamedSplit("dev"),
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["dev"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(
-         self,
-         filepath,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     ):
-         """Yields examples as (key, example) tuples."""
-         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is here for legacy reason (tfds) and is not important in itself.
-         with open(filepath, newline="", encoding="utf-8") as csvfile:
-             csv_dict_reader = csv.DictReader(
-                 csvfile,
-                 fieldnames=["sentence1", "sentence2", "similarity_score"],
-                 dialect="excel",
-             )
-             for id_, row in enumerate(csv_dict_reader):
-                 # do asserts
-                 assert "sentence1" in row
-                 assert isinstance(row["sentence1"], str)
-                 assert len(row["sentence1"].strip()) > 0
-                 assert "sentence2" in row
-                 assert isinstance(row["sentence2"], str)
-                 assert len(row["sentence2"].strip()) > 0
-                 assert "similarity_score" in row
-                 assert isinstance(row["similarity_score"], str)
-                 assert len(row["similarity_score"].strip()) > 0
-
-                 # convert similarity_score from str to float
-                 row["similarity_score"] = float(row["similarity_score"])
-
-                 # do more asserts
-                 assert row["similarity_score"] >= 0.0
-                 assert row["similarity_score"] <= 5.0
-
-                 yield id_, row
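
For reference, the deleted builder's behavior can be reproduced with the standard library alone: it downloaded three CSV files per language from GitHub and parsed them with csv.DictReader. A rough standalone sketch under the same raw URLs and column layout as the script above; the helper name fetch_split is hypothetical, and plain urllib stands in for the caching dl_manager.download the script used:

import csv
import io
import urllib.request

_BASE_URL = "https://raw.githubusercontent.com/PhilipMay/stsb-multi-mt/main/data"


def fetch_split(lang, split):
    """Download one split ("train", "dev" or "test") for one language and yield row dicts."""
    url = f"{_BASE_URL}/stsb-{lang}-{split}.csv"
    with urllib.request.urlopen(url) as resp:  # no caching, unlike dl_manager.download
        text = resp.read().decode("utf-8")
    reader = csv.DictReader(
        io.StringIO(text),
        fieldnames=["sentence1", "sentence2", "similarity_score"],
        dialect="excel",
    )
    for row in reader:
        # Same conversion the script performed; it also asserted scores stay within [0.0, 5.0].
        row["similarity_score"] = float(row["similarity_score"])
        yield row


# Example: peek at the first German training pair.
first = next(fetch_split("de", "train"))
print(first)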