gabrielaltay committed
Commit 8f06c11
1 Parent(s): 6088cd4

upload hubscripts/scielo_hub.py to hub from bigbio repo

Files changed (1)
  1. scielo.py +222 -0
scielo.py ADDED
@@ -0,0 +1,222 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO.
"""
from typing import IO, Any, Generator, List, Optional, Tuple

import datasets

from .bigbiohub import text2text_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English', 'Spanish', 'Portuguese']
_PUBMED = False
_LOCAL = False
_CITATION = """\
@inproceedings{soares2018large,
    title = {A Large Parallel Corpus of Full-Text Scientific Articles},
    author = {Soares, Felipe and Moreira, Viviane and Becker, Karin},
    year = 2018,
    booktitle = {
        Proceedings of the Eleventh International Conference on Language Resources
        and Evaluation (LREC-2018)
    }
}
"""

_DATASETNAME = "scielo"
_DISPLAYNAME = "SciELO"

_DESCRIPTION = """\
A parallel corpus of full-text scientific articles collected from the SciELO \
database in the following languages: English, Portuguese and Spanish. The corpus \
is sentence aligned for all language pairs, as well as trilingual aligned for a \
small subset of sentences. Alignment was carried out using the Hunalign \
algorithm.
"""

_HOMEPAGE = "https://sites.google.com/view/felipe-soares/datasets#h.p_92uSCyAjWSRB"

_LICENSE = 'Creative Commons Attribution 4.0 International'

_URLS = {
    "en_es": "https://ndownloader.figstatic.com/files/14019287",
    "en_pt": "https://ndownloader.figstatic.com/files/14019308",
    "en_pt_es": "https://ndownloader.figstatic.com/files/14019293",
}

_SUPPORTED_TASKS = [Tasks.TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"

class ScieloDataset(datasets.GeneratorBasedBuilder):
    """Parallel corpus of full-text articles in Portuguese, English and Spanish from SciELO."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    # NOTE: the bigbio_t2t schema does not allow more than two texts per example,
    # so the en-pt-es subset is only implemented for the source schema.

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="scielo_en_es_source",
            version=SOURCE_VERSION,
            description="English-Spanish",
            schema="source",
            subset_id="scielo_en_es",
        ),
        BigBioConfig(
            name="scielo_en_pt_source",
            version=SOURCE_VERSION,
            description="English-Portuguese",
            schema="source",
            subset_id="scielo_en_pt",
        ),
        BigBioConfig(
            name="scielo_en_pt_es_source",
            version=SOURCE_VERSION,
            description="English-Portuguese-Spanish",
            schema="source",
            subset_id="scielo_en_pt_es",
        ),
        BigBioConfig(
            name="scielo_en_es_bigbio_t2t",
            version=BIGBIO_VERSION,
            description="scielo BigBio schema English-Spanish",
            schema="bigbio_t2t",
            subset_id="scielo_en_es",
        ),
        BigBioConfig(
            name="scielo_en_pt_bigbio_t2t",
            version=BIGBIO_VERSION,
            description="scielo BigBio schema English-Portuguese",
            schema="bigbio_t2t",
            subset_id="scielo_en_pt",
        ),
    ]

    DEFAULT_CONFIG_NAME = "scielo_en_es_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            lang_list: List[str] = self.config.subset_id.split("_")[1:]
            features = datasets.Features(
                {"translation": datasets.features.Translation(languages=lang_list)}
            )

        elif self.config.schema == "bigbio_t2t":
            features = text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        lang_list: List[str] = self.config.subset_id.split("_")[1:]
        languages = "_".join(lang_list)
        archive = dl_manager.download(_URLS[languages])

        # Files inside the archive are named after the language pair, e.g. "en_es.en".
        fname = languages

        if languages == "en_pt_es":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "source_file": f"{fname}.en",
                        "target_file": f"{fname}.pt",
                        "target_file_2": f"{fname}.es",
                        "files": dl_manager.iter_archive(archive),
                        "languages": languages,
                        "split": "train",
                    },
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "source_file": f"{fname}.{lang_list[0]}",
                        "target_file": f"{fname}.{lang_list[1]}",
                        "files": dl_manager.iter_archive(archive),
                        "languages": languages,
                        "split": "train",
                    },
                ),
            ]

    def _generate_examples(
        self,
        languages: str,
        split: str,
        source_file: str,
        target_file: str,
        files: Generator[Tuple[str, IO[bytes]], Any, None],
        target_file_2: Optional[str] = None,
    ) -> Generator[Tuple[int, dict], None, None]:

        if self.config.schema == "source":
            # The per-language files are sentence-aligned line by line,
            # so zip pairs translations across languages.
            for path, f in files:
                if path == source_file:
                    source_sentences = f.read().decode("utf-8").split("\n")
                elif path == target_file:
                    target_sentences = f.read().decode("utf-8").split("\n")
                elif languages == "en_pt_es" and path == target_file_2:
                    target_sentences_2 = f.read().decode("utf-8").split("\n")

            if languages == "en_pt_es":
                source, target, target_2 = tuple(languages.split("_"))
                for idx, (l1, l2, l3) in enumerate(
                    zip(source_sentences, target_sentences, target_sentences_2)
                ):
                    result = {"translation": {source: l1, target: l2, target_2: l3}}
                    yield idx, result
            else:
                source, target = tuple(languages.split("_"))
                for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
                    result = {"translation": {source: l1, target: l2}}
                    yield idx, result

        elif self.config.schema == "bigbio_t2t":
            for path, f in files:
                if path == source_file:
                    source_sentences = f.read().decode("utf-8").split("\n")
                elif path == target_file:
                    target_sentences = f.read().decode("utf-8").split("\n")

            uid = 0
            source, target = tuple(languages.split("_"))
            for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
                uid += 1
                yield idx, {
                    "id": str(uid),
                    "document_id": str(idx),
                    "text_1": l1,
                    "text_2": l2,
                    "text_1_name": source,
                    "text_2_name": target,
                }
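
For reference, a minimal usage sketch. The Hub repository ID "bigbio/scielo" is an assumption based on the commit message, not something this commit confirms, and trust_remote_code requires a datasets version that supports community loading scripts:

from datasets import load_dataset

# Source schema: records shaped like {"translation": {"en": ..., "es": ...}}
# NOTE: the repo ID below is illustrative; point it at wherever this script is hosted.
en_es = load_dataset(
    "bigbio/scielo",
    name="scielo_en_es_source",
    split="train",
    trust_remote_code=True,
)

# BigBio t2t schema: flat records with id, document_id, text_1, text_2,
# text_1_name and text_2_name fields.
en_pt = load_dataset(
    "bigbio/scielo",
    name="scielo_en_pt_bigbio_t2t",
    split="train",
    trust_remote_code=True,
)

print(en_es[0]["translation"])
print(en_pt[0]["text_1_name"], en_pt[0]["text_2_name"])

Both configurations expose a single train split, matching _split_generators above.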