holylovenia committed · verified
Commit 868935e · Parent: f8f47ff

Upload gnome.py with huggingface_hub

Files changed (1)
  1. gnome.py +190 -0
gnome.py ADDED
@@ -0,0 +1,190 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import requests
+
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import SCHEMA_TO_FEATURES, TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = r"""\
+ @inproceedings{tiedemann-2012-parallel,
+     title = "Parallel Data, Tools and Interfaces in {OPUS}",
+     author = {Tiedemann, J{\"o}rg},
+     editor = "Calzolari, Nicoletta and
+       Choukri, Khalid and
+       Declerck, Thierry and
+       Do{\u{g}}an, Mehmet U{\u{g}}ur and
+       Maegaard, Bente and
+       Mariani, Joseph and
+       Moreno, Asuncion and
+       Odijk, Jan and
+       Piperidis, Stelios",
+     booktitle = "Proceedings of the Eighth International Conference on Language
+       Resources and Evaluation ({LREC}'12)",
+     month = may,
+     year = "2012",
+     address = "Istanbul, Turkey",
+     publisher = "European Language Resources Association (ELRA)",
+     url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
+     pages = "2214--2218",
+     abstract = "This paper presents the current status of OPUS, a growing
+       language resource of parallel corpora and related tools. The focus in OPUS
+       is to provide freely available data sets in various formats together with
+       basic annotation to be useful for applications in computational linguistics,
+       translation studies and cross-linguistic corpus studies. In this paper, we
+       report about new data sets and their features, additional annotation tools
+       and models provided from the website and essential interfaces and on-line
+       services included in the project.",
+ }
+ """
+
+ _DATASETNAME = "gnome"
+
+ _DESCRIPTION = """\
+ A parallel corpus of GNOME localization files, which contains the interface text
+ of the GNU Network Object Model Environment (GNOME) as published by GNOME
+ translation teams. Text in this dataset is relatively short and technical.
+ """
+
+ _HOMEPAGE = "https://opus.nlpl.eu/GNOME/corpus/version/GNOME"
+
+ _LANGUAGES = ["eng", "vie", "mya", "ind", "tha", "tgl", "zlm", "lao"]
+ _SUBSETS = ["en", "vi", "my", "id", "th", "tl", "ms", "lo"]
+ _SUBSET_PAIRS = [(src, tgt) for src in _SUBSETS for tgt in _SUBSETS if src != tgt]
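+ # 8 subset languages give 8 * 7 = 56 ordered (source, target) pairs.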
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "api": "http://opus.nlpl.eu/opusapi/?source={src_lang}&target={tgt_lang}&corpus=GNOME&version=v1",
+     "data": "https://object.pouta.csc.fi/OPUS-GNOME/v1/moses/{lang_pair}.txt.zip",
+ }
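+ # Note: only the "api" endpoint is queried in _split_generators; the download
+ # link it returns is expected to follow the "data" pattern above.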
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+ _SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # t2t
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class GnomeDataset(datasets.GeneratorBasedBuilder):
+     """A parallel corpus of GNOME localization files"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = []
+     for subset in _SUBSET_PAIRS:
+         lang_pair = f"{subset[0]}-{subset[1]}"
+         BUILDER_CONFIGS += [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang_pair}_source",
+                 version=SOURCE_VERSION,
+                 description=f"{_DATASETNAME} {lang_pair} source schema",
+                 schema="source",
+                 subset_id=lang_pair,
+             ),
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang_pair}_{_SEACROWD_SCHEMA}",
+                 version=SEACROWD_VERSION,
+                 description=f"{_DATASETNAME} {lang_pair} SEACrowd schema",
+                 schema=_SEACROWD_SCHEMA,
+                 subset_id=lang_pair,
+             ),
+         ]
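+     # The loop above registers two configs (source and seacrowd_t2t) per pair,
+     # i.e. 56 * 2 = 112 configs in total.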
+
+     DEFAULT_CONFIG_NAME = (
+         f"{_DATASETNAME}_{_SUBSET_PAIRS[0][0]}-{_SUBSET_PAIRS[0][1]}_source"
+     )
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "source": datasets.Value("string"),
+                     "target": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == _SEACROWD_SCHEMA:
+             features = SCHEMA_TO_FEATURES[
+                 TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]
+             ]  # text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         src_lang, tgt_lang = self.config.subset_id.split("-")
+         api_url = _URLS["api"].format(src_lang=src_lang, tgt_lang=tgt_lang)
+         data_url = None
+
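+         # Resolve the concrete download URL for this language pair via the OPUS API.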
+         response = requests.get(api_url, timeout=10)
+         if response:
+             corpora = response.json()["corpora"]
+             for corpus in corpora:
+                 if ".txt.zip" in corpus["url"]:
+                     data_url = corpus["url"]
+                     break
+         else:
+             raise requests.exceptions.HTTPError(
+                 f"Non-success status code: {response.status_code}"
+             )
+
+         if not data_url:
+             raise ValueError(f"No suitable corpus found, check {api_url}")
+         else:
+             lang_pair = data_url.split("/")[-1].split(".")[0]
+             data_dir = Path(dl_manager.download_and_extract(data_url))
+             src_file = data_dir / f"GNOME.{lang_pair}.{src_lang}"
+             tgt_file = data_dir / f"GNOME.{lang_pair}.{tgt_lang}"
+
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "src_file": src_file,
+                         "tgt_file": tgt_file,
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, src_file: Path, tgt_file: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(src_file, "r", encoding="utf-8") as src_f, open(
+             tgt_file, "r", encoding="utf-8"
+         ) as tgt_f:
+             for idx, (src_line, tgt_line) in enumerate(zip(src_f, tgt_f)):
+                 if self.config.schema == "source":
+                     yield idx, {"source": src_line.strip(), "target": tgt_line.strip()}
+                 elif self.config.schema == _SEACROWD_SCHEMA:
+                     yield idx, {
+                         "id": str(idx),
+                         "text_1": src_line.strip(),
+                         "text_2": tgt_line.strip(),
+                         "text_1_name": f"source ({src_file.name.split('.')[-1]})",
+                         "text_2_name": f"target ({tgt_file.name.split('.')[-1]})",
+                     }
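
Once uploaded, the script can be exercised through the standard datasets API. A minimal sketch, assuming the script is hosted on the Hub (the repo id "SEACrowd/gnome" below is illustrative; config names follow the generated pattern {dataset}_{src}-{tgt}_{schema}):

from datasets import load_dataset

# Illustrative repo id and config name; substitute the actual ones.
ds = load_dataset(
    "SEACrowd/gnome",            # assumed Hub repository id
    name="gnome_en-vi_source",   # e.g. source schema for the en-vi pair
    split="train",
    trust_remote_code=True,      # required: the loader is a Python script
)
print(ds[0])                     # {"source": "...", "target": "..."}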