holylovenia committed
Commit 7d22080
1 Parent(s): 374dddb

Upload lexitron.py with huggingface_hub

Files changed (1)
  1. lexitron.py +295 -0
lexitron.py ADDED
@@ -0,0 +1,295 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
LEXiTRON is a corpus-based dictionary of Thai and English. It contains
frequently used words drawn from trusted publications such as novels,
academic documents, and newspapers. The release provides both a
Thai-English (telex) and an English-Thai (etlex) lexicon. Each entry
consists of the headword, its part of speech, a translation, synonyms,
and sample sentences. The Thai-English lexicon contains about 53,000
entries and the English-Thai lexicon about 83,000 entries.
"""
import os
import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# There is no citation available for this dataset.
_CITATION = ""

_DATASETNAME = "lexitron"

_DESCRIPTION = """
LEXiTRON is a corpus-based dictionary of Thai and English. It contains
frequently used words drawn from trusted publications such as novels,
academic documents, and newspapers. The release provides both a
Thai-English (telex) and an English-Thai (etlex) lexicon. Each entry
consists of the headword, its part of speech, a translation, synonyms,
and sample sentences. The Thai-English lexicon contains about 53,000
entries and the English-Thai lexicon about 83,000 entries.
"""

_HOMEPAGE = "https://opend-portal.nectec.or.th/dataset/lexitron-2-0"

_LANGUAGES = ["tha"]

_LICENSE = Licenses.OTHERS.value

_LOCAL = False

# Both lexicons are distributed in the same zip archive; the subset is
# selected by file name after extraction.
_URLS = {
    "telex": "https://opend-portal.nectec.or.th/dataset/bdd85296-9398-499f-b3a7-aab85042d3f9/resource/761924ea-937f-4be3-afe1-c031c754fa39/download/lexitron_2.0.zip",
    "etlex": "https://opend-portal.nectec.or.th/dataset/bdd85296-9398-499f-b3a7-aab85042d3f9/resource/761924ea-937f-4be3-afe1-c031c754fa39/download/lexitron_2.0.zip",
}

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class LEXiTRONDataset(datasets.GeneratorBasedBuilder):
    """
    LEXiTRON is a corpus-based dictionary of Thai and English. It contains
    frequently used words drawn from trusted publications such as novels,
    academic documents, and newspapers. The release provides both a
    Thai-English (telex) and an English-Thai (etlex) lexicon. Each entry
    consists of the headword, its part of speech, a translation, synonyms,
    and sample sentences. The Thai-English lexicon contains about 53,000
    entries and the English-Thai lexicon about 83,000 entries.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "t2t"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_telex_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_telex",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_telex_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}_telex",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_etlex_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_etlex",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_etlex_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}_etlex",
        ),
    ]
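
    # The configs above resolve to four names:
    #   lexitron_telex_source, lexitron_telex_seacrowd_t2t,
    #   lexitron_etlex_source, lexitron_etlex_seacrowd_t2t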

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_telex_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":

            # Config names look like "lexitron_telex_source", so the second
            # underscore-separated token selects the lexicon direction.
            translation_type = self.config.name.split("_")[1]

            if translation_type == "telex":
                features = datasets.Features(
                    {
                        "id": datasets.Value("int64"),
                        "tsearch": datasets.Value("string"),
                        "tentry": datasets.Value("string"),
                        "eentry": datasets.Value("string"),
                        "tcat": datasets.Value("string"),
                        "tsyn": datasets.Value("string"),
                        "tsample": datasets.Value("string"),
                        "tdef": datasets.Value("string"),
                    }
                )

            elif translation_type == "etlex":
                features = datasets.Features(
                    {
                        "id": datasets.Value("int64"),
                        "esearch": datasets.Value("string"),
                        "eentry": datasets.Value("string"),
                        "tentry": datasets.Value("string"),
                        "ecat": datasets.Value("string"),
                        "esyn": datasets.Value("string"),
                    }
                )

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
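
    # For reference: the SEACrowd "t2t" schema selected above exposes the
    # fields id, text_1, text_2, text_1_name, and text_2_name, which is the
    # shape of the examples built in _generate_examples below.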

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        translation_type = self.config.name.split("_")[1]
        data_dir = dl_manager.download_and_extract(_URLS[translation_type])

        # The archive is expected to extract to LEXiTRON_2.0/telex and
        # LEXiTRON_2.0/etlex; only a single train split is provided.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"LEXiTRON_2.0/{translation_type}"),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        translation_type = self.config.name.split("_")[1]

        def get_tag(tag: str, doc: str):
            # Return the text inside <tag>...</tag>, or None when the field is absent.
            match = re.search(rf"<{tag}>(.*?)</{tag}>", doc)
            return match.group(1) if match else None

        if translation_type == "telex":

            with open(filepath, "r", encoding="latin-1") as file:
                data = file.read()

            # Each dictionary entry is wrapped in a <Doc>...</Doc> block.
            pattern = r"<Doc>(.*?)</Doc>"
            docs = re.findall(pattern, data, re.DOTALL)
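
            # For illustration only (hypothetical layout; "..." marks placeholder
            # values), a telex <Doc> block carries fields such as:
            # <Doc><tsearch>...</tsearch><tentry>...</tentry><eentry>...</eentry>
            # <tcat>...</tcat><tsyn>...</tsyn><tsample>...</tsample>
            # <tdef>...</tdef><id>...</id></Doc>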

            doc_data = []
            for doc in docs:
                doc_data.append(
                    {
                        "id": get_tag("id", doc),
                        "tsearch": get_tag("tsearch", doc),
                        "tentry": get_tag("tentry", doc),
                        "eentry": get_tag("eentry", doc),
                        "tcat": get_tag("tcat", doc),
                        "tsyn": get_tag("tsyn", doc),
                        "tsample": get_tag("tsample", doc),
                        "tdef": get_tag("tdef", doc),
                    }
                )

            df = pd.DataFrame(doc_data)

        if translation_type == "etlex":

            with open(filepath, "r", encoding="latin-1") as file:
                data = file.read()

            pattern = r"<Doc>(.*?)</Doc>"
            docs = re.findall(pattern, data, re.DOTALL)

            doc_data = []
            for doc in docs:
                doc_data.append(
                    {
                        "id": get_tag("id", doc),
                        "esearch": get_tag("esearch", doc),
                        "eentry": get_tag("eentry", doc),
                        "tentry": get_tag("tentry", doc),
                        "ecat": get_tag("ecat", doc),
                        "esyn": get_tag("esyn", doc),
                    }
                )

            df = pd.DataFrame(doc_data)

        for index, row in df.iterrows():

            if self.config.schema == "source":
                example = row.to_dict()

            elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":

                if translation_type == "telex":
                    example = {
                        "id": str(index),
                        "text_1": str(row["tentry"]),
                        "text_2": str(row["eentry"]),
                        "text_1_name": "tentry",
                        "text_2_name": "eentry",
                    }

                elif translation_type == "etlex":
                    example = {
                        "id": str(index),
                        "text_1": str(row["eentry"]),
                        "text_2": str(row["tentry"]),
                        "text_1_name": "eentry",
                        "text_2_name": "tentry",
                    }

            yield index, example
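
A minimal usage sketch, assuming the seacrowd utilities are installed and this script sits in the working directory (the config names are those defined in BUILDER_CONFIGS above):

from datasets import load_dataset

# Thai-English lexicon, source schema (raw telex fields).
telex = load_dataset("lexitron.py", name="lexitron_telex_source", split="train")

# English-Thai lexicon, SEACrowd text-to-text schema.
etlex = load_dataset("lexitron.py", name="lexitron_etlex_seacrowd_t2t", split="train")

print(telex[0])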