Languages: Thai
holylovenia committed
Commit 47d3c79
1 Parent(s): 2b89ca4

Upload orchid_pos.py with huggingface_hub

Files changed (1):
  1. orchid_pos.py +272 -0
orchid_pos.py ADDED
@@ -0,0 +1,272 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{sornlertlamvanich1999building,
  title={Building a Thai part-of-speech tagged corpus (ORCHID)},
  author={Sornlertlamvanich, Virach and Takahashi, Naoto and Isahara, Hitoshi},
  journal={Journal of the Acoustical Society of Japan (E)},
  volume={20},
  number={3},
  pages={189--198},
  year={1999},
  publisher={Acoustical Society of Japan}
}
"""

_DATASETNAME = "orchid_pos"

_DESCRIPTION = """\
The ORCHID corpus is a Thai part-of-speech (POS) tagged dataset, resulting from a collaboration between \
Japan's Communications Research Laboratory (CRL) and Thailand's National Electronics and Computer Technology \
Center (NECTEC). It is structured at three levels: paragraph, sentence, and word. The dataset incorporates a \
unique tagset designed for use in multi-lingual machine translation projects, and is tailored to address the \
challenges of Thai text, which lacks explicit word and sentence boundaries, punctuation, and inflection. \
This dataset includes text information along with numbering for retrieval, and employs a probabilistic trigram \
model for word segmentation and POS tagging. The ORCHID corpus is specifically structured to reduce ambiguity in \
POS assignments, making it a valuable resource for Thai language processing and computational linguistics research.
"""

_HOMEPAGE = "https://github.com/wannaphong/corpus_mirror/releases/tag/orchid-v1.0"

_LANGUAGES = ["tha"]

_LICENSE = Licenses.CC_BY_NC_SA_3_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://github.com/wannaphong/corpus_mirror/releases/download/orchid-v1.0/orchid97.crp.utf",
}

_SUPPORTED_TASKS = [Tasks.POS_TAGGING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

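# Two builder configurations are exposed below: "orchid_pos_source" keeps the bibliographic
# metadata of each ORCHID document alongside the token/label sequences, while
# "orchid_pos_seacrowd_seq_label" maps the same data onto SEACrowd's generic
# sequence-labelling schema.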
class OrchidPOSDataset(datasets.GeneratorBasedBuilder):

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        label_names = [
            "NPRP",
            "NCNM",
            "NONM",
            "NLBL",
            "NCMN",
            "NTTL",
            "PPRS",
            "PDMN",
            "PNTR",
            "PREL",
            "VACT",
            "VSTA",
            "VATT",
            "XVBM",
            "XVAM",
            "XVMM",
            "XVBB",
            "XVAE",
            "DDAN",
            "DDAC",
            "DDBQ",
            "DDAQ",
            "DIAC",
            "DIBQ",
            "DIAQ",
            "DCNM",
            "DONM",
            "ADVN",
            "ADVI",
            "ADVP",
            "ADVS",
            "CNIT",
            "CLTV",
            "CMTR",
            "CFQC",
            "CVBL",
            "JCRG",
            "JCMP",
            "JSBR",
            "RPRE",
            "INT",
            "FIXN",
            "FIXV",
            "EAFF",
            "EITT",
            "NEG",
            "PUNC",
            "CMTR@PUNC",
        ]
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "ttitle": datasets.Value("string"),
                    "etitle": datasets.Value("string"),
                    "tauthor": datasets.Value("string"),
                    "eauthor": datasets.Value("string"),
                    "tinbook": datasets.Value("string"),
                    "einbook": datasets.Value("string"),
                    "tpublisher": datasets.Value("string"),
                    "epublisher": datasets.Value("string"),
                    "year": datasets.Value("string"),
                    "file": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "labels": datasets.Sequence(datasets.ClassLabel(names=label_names)),
                }
            )

        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(label_names)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        # The URL points at a single plain-text corpus file, so download_and_extract
        # returns the local path of that downloaded file.
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

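    # Record layout assumed by the parsing helpers below (inferred from the splitting logic):
    # each document starts at "%TTitle:" and carries further "%"-prefixed metadata fields;
    # its body is divided into paragraphs marked "#P<n>" and sentences marked "#<n>", and each
    # sentence lists its "token/TAG" (or "token@TAG") pairs after a "//" separator line.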
    def _get_tokens_labels(self, paragraphs):
        tokens = []
        labels = []
        # Escaped punctuation names used in the corpus, mapped back to their literal characters.
        token_mapping = {
            "<space>": " ",
            "<exclamation>": "!",
            "<quotation>": '"',
            "<number>": "#",
            "<dollar>": "$",
            "<percent>": "%",
            "<ampersand>": "&",
            "<apostrophe>": "'",
            "<slash>": "/",
            "<colon>": ":",
            "<semi_colon>": ";",
            "<less_than>": "<",
            "<equal>": "=",
            "<greater than>": ">",
            "<question_mark>": "?",
            "<at_mark>": "@",
            "<left_parenthesis>": "(",
            "<left_square_bracket>": "[",
            "<right_parenthesis>": ")",
            "<right_square_bracket>": "]",
            "<asterisk>": "*",
            "<circumflex_accent>": "^",
            "<plus>": "+",
            "<low_line>": "_",
            "<comma>": ",",
            "<left_curly_bracket>": "{",
            "<minus>": "-",
            "<right_curly_bracket>": "}",
            "<full_stop>": ".",
            "<tilde>": "~",
        }
        for paragraph in paragraphs:
            sentences = re.split(r"#\d+\n", paragraph)
            for sentence in sentences[1:]:
                token_pos_pairs = sentence.split("//")[1]
                for token_pos_pair in token_pos_pairs.split("\n")[1:-1]:
                    if "/" in token_pos_pair:
                        token = token_pos_pair.split("/")[0]
                        tokens.append(token_mapping[token] if token in token_mapping.keys() else token)
                        labels.append(token_pos_pair.split("/")[1])
                    else:
                        token = token_pos_pair.split("@")[0]
                        tokens.append(token_mapping[token] if token in token_mapping.keys() else token)
                        labels.append(token_pos_pair.split("@")[1])
        return tokens, labels

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        # The corpus file (orchid97.crp.utf) is UTF-8 encoded.
        with open(filepath, "r", encoding="utf-8") as f:
            file_content = f.read()
        texts = file_content.split("%TTitle:")

        idx = 0
        for text in texts[1:]:
            file_part = text.split("%File")[-1]
            tokens, labels = self._get_tokens_labels(re.split(r"#P\d+\n", file_part)[1:])
            if self.config.schema == "source":
                parts = text.split("%")
                example = {
                    "ttitle": parts[0],
                    "etitle": ":".join(parts[1].split(":")[1:]).strip(),
                    "tauthor": ":".join(parts[2].split(":")[1:]).strip(),
                    "eauthor": ":".join(parts[3].split(":")[1:]).strip(),
                    "tinbook": ":".join(parts[4].split(":")[1:]).strip(),
                    "einbook": ":".join(parts[5].split(":")[1:]).strip(),
                    "tpublisher": ":".join(parts[6].split(":")[1:]).strip(),
                    "epublisher": ":".join(parts[7].split(":")[1:]).strip(),
                    "year": ":".join(parts[9].split(":")[1:]).strip(),
                    "file": file_part.strip(),
                    "tokens": tokens,
                    "labels": labels,
                }
            elif self.config.schema == "seacrowd_seq_label":
                example = {
                    "id": idx,
                    "tokens": tokens,
                    "labels": labels,
                }
            yield idx, example
            idx += 1
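
The script above defines two loadable configurations, orchid_pos_source and orchid_pos_seacrowd_seq_label, with the source config as default. Below is a minimal loading sketch, assuming the script is saved locally as orchid_pos.py and that the seacrowd helper package it imports is installed; depending on the installed datasets version, trust_remote_code=True may be required for script-based datasets.

import datasets

# Source schema: bibliographic metadata plus token/label sequences.
source = datasets.load_dataset("orchid_pos.py", name="orchid_pos_source", split="train", trust_remote_code=True)

# SEACrowd sequence-labelling schema: id, tokens, labels only.
seq_label = datasets.load_dataset("orchid_pos.py", name="orchid_pos_seacrowd_seq_label", split="train", trust_remote_code=True)

print(source[0]["tokens"][:10])
print(source[0]["labels"][:10])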