Languages: Thai
holylovenia committed
Commit b386b9a (1 parent: 25a4788)

Upload vistec_tp_th_21.py with huggingface_hub

Files changed (1)
  1. vistec_tp_th_21.py +183 -0
vistec_tp_th_21.py ADDED
@@ -0,0 +1,183 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{limkonchotiwat-etal-2021-handling,
    title = "Handling Cross- and Out-of-Domain Samples in {T}hai Word Segmentation",
    author = "Limkonchotiwat, Peerat and
      Phatthiyaphaibun, Wannaphong and
      Sarwar, Raheem and
      Chuangsuwanich, Ekapol and
      Nutanong, Sarana",
    booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-acl.86",
    doi = "10.18653/v1/2021.findings-acl.86",
    pages = "1003--1016",
}
"""

_DATASETNAME = "vistec_tp_th_21"

_DESCRIPTION = """\
The largest social media domain dataset for Thai text processing (word segmentation,
misspelling detection and correction, and named-entity boundary), called "VISTEC-TP-TH-2021" or VISTEC-2021.
The VISTEC corpus contains 49,997 sentences with 3.39M words; the collection was manually annotated by
linguists on four tasks, namely word segmentation, misspelling detection and correction,
and named entity recognition.
"""

_HOMEPAGE = "https://github.com/mrpeerat/OSKut/tree/main/VISTEC-TP-TH-2021"

_LANGUAGES = ["tha"]

_LICENSE = Licenses.CC_BY_SA_3_0.value

_LOCAL = False

_URLS = {
    "train": "https://raw.githubusercontent.com/mrpeerat/OSKut/main/VISTEC-TP-TH-2021/train/VISTEC-TP-TH-2021_train_proprocessed.txt",
    "test": "https://raw.githubusercontent.com/mrpeerat/OSKut/main/VISTEC-TP-TH-2021/test/VISTEC-TP-TH-2021_test_proprocessed.txt",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

class VISTEC21Dataset(datasets.GeneratorBasedBuilder):
    """
    The largest social media domain dataset for Thai text processing (word segmentation,
    misspelling detection and correction, and named-entity boundary), called "VISTEC-TP-TH-2021" or VISTEC-2021.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "seq_label"
    LABEL_CLASSES = ["0", "1"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
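    # Descriptive note: the two configs above resolve to the names
    # "vistec_tp_th_21_source" and "vistec_tp_th_21_seacrowd_seq_label";
    # the source config is the default.
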
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=self.LABEL_CLASSES)),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.seq_label_features(self.LABEL_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        data_files = {
            "train": Path(dl_manager.download_and_extract(_URLS["train"])),
            "test": Path(dl_manager.download_and_extract(_URLS["test"])),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"], "split": "test"},
            ),
        ]

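    # Note on the raw file format (the example line below is hypothetical, added
    # for readability): each line holds one sentence with tokens separated by "|",
    # and the parser below handles the inline markup <ne>...</ne> (named-entity
    # tokens, tagged 1), <msp value=...>...</msp> (misspelled tokens, replaced by
    # the corrected value and tagged 0), and <compound>...</compound> (compound
    # words, tagged 0); all remaining tokens are tagged 0.
    # e.g. "วันนี้|ไป|<ne>เชียงใหม่</ne>|กับ|<msp value=เพื่อน>เพือน</msp>"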
    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        label_key = "ner_tags" if self.config.schema == "source" else "labels"

        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.readlines()
            id = 0
            for line in lines:
                tokens = line.split("|")
                token_list = []
                ner_tag = []
                for token in tokens:
                    if "<ne>" in token:
                        token = token.replace("<ne>", "")
                        token = token.replace("</ne>", "")
                        token_list.append(token)
                        ner_tag.append(1)
                        continue
                    if "</msp>" in token and "<msp value=" in token:
                        token_list.append(re.findall(r"<msp value=([^>]*)>", token)[0])
                        ner_tag.append(0)
                        continue
                    if "<compound>" in token or "</compound>" in token:
                        token = token.replace("<compound>", "")
                        token = token.replace("</compound>", "")
                        token_list.append(token)
                        ner_tag.append(0)
                        continue
                    token_list.append(token)
                    ner_tag.append(0)
                id += 1
                yield id, {
                    "id": str(id),
                    "tokens": token_list,
                    label_key: ner_tag,
                }
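
A minimal usage sketch (not part of the uploaded script; it assumes the seacrowd package is importable, a datasets version that still supports local loading scripts, and that the file sits in the working directory). The config names follow BUILDER_CONFIGS above.

import datasets

# Source schema: examples carry "id", "tokens", and binary "ner_tags".
source = datasets.load_dataset("vistec_tp_th_21.py", name="vistec_tp_th_21_source")

# SEACrowd sequence-labelling schema: the script writes the same tags under "labels".
seacrowd = datasets.load_dataset("vistec_tp_th_21.py", name="vistec_tp_th_21_seacrowd_seq_label")

print(source["train"][0])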