holylovenia committed on
Commit
20398b1
1 Parent(s): 15fb647

Upload kamus_alay.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. kamus_alay.py +160 -0
kamus_alay.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

# BibTeX entry for the originating paper (IALP 2018).
_CITATION = """\
@INPROCEEDINGS{8629151,
author={Aliyah Salsabila, Nikmatun and Ardhito Winatmoko, Yosef and Akbar Septiandri, Ali and Jamal, Ade},
booktitle={2018 International Conference on Asian Language Processing (IALP)},
title={Colloquial Indonesian Lexicon},
year={2018},
volume={},
number={},
pages={226-229},
doi={10.1109/IALP.2018.8629151}}
"""

_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False  # Data is fetched from a public URL, not a local path.

_DATASETNAME = "kamus_alay"

_DESCRIPTION = """\
Kamus Alay provide a lexicon for text normalization of Indonesian colloquial words.
It contains 3,592 unique colloquial words-also known as “bahasa alay” -and manually annotated them
with the normalized form. We built this lexicon from Instagram comments provided by Septiandri & Wibisono (2017)
"""

_HOMEPAGE = "https://ieeexplore.ieee.org/abstract/document/8629151"

_LICENSE = "Unknown"

# Raw lexicon CSV hosted in the authors' GitHub repository.
_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/nasalsabila/kamus-alay/master/colloquial-indonesian-lexicon.csv",
}

_SUPPORTED_TASKS = [Tasks.MORPHOLOGICAL_INFLECTION]

# Dataset does not have versioning
_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
class KamusAlay(datasets.GeneratorBasedBuilder):
    """Kamus Alay is a dataset of lexicon for text normalization of Indonesian colloquial word"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    # Closed set of normalization-category labels appearing in the CSV's
    # category1/category2/category3 columns; also used to build the
    # nusantara pairs_multi label feature.
    label_classes = [
        "abreviasi",
        "afiksasi",
        "akronim",
        "anaptiksis",
        "coinage",
        "elongasi",
        "homofon",
        "metatesis",
        "modifikasi vokal",
        "monoftongisasi",
        "naturalisasi",
        "pungtuasi",
        "reduplikasi",
        "salah ketik",
        "subtitusi",
        "word-value letter",
        "zeroisasi",
    ]

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="kamus_alay_source",
            version=SOURCE_VERSION,
            description="Kamus Alay source schema",
            schema="source",
            subset_id="kamus_alay",
        ),
        NusantaraConfig(
            name="kamus_alay_nusantara_pairs_multi",
            version=NUSANTARA_VERSION,
            description="Kamus Alay Nusantara schema",
            schema="nusantara_pairs_multi",
            subset_id="kamus_alay",
        ),
    ]

    DEFAULT_CONFIG_NAME = "kamus_alay_source"

    @staticmethod
    def _categories(row) -> List[str]:
        """Collect the up-to-three category annotations of a CSV row.

        The CSV uses the string "0" as a "no category" placeholder, which is
        dropped. Non-string cells (e.g. float NaN produced by pandas for an
        empty cell) are dropped as well so the string-typed features never
        receive a non-string value.
        """
        return [c for c in (row.category1, row.category2, row.category3) if isinstance(c, str) and c != "0"]

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo for the active config's schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "slang": datasets.Value("string"),
                    "formal": datasets.Value("string"),
                    "in_dictionary": datasets.Value("bool"),
                    "context": datasets.Value("string"),
                    "categories": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "nusantara_pairs_multi":
            # Multi-label text-pair schema; labels come from label_classes.
            features = schemas.pairs_multi_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators. The dataset ships as a single CSV, so
        everything goes into the train split."""
        urls = _URLS[_DATASETNAME]

        data_dir = Path(dl_manager.download(urls))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Args:
            filepath: local path of the downloaded lexicon CSV.
            split: split name (always "train"; unused beyond the generator
                contract).
        """
        # Dataset does not have id, using row index as id
        df = pd.read_csv(filepath, encoding="ISO-8859-1").reset_index()
        df.columns = ["id", "slang", "formal", "is_in_dictionary", "example", "category1", "category2", "category3"]

        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {
                    "slang": row.slang,
                    "formal": row.formal,
                    "in_dictionary": row.is_in_dictionary,
                    "context": row.example,
                    "categories": self._categories(row),
                }
                yield row.id, ex

        elif self.config.schema == "nusantara_pairs_multi":
            for row in df.itertuples():
                ex = {
                    "id": str(row.id),
                    "text_1": row.formal,
                    "text_2": row.slang,
                    "label": self._categories(row),
                }
                yield row.id, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")