holylovenia committed
Commit 83637d1
1 Parent(s): 4473187

Upload ud_id_csui.py with huggingface_hub

Files changed (1)
  1. ud_id_csui.py +242 -0
ud_id_csui.py ADDED
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+from conllu import TokenList
+
+from nusacrowd.utils import schemas
+from nusacrowd.utils.common_parser import load_ud_data, load_ud_data_as_nusantara_kb
+from nusacrowd.utils.configs import NusantaraConfig
+from nusacrowd.utils.constants import Tasks
+
+_CITATION = """\
+@article{10.3844/jcssp.2020.1585.1597,
+    author = {Alfina, Ika and Budi, Indra and Suhartanto, Heru},
+    title = {Tree Rotations for Dependency Trees: Converting the Head-Directionality of Noun Phrases},
+    article_type = {journal},
+    volume = {16},
+    number = {11},
+    year = {2020},
+    month = {Nov},
+    pages = {1585-1597},
+    doi = {10.3844/jcssp.2020.1585.1597},
+    url = {https://thescipub.com/abstract/jcssp.2020.1585.1597},
+    journal = {Journal of Computer Science},
+    publisher = {Science Publications}
+}
+"""
+
+_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+_LOCAL = False
+
+_DATASETNAME = "ud_id_csui"
+
+_DESCRIPTION = """\
+UD Indonesian-CSUI is a conversion from an Indonesian constituency treebank in the Penn Treebank format named Kethu, which was itself converted from the constituency treebank built by Dinakaramani et al. (2015).
+This treebank is named after the place where the treebanks were built: the Faculty of Computer Science (CS), Universitas Indonesia (UI).
+
+About this treebank:
+- The genre is news in formal Indonesian (mostly economic news).
+- 1030 sentences (28K words), split into a test set of around 10K words and a training set of around 18K words.
+- An average of 27.4 words per sentence.
+"""
+
+_HOMEPAGE = "https://github.com/UniversalDependencies/UD_Indonesian-CSUI"
+
+_LICENSE = "CC BY-SA 4.0"
+
+_URLS = {
+    _DATASETNAME: {
+        "train": "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-CSUI/master/id_csui-ud-train.conllu",
+        "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-CSUI/master/id_csui-ud-test.conllu",
+    },
+}
+
+_SUPPORTED_TASKS = [Tasks.DEPENDENCY_PARSING, Tasks.MACHINE_TRANSLATION, Tasks.POS_TAGGING]
+
+_SOURCE_VERSION = "1.0.0"
+
+_NUSANTARA_VERSION = "1.0.0"
+
+
+class UdIdCsuiDataset(datasets.GeneratorBasedBuilder):
+    """Treebank of formal Indonesian news which consists of 1030 sentences (28K words)"""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+    # source: https://universaldependencies.org/u/pos/
+    UPOS_TAGS = ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"]
+
+    BUILDER_CONFIGS = [
+        NusantaraConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=f"{_DATASETNAME}",
+        ),
+        NusantaraConfig(
+            name=f"{_DATASETNAME}_nusantara_kb",
+            version=NUSANTARA_VERSION,
+            description=f"{_DATASETNAME} Nusantara KB schema",
+            schema="nusantara_kb",
+            subset_id=f"{_DATASETNAME}",
+        ),
+        NusantaraConfig(
+            name=f"{_DATASETNAME}_nusantara_t2t",
+            version=NUSANTARA_VERSION,
+            description=f"{_DATASETNAME} Nusantara Text to Text schema",
+            schema="nusantara_t2t",
+            subset_id=f"{_DATASETNAME}",
+        ),
+        NusantaraConfig(
+            name=f"{_DATASETNAME}_nusantara_seq_label",
+            version=NUSANTARA_VERSION,
+            description=f"{_DATASETNAME} Nusantara Seq Label schema",
+            schema="nusantara_seq_label",
+            subset_id=f"{_DATASETNAME}",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    # metadata
+                    "sent_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "text_en": datasets.Value("string"),
+                    # tokens
+                    "id": [datasets.Value("string")],
+                    "form": [datasets.Value("string")],
+                    "lemma": [datasets.Value("string")],
+                    "upos": [datasets.Value("string")],
+                    "xpos": [datasets.Value("string")],
+                    "feats": [datasets.Value("string")],
+                    "head": [datasets.Value("string")],
+                    "deprel": [datasets.Value("string")],
+                    "deps": [datasets.Value("string")],
+                    "misc": [datasets.Value("string")],
+                }
+            )
+
+        elif self.config.schema == "nusantara_kb":
+            features = schemas.kb_features
+
+        elif self.config.schema == "nusantara_t2t":
+            features = schemas.text2text_features
+
+        elif self.config.schema == "nusantara_seq_label":
+            features = schemas.seq_label_features(self.UPOS_TAGS)
+
+        else:
+            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        urls = _URLS[_DATASETNAME]
+        data_path = dl_manager.download(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": data_path["train"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": data_path["test"],
+                },
+            ),
+        ]
+
+    @staticmethod
+    def _assert_multispan_range_is_one(token_list: TokenList):
+        """
+        Assert that every multi-span (multiword) token covers exactly two
+        words, and that no field other than 'form' carries information.
+        """
+        for token in token_list.filter(id=lambda i: not isinstance(i, int)):
+            _id = token["id"]
+            assert len(_id) == 3, f"Unexpected length of non-int CONLLU Token's id. Expected 3, found {len(_id)};"
+            assert all(isinstance(a, b) for a, b in zip(_id, [int, str, int])), f"Non-int ID should be in format of '\\d+-\\d+'. Found {_id};"
+            assert _id[2] - _id[0] == 1, f"Token has more than 2 spans. Found {_id[2] - _id[0] + 1} spans;"
+            for key in ["lemma", "upos", "xpos", "feats", "head", "deprel", "deps"]:
+                assert token[key] in {"_", None}, f"Field other than 'form' should not contain extra information. Found: '{key}' = '{token[key]}'"
+
+    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+
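+        # `filter_kwargs` keeps only tokens with integer ids, i.e. it drops
+        # multiword-token range lines (e.g. "3-4") after `assert_fn` has
+        # validated them.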
+        dataset = list(load_ud_data(filepath, filter_kwargs={"id": lambda i: isinstance(i, int)}, assert_fn=self._assert_multispan_range_is_one))
+
+        if self.config.schema == "source":
+            pass
+
+        elif self.config.schema == "nusantara_kb":
+            dataset = load_ud_data_as_nusantara_kb(filepath, dataset)
+
+        elif self.config.schema == "nusantara_t2t":
+            dataset = list(
+                map(
+                    lambda d: {
+                        "id": d["sent_id"],
+                        "text_1": d["text"],
+                        "text_2": d["text_en"],
+                        "text_1_name": "ind",
+                        "text_2_name": "eng",
+                    },
+                    dataset,
+                )
+            )
+
+        elif self.config.schema == "nusantara_seq_label":
+            dataset = list(
+                map(
+                    lambda d: {
+                        "id": d["sent_id"],
+                        "tokens": d["form"],
+                        "labels": d["upos"],
+                    },
+                    dataset,
+                )
+            )
+
+        else:
+            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+        for key, example in enumerate(dataset):
+            yield key, example
+
+
+if __name__ == "__main__":
+    datasets.load_dataset(__file__)
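
A minimal usage sketch, assuming the nusacrowd package is installed and this script is saved locally as ud_id_csui.py (the config name below follows the BUILDER_CONFIGS defined above):

    import datasets

    # Load the POS-tagging view of the treebank; "ud_id_csui_nusantara_seq_label"
    # is one of the config names declared in BUILDER_CONFIGS.
    dset = datasets.load_dataset("ud_id_csui.py", name="ud_id_csui_nusantara_seq_label")

    # Each example pairs surface tokens with their UPOS labels.
    sample = dset["train"][0]
    print(sample["tokens"][:5], sample["labels"][:5])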