holylovenia committed on
Commit
897b739
1 parent: 2c8237f

Upload indolem_ner_ugm.py with huggingface_hub

Files changed (1)
  1. indolem_ner_ugm.py +177 -0
indolem_ner_ugm.py ADDED
@@ -0,0 +1,177 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.common_parser import load_conll_data
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{koto-etal-2020-indolem,
+     title = "{I}ndo{LEM} and {I}ndo{BERT}: A Benchmark Dataset and Pre-trained Language Model for {I}ndonesian {NLP}",
+     author = "Koto, Fajri and
+       Rahimi, Afshin and
+       Lau, Jey Han and
+       Baldwin, Timothy",
+     booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
+     month = dec,
+     year = "2020",
+     address = "Barcelona, Spain (Online)",
+     publisher = "International Committee on Computational Linguistics",
+     url = "https://aclanthology.org/2020.coling-main.66",
+     doi = "10.18653/v1/2020.coling-main.66",
+     pages = "757--770"
+ }
+ @phdthesis{fachri2014pengenalan,
+     title = {Pengenalan Entitas Bernama Pada Teks Bahasa Indonesia Menggunakan Hidden Markov Model},
+     author = {FACHRI, MUHAMMAD},
+     year = {2014},
+     school = {Universitas Gadjah Mada}
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "indolem_ner_ugm"
+
+ _DESCRIPTION = """\
+ NER UGM is a named entity recognition dataset comprising 2,343 sentences from news articles, constructed at Universitas Gadjah Mada. It is annotated with five named entity classes: person, organization, location, time, and quantity.
+ """
+
+ _HOMEPAGE = "https://indolem.github.io/"
+
+ _LICENSE = "Creative Commons Attribution 4.0"
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerugm/train.0{fold_number}.tsv",
+         "validation": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerugm/dev.0{fold_number}.tsv",
+         "test": "https://raw.githubusercontent.com/indolem/indolem/main/ner/data/nerugm/test.0{fold_number}.tsv",
+     }
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+ class IndolemNERUGM(datasets.GeneratorBasedBuilder):
+     """NER UGM comprises 2,343 sentences from news articles, constructed at Universitas Gadjah Mada and annotated with five named entity classes: person, organization, location, time, and quantity. The data is distributed in five cross-validation folds."""
+
+     label_classes = ["B-PERSON", "B-LOCATION", "B-ORGANIZATION", "B-TIME", "B-QUANTITY", "I-PERSON", "I-LOCATION", "I-ORGANIZATION", "I-TIME", "I-QUANTITY", "O"]
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = (
+         [
+             NusantaraConfig(
+                 name=f"indolem_ner_ugm_fold{i}_source",
+                 version=_SOURCE_VERSION,
+                 description="indolem_ner_ugm source schema",
+                 schema="source",
+                 subset_id=f"indolem_ner_ugm_fold{i}",
+             ) for i in range(5)
+         ]
+         + [
+             NusantaraConfig(
+                 name=f"indolem_ner_ugm_fold{i}_nusantara_seq_label",
+                 version=_NUSANTARA_VERSION,
+                 description="indolem_ner_ugm Nusantara schema",
+                 schema="nusantara_seq_label",
+                 subset_id=f"indolem_ner_ugm_fold{i}",
+             ) for i in range(5)
+         ]
+     )
+
+     DEFAULT_CONFIG_NAME = "indolem_ner_ugm_fold0_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "index": datasets.Value("string"),
+                     "tokens": [datasets.Value("string")],
+                     "tags": [datasets.Value("string")],
+                 }
+             )
+         elif self.config.schema == "nusantara_seq_label":
+             features = schemas.seq_label_features(self.label_classes)
+         else:
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _get_fold_index(self):
+         # Recover the fold number from a subset_id such as "indolem_ner_ugm_fold3";
+         # default to fold 0 when no valid fold suffix is present.
+         try:
+             subset_id = self.config.subset_id
+             idx_fold = subset_id.index("_fold")
+             file_id = subset_id[idx_fold + len("_fold"):]
+             return int(file_id)
+         except ValueError:
+             return 0
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         idx = self._get_fold_index()
+
+         # Build a fresh dict instead of formatting _URLS in place, so the
+         # module-level URL templates are not mutated across configs.
+         # The fold files are 1-indexed (train.01.tsv .. train.05.tsv).
+         urls = {key: url.format(fold_number=idx + 1) for key, url in _URLS[_DATASETNAME].items()}
+
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["validation"],
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         conll_dataset = load_conll_data(filepath)
+
+         if self.config.schema == "source":
+             for i, row in enumerate(conll_dataset):
+                 ex = {
+                     "index": str(i),
+                     "tokens": row["sentence"],
+                     "tags": row["label"],
+                 }
+                 yield i, ex
+         elif self.config.schema == "nusantara_seq_label":
+             for i, row in enumerate(conll_dataset):
+                 ex = {
+                     "id": str(i),
+                     "tokens": row["sentence"],
+                     "labels": row["label"],
+                 }
+                 yield i, ex
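
For reference, a minimal usage sketch (not part of the commit): it assumes the nusacrowd package is installed, a datasets release that still supports script-based loaders, and that this file is saved locally as indolem_ner_ugm.py. The config name comes from BUILDER_CONFIGS above.

import datasets

# Load fold 0 of NER UGM in the source schema via the local loader script.
dataset = datasets.load_dataset(
    "indolem_ner_ugm.py",
    name="indolem_ner_ugm_fold0_source",
    trust_remote_code=True,  # required by recent datasets releases for loading scripts
)

print(dataset)              # DatasetDict with train/validation/test splits
print(dataset["train"][0])  # {"index": "0", "tokens": [...], "tags": [...]}

Each fold config (fold0 to fold4) downloads its own train/dev/test TSVs, so the five configs together reproduce the 5-fold cross-validation setup used by IndoLEM.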