Languages:
Indonesian
holylovenia committed
Commit 5daaf92
1 Parent(s): 868500a

Upload indoler.py with huggingface_hub

Files changed (1)
  1. indoler.py +214 -0
indoler.py ADDED
@@ -0,0 +1,214 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
https://github.com/ir-nlp-csui/indoler/tree/main
The dataset contains 993 annotated court decision documents.
The documents were taken from decisions of the Supreme Court of Indonesia.
The documents have also been tokenized and cleaned.
"""
import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, Licenses

_CITATION = """\
@INPROCEEDINGS{9263157,
  author={Nuranti, Eka Qadri and Yulianti, Evi},
  booktitle={2020 International Conference on Advanced Computer Science and Information Systems (ICACSIS)},
  title={Legal Entity Recognition in Indonesian Court Decision Documents Using Bi-LSTM and CRF Approaches},
  year={2020},
  volume={},
  number={},
  pages={429-434},
  keywords={legal processing;legal entity recognition;legal document;named entity recognition;ner;bi-lstm;lstm;crf},
  doi={10.1109/ICACSIS51025.2020.9263157}}
"""

_DATASETNAME = "indoler"

_DESCRIPTION = """\
https://github.com/ir-nlp-csui/indoler/tree/main
The data can be used for an NER task on legal documents.
The dataset contains 993 annotated court decision documents.
The documents were taken from decisions of the Supreme Court of Indonesia.
The documents have also been tokenized and cleaned.
"""

_HOMEPAGE = "https://github.com/ir-nlp-csui/indoler/tree/main"

_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    _DATASETNAME: {
        "test_idx": "https://raw.githubusercontent.com/ir-nlp-csui/indoler/main/test.ids.csv",
        "train_idx": "https://raw.githubusercontent.com/ir-nlp-csui/indoler/main/train.ids.csv",
        "valid_idx": "https://raw.githubusercontent.com/ir-nlp-csui/indoler/main/val.ids.csv",
        "full_data": "https://raw.githubusercontent.com/ir-nlp-csui/indoler/main/data.json",
    },
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "2.0.0"

_SEACROWD_VERSION = "2024.06.20"


class IndoLer(datasets.GeneratorBasedBuilder):
    """https://github.com/ir-nlp-csui/indoler/tree/main
    The data can be used for an NER task on legal documents.
    The dataset contains 993 annotated court decision documents.
    The documents were taken from decisions of the Supreme Court of Indonesia.
    The documents have also been tokenized and cleaned."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="indoler_source",
            version=SOURCE_VERSION,
            description="indoler source schema",
            schema="source",
            subset_id="indoler",
        ),
        SEACrowdConfig(
            name="indoler_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description="indoler SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id="indoler",
        ),
    ]

    DEFAULT_CONFIG_NAME = "indoler_source"

    def _info(self) -> datasets.DatasetInfo:
        # BIO tag set: "O" plus B-/I- variants of 20 legal entity types
        # (verdict type, indictment type, case type, violated statutes,
        # judge/prosecutor/clerk/lawyer/court/witness/defendant names,
        # case number, sentence, and key dates).
        NAMED_ENTITIES = [
            "O",
            "B-Jenis Amar", "B-Jenis Dakwaan", "B-Jenis Perkara", "B-Melanggar UU (Dakwaan)",
            "B-Melanggar UU (Pertimbangan Hukum)", "B-Melanggar UU (Tuntutan)", "B-Nama Hakim Anggota", "B-Nama Hakim Ketua",
            "B-Nama Jaksa", "B-Nama Panitera", "B-Nama Pengacara", "B-Nama Pengadilan",
            "B-Nama Saksi", "B-Nama Terdakwa", "B-Nomor Putusan", "B-Putusan Hukuman",
            "B-Tanggal Kejadian", "B-Tanggal Putusan", "B-Tingkat Kasus", "B-Tuntutan Hukuman",
            "I-Jenis Amar", "I-Jenis Dakwaan", "I-Jenis Perkara", "I-Melanggar UU (Dakwaan)",
            "I-Melanggar UU (Pertimbangan Hukum)", "I-Melanggar UU (Tuntutan)", "I-Nama Hakim Anggota", "I-Nama Hakim Ketua",
            "I-Nama Jaksa", "I-Nama Panitera", "I-Nama Pengacara", "I-Nama Pengadilan",
            "I-Nama Saksi", "I-Nama Terdakwa", "I-Nomor Putusan", "I-Putusan Hukuman",
            "I-Tanggal Kejadian", "I-Tanggal Putusan", "I-Tingkat Kasus", "I-Tuntutan Hukuman",
        ]

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "owner": datasets.Value("string"),
                    # Whether a lawyer was involved (boolean label names as in the source data).
                    "lawyer": datasets.ClassLabel(names=[False, True]),
                    "verdict": datasets.ClassLabel(names=["guilty", "bebas", "lepas"]),
                    "indictment": datasets.ClassLabel(names=["NA", "tunggal", "subsider", "komul", "alternatif", "kombinasi", "gabungan"]),
                    "text-tags": datasets.Sequence(datasets.ClassLabel(names=NAMED_ENTITIES)),
                    "text": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label.features(NAMED_ENTITIES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        # The full corpus lives in one JSON file; each split is defined by a CSV of document ids.
        test_path = dl_manager.download_and_extract(urls["test_idx"])
        train_path = dl_manager.download_and_extract(urls["train_idx"])
        valid_path = dl_manager.download_and_extract(urls["valid_idx"])
        data_path = dl_manager.download_and_extract(urls["full_data"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_path,
                    "idx_path": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_path,
                    "idx_path": test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_path,
                    "idx_path": valid_path,
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, idx_path: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        # Collect the document ids belonging to this split.
        split_idxs = []
        with open(idx_path, "r", encoding="utf-8") as indexes:
            for index in indexes.readlines():
                split_idxs.append(int(index))
        with open(filepath, "r", encoding="utf-8") as file:
            contents = json.load(file)
            counter = 0
            for content in contents:
                if int(content["id"]) in split_idxs:
                    if self.config.schema == "source":
                        # Map any unexpected indictment value onto "NA" so it fits the ClassLabel.
                        if content["indictment"] not in ["NA", "tunggal", "subsider", "komul", "alternatif", "kombinasi", "gabungan"]:
                            content["indictment"] = "NA"
                        yield counter, {
                            "id": content["id"],
                            "owner": content["owner"],
                            "lawyer": content["lawyer"],
                            "verdict": content["verdict"],
                            "indictment": content["indictment"],
                            "text-tags": content["text-tags"],
                            "text": content["text"],
                        }
                        counter += 1
                    elif self.config.schema == "seacrowd_seq_label":
                        yield counter, {
                            "id": content["id"],
                            "tokens": content["text"],
                            "labels": content["text-tags"],
                        }
                        counter += 1
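
For reference, a loader script like this is normally consumed through the datasets library rather than run directly. The sketch below is not part of the committed file: it assumes datasets and the seacrowd helper package (which provides the schemas imported at the top) are installed, and that indoler.py sits in the working directory. The config names come from BUILDER_CONFIGS above.

# Minimal usage sketch (hypothetical; not part of indoler.py).
# Assumes: pip install datasets seacrowd
from datasets import load_dataset

# Source schema: full court-decision fields (id, owner, lawyer, verdict, ...).
source = load_dataset("indoler.py", name="indoler_source", trust_remote_code=True)

# SEACrowd sequence-labeling schema: id, tokens, labels.
seq = load_dataset("indoler.py", name="indoler_seacrowd_seq_label", trust_remote_code=True)

print(source["train"][0]["id"])
print(seq["train"][0]["tokens"][:10])
print(seq["train"][0]["labels"][:10])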