bstds commited on
Commit
9bfab44
1 Parent(s): 05775d9

Create indo_law.py

Browse files
Files changed (1) hide show
  1. indo_law.py +189 -0
indo_law.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Loading script for the Indo-Law corpus: Indonesian first-level court
# decision documents annotated with punishment classification labels
# (Nuranti et al., 2022).
import os
from pathlib import Path
from typing import Dict, List, Tuple

from dataclasses import dataclass

import datasets
import json
import xml.etree.ElementTree as ET


# BibTeX entry for the paper that introduced this dataset.
_CITATION = """\
@article{nuranti2022predicting,
title={Predicting the Category and the Length of Punishment in Indonesian Courts Based on Previous Court Decision Documents},
author={Nuranti, Eka Qadri and Yulianti, Evi and Husin, Husna Sarirah},
journal={Computers},
volume={11},
number={6},
pages={88},
year={2022},
publisher={Multidisciplinary Digital Publishing Institute}
}
"""

# ISO 639-1 codes of the languages covered by the corpus (Indonesian only).
_LANGUAGES = ["id"]
# False: the data is downloaded from a public URL, not read from local disk.
_LOCAL = False

_DATASETNAME = "indo_law"

_DESCRIPTION = """\
This study presents predictions of first-level judicial decisions by utilizing a collection of Indonesian court decision documents.
We propose using multi-level learning, namely, CNN+attention, using decision document sections as features to predict the category and the length of punishment in Indonesian courts.
Our results demonstrate that the decision document sections that strongly affected the accuracy of the prediction model were prosecution history, facts, legal facts, and legal considerations.
"""

_HOMEPAGE = ""

_LICENSE = "Unknown"

# GitHub zipball of the repository that hosts the XML documents.
_URLS = {
    _DATASETNAME: "https://github.com/ir-nlp-csui/indo-law/zipball/master",
}

_SOURCE_VERSION = "1.0.0"
@dataclass
class IndoLawConfig(datasets.BuilderConfig):
    """BuilderConfig for the Indo-Law dataset.

    One instance per entry in ``IndoLaw.BUILDER_CONFIGS``; all fields are
    always supplied explicitly there, so the ``None`` defaults are never
    used in practice.
    """

    name: str = None  # config name, e.g. "indo_law_source"
    version: datasets.Version = None  # dataset version for this config
    description: str = None  # human-readable description of the config
    schema: str = None  # output schema: "source" or "nusantara_text"
    subset_id: str = None  # subset identifier (always "indo_law" here)
class IndoLaw(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Indo-Law Indonesian court-decision corpus.

    Supports two output schemas:

    * ``source`` — the document id, its classification labels, and every
      XML section as a (tag, value) pair.
    * ``nusantara_text`` — a flat text-classification view where the
      section dict is JSON-serialized into ``text`` and the top-level
      ``klasifikasi`` attribute becomes the class label.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)

    # Closed set of top-level classification labels used as ClassLabel
    # names in the nusantara_text schema.
    _LABELS = ["pidana-khusus", "pidana-umum"]

    BUILDER_CONFIGS = [
        IndoLawConfig(
            name="indo_law_source",
            version=SOURCE_VERSION,
            description="Indo-Law source schema",
            schema="source",
            subset_id="indo_law",
        ),
        IndoLawConfig(
            name="indo_law_nusantara_text",
            version=SOURCE_VERSION,
            description="Indo-Law Nusantara schema",
            schema="nusantara_text",
            subset_id="indo_law",
        ),
    ]

    DEFAULT_CONFIG_NAME = "indo_law_source"

    def _get_features(self, label_names):
        """Return the text-classification feature spec with the given label names."""
        return datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "label": datasets.ClassLabel(names=label_names),
            }
        )

    def _info(self) -> datasets.DatasetInfo:
        """Build DatasetInfo with features that depend on the active schema."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "klasifikasi": datasets.Value("string"),
                    "sub_klasifikasi": datasets.Value("string"),
                    # Each document section is kept as a (tag, value) pair.
                    "paragraphs": datasets.Sequence(
                        {
                            "tag": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ),
                }
            )

        elif self.config.schema == "nusantara_text":
            features = self._get_features(self._LABELS)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract the corpus; it ships as a single train split."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        # GitHub zipballs extract into an "<owner>-<repo>-<sha>" directory;
        # the XML files live in its "dataset" subfolder. NOTE(review): the
        # pinned sha suffix will break if the zipball is rebuilt — consider
        # discovering the directory instead of hard-coding it.
        data_dir = os.path.join(data_dir, "ir-nlp-csui-indo-law-6734033", "dataset")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs for every XML document under *filepath*.

        The example layout follows the active config schema.
        """
        files = os.listdir(filepath)

        results = [self._parse_file(os.path.join(filepath, file)) for file in files]

        if self.config.schema == "source":
            for key, result in enumerate(results):
                example = {
                    "id": result["id"],
                    "klasifikasi": result["klasifikasi"],
                    # BUG FIX: previously copied result["klasifikasi"] here,
                    # so the sub-classification column silently duplicated
                    # the parent label even though _parse_file extracts the
                    # real "sub_klasifikasi" attribute.
                    "sub_klasifikasi": result["sub_klasifikasi"],
                    "paragraphs": [
                        {"tag": tag, "value": value}
                        for tag, value in result["paragraphs"].items()
                    ],
                }
                yield key, example

        elif self.config.schema == "nusantara_text":
            for key, result in enumerate(results):
                example = {
                    "id": result["id"],
                    # Sections are JSON-serialized so the whole document
                    # fits in a single string feature.
                    "text": json.dumps(result["paragraphs"]),
                    "label": result["klasifikasi"],
                }
                yield key, example

    def _parse_file(self, file_path):
        """Parse one court-decision XML file into a plain dict.

        The root element carries ``id``/``klasifikasi``/``sub_klasifikasi``
        attributes; each direct child element is one document section
        (tag name -> text content).
        """
        root = ET.parse(file_path).getroot()

        data = {
            "id": root.attrib["id"],
            "klasifikasi": root.attrib["klasifikasi"],
            "sub_klasifikasi": root.attrib["sub_klasifikasi"],
            "paragraphs": {},
        }

        for child in root:
            data["paragraphs"][child.tag] = child.text

        return data