wonseok committed on
Commit
19b8851
1 Parent(s): b92b8f5

Upload lbox_open.py

Files changed (1)
  1. lbox_open.py +142 -0
lbox_open.py ADDED
@@ -0,0 +1,142 @@
# LBox
import json
import datasets

_CASENAME_CLASSIFICATION_FEATURES = {
    "id": datasets.Value("int32"),
    "casetype": datasets.Value("string"),
    "casename": datasets.Value("string"),
    "facts": datasets.Value("string"),
}

_STATUTE_CLASSIFICATION_FEATURES = {
    "id": datasets.Value("int32"),
    "casetype": datasets.Value("string"),
    "casename": datasets.Value("string"),
    "statutes": datasets.features.Sequence(datasets.Value("string")),
    "facts": datasets.Value("string"),
}

_SUMMARIZATION_CLASSIFICATION_FEATURES = {
    "id": datasets.Value("int32"),
    "summary": datasets.Value("string"),
    "precedent": datasets.Value("string"),
}

_CASE_CORPUS_FEATURES = {
    "id": datasets.Value("int32"),
    "precedent": datasets.Value("string"),
}


class LBoxOpenConfig(datasets.BuilderConfig):
    """BuilderConfig for OpenLBox."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        # Version history:
        # 0.1.0: Initial version.
        super(LBoxOpenConfig, self).__init__(version=datasets.Version("0.1.0"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class LBoxOpen(datasets.GeneratorBasedBuilder):
    """The Legal AI Benchmark dataset from Korean Legal Cases."""

    BUILDER_CONFIGS = [
        LBoxOpenConfig(
            name="casename_classification",
            description="",
            features=_CASENAME_CLASSIFICATION_FEATURES,
            data_url="https://lbox-open.s3.ap-northeast-2.amazonaws.com/precedent_benchmark_dataset/casename_classification/v0.1.0/",
            citation="",
            url="lbox.kr",
        ),
        LBoxOpenConfig(
            name="statute_classification",
            description="",
            features=_STATUTE_CLASSIFICATION_FEATURES,
            data_url="https://lbox-open.s3.ap-northeast-2.amazonaws.com/precedent_benchmark_dataset/statute_classification/v0.1.0/",
            citation="",
            url="lbox.kr",
        ),
        LBoxOpenConfig(
            name="summarization",
            description="",
            features=_SUMMARIZATION_CLASSIFICATION_FEATURES,
            data_url="https://lbox-open.s3.ap-northeast-2.amazonaws.com/precedent_benchmark_dataset/summarization/v0.1.0/",
            citation="",
            url="lbox.kr",
        ),
        LBoxOpenConfig(
            name="case_corpus",
            description="",
            features=_CASE_CORPUS_FEATURES,
            data_url="https://lbox-open.s3.ap-northeast-2.amazonaws.com/precedent_benchmark_dataset/case_corpus/v0.1.0/",
            citation="",
            url="lbox.kr",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(self.config.features),
            homepage=self.config.url,
            citation="",
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "case_corpus":
            dl_dir = {
                "train": dl_manager.download_and_extract(f"{self.config.data_url}case_corpus-150k.jsonl") or "",
            }

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": dl_dir["train"],
                        "split": datasets.Split.TRAIN,
                    },
                )
            ]

        else:
            dl_dir = {
                "train": dl_manager.download_and_extract(f"{self.config.data_url}train.jsonl") or "",
                "valid": dl_manager.download_and_extract(f"{self.config.data_url}valid.jsonl") or "",
                "test": dl_manager.download_and_extract(f"{self.config.data_url}test.jsonl") or "",
            }

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": dl_dir["train"],
                        "split": datasets.Split.TRAIN,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_file": dl_dir["valid"],
                        "split": datasets.Split.VALIDATION,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": dl_dir["test"],
                        "split": datasets.Split.TEST,
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                yield row["id"], row
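
For context, a loading script like this is normally consumed through datasets.load_dataset, with the config name selecting one of the four benchmarks defined in BUILDER_CONFIGS. Below is a minimal usage sketch; the Hub repo id "lbox/lbox_open" is an assumption for illustration (it is not stated in this commit), and a local path to lbox_open.py would work the same way.

# Minimal usage sketch. Assumption: the repo id "lbox/lbox_open" is illustrative;
# substitute the actual Hub repo id or a local path to lbox_open.py.
from datasets import load_dataset

# Any name from BUILDER_CONFIGS is a valid config:
# "casename_classification", "statute_classification", "summarization", "case_corpus".
ds = load_dataset("lbox/lbox_open", "casename_classification")

print(ds)                          # DatasetDict with train / validation / test splits
print(ds["train"][0]["casename"])  # case name of the first training example
print(ds["train"][0]["facts"])     # its fact description

# "case_corpus" only defines a train split (see _split_generators above).
corpus = load_dataset("lbox/lbox_open", "case_corpus")
print(corpus["train"][0]["precedent"])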