Datasets:

Tasks:
Other
Languages:
Chinese
Multilinguality:
monolingual
Size Categories:
1M<n<10M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
original
ArXiv:
Tags:
judgement-prediction
License:
albertvillanova HF staff committed on
Commit
0288b0a
1 Parent(s): b689f32

Delete loading script

Browse files
Files changed (1) hide show
  1. cail2018.py +0 -116
cail2018.py DELETED
@@ -1,116 +0,0 @@
1
- import json
2
- import os
3
-
4
- import datasets
5
-
6
-
7
- _CITATION = """\
8
- @misc{xiao2018cail2018,
9
- title={CAIL2018: A Large-Scale Legal Dataset for Judgment Prediction},
10
- author={Chaojun Xiao and Haoxi Zhong and Zhipeng Guo and Cunchao Tu and Zhiyuan Liu and Maosong Sun and Yansong Feng and Xianpei Han and Zhen Hu and Heng Wang and Jianfeng Xu},
11
- year={2018},
12
- eprint={1807.02478},
13
- archivePrefix={arXiv},
14
- primaryClass={cs.CL}
15
- }
16
- """
17
-
18
- _DESCRIPTION = """\
19
- In this paper, we introduce Chinese AI and Law challenge dataset (CAIL2018),
20
- the first large-scale Chinese legal dataset for judgment prediction. CAIL contains more than 2.6 million
21
- criminal cases published by the Supreme People's Court of China, which are several times larger than other
22
- datasets in existing works on judgment prediction. Moreover, the annotations of judgment results are more
23
- detailed and rich. It consists of applicable law articles, charges, and prison terms, which are expected
24
- to be inferred according to the fact descriptions of cases. For comparison, we implement several conventional
25
- text classification baselines for judgment prediction and experimental results show that it is still a
26
- challenge for current models to predict the judgment results of legal cases, especially on prison terms.
27
- To help the researchers make improvements on legal judgment prediction.
28
- """
29
- _URL = "https://cail.oss-cn-qingdao.aliyuncs.com/CAIL2018_ALL_DATA.zip"
30
-
31
-
32
class Cail2018(datasets.GeneratorBasedBuilder):
    """CAIL2018: large-scale Chinese legal dataset for judgment prediction.

    Loads the CAIL2018 archive (see ``_URL``) and exposes six custom splits
    covering the exercise contest, the first competition stage, and the
    final test set. Each example pairs a case's fact description with its
    judgment annotations (law articles, charges, fine, prison term).
    """

    VERSION = datasets.Version("1.0.0")

    # (split name, file path inside the extracted archive) — shared by
    # _split_generators so the six splits stay consistent in one place.
    _SPLIT_FILES = [
        ("exercise_contest_train", "final_all_data/exercise_contest/data_train.json"),
        ("exercise_contest_valid", "final_all_data/exercise_contest/data_valid.json"),
        ("exercise_contest_test", "final_all_data/exercise_contest/data_test.json"),
        ("first_stage_train", "final_all_data/first_stage/train.json"),
        ("first_stage_test", "final_all_data/first_stage/test.json"),
        ("final_test", "final_all_data/final_test.json"),
    ]

    def _info(self):
        """Return the DatasetInfo with the feature schema shared by all splits."""
        features = datasets.Features(
            {
                "fact": datasets.Value("string"),
                "relevant_articles": datasets.Sequence(datasets.Value("int32")),
                "accusation": datasets.Sequence(datasets.Value("string")),
                "punish_of_money": datasets.Value("float"),
                "criminals": datasets.Sequence(datasets.Value("string")),
                "death_penalty": datasets.Value("bool"),
                "imprisonment": datasets.Value("float"),
                "life_imprisonment": datasets.Value("bool"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage="https://arxiv.org/abs/1807.02478",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive once and declare the six splits."""
        dl_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split(split),
                gen_kwargs={"filepath": os.path.join(dl_dir, path), "split": split},
            )
            for split, path in self._SPLIT_FILES
        ]

    def _generate_examples(self, filepath, split):
        """Yield (index, example) pairs from one JSON-lines split file.

        Each non-empty line is a JSON object with a "fact" string and a
        nested "meta" object holding the judgment annotations. Blank lines
        (e.g. a trailing newline at EOF) are skipped — the original code
        passed them to json.loads, which raises JSONDecodeError on "".
        """
        with open(filepath, encoding="utf-8") as f:
            idx = 0
            for row in f:
                if not row.strip():
                    # Tolerate empty/trailing lines instead of crashing.
                    continue
                data = json.loads(row)
                meta = data["meta"]
                term = meta["term_of_imprisonment"]
                yield idx, {
                    "fact": data["fact"],
                    "relevant_articles": meta["relevant_articles"],
                    "accusation": meta["accusation"],
                    "punish_of_money": meta["punish_of_money"],
                    "criminals": meta["criminals"],
                    "death_penalty": term["death_penalty"],
                    "imprisonment": term["imprisonment"],
                    "life_imprisonment": term["life_imprisonment"],
                }
                idx += 1