system HF staff committed on
Commit
9ed6d98
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5) hide show
  1. .gitattributes +27 -0
  2. README.md +136 -0
  3. cail2018.py +118 -0
  4. dataset_infos.json +1 -0
  5. dummy/1.0.0/dummy_data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ languages:
7
+ - zh
8
+ licenses:
9
+ - unknown
10
+ multilinguality:
11
+ - monolingual
12
+ size_categories:
13
+ - n>1M
14
+ source_datasets:
15
+ - original
16
+ task_categories:
17
+ - other
18
+ task_ids:
19
+ - other-other-judgement-prediction---
20
+ ---
21
+ # Dataset Card for CAIL 2018
22
+
23
+ ## Table of Contents
24
+ - [Dataset Description](#dataset-description)
25
+ - [Dataset Summary](#dataset-summary)
26
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
27
+ - [Languages](#languages)
28
+ - [Dataset Structure](#dataset-structure)
29
+ - [Data Instances](#data-instances)
30
+ - [Data Fields](#data-fields)
31
+ - [Data Splits](#data-splits)
32
+ - [Dataset Creation](#dataset-creation)
33
+ - [Curation Rationale](#curation-rationale)
34
+ - [Source Data](#source-data)
35
+ - [Annotations](#annotations)
36
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
37
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
38
+ - [Social Impact of Dataset](#social-impact-of-dataset)
39
+ - [Discussion of Biases](#discussion-of-biases)
40
+ - [Other Known Limitations](#other-known-limitations)
41
+ - [Additional Information](#additional-information)
42
+ - [Dataset Curators](#dataset-curators)
43
+ - [Licensing Information](#licensing-information)
44
+ - [Citation Information](#citation-information)
45
+
46
+ ## Dataset Description
47
+
48
+ - **Homepage:** [Github](https://github.com/thunlp/CAIL/blob/master/README_en.md)
49
+ - **Repository:** [Github](https://github.com/thunlp/CAIL)
50
+ - **Paper:** [Arxiv](https://arxiv.org/abs/1807.02478)
51
+ - **Leaderboard:**
52
+ - **Point of Contact:**
53
+
54
+ ### Dataset Summary
55
+
56
+ [More Information Needed]
57
+
58
+ ### Supported Tasks and Leaderboards
59
+
60
+ [More Information Needed]
61
+
62
+ ### Languages
63
+
64
+ [More Information Needed]
65
+
66
+ ## Dataset Structure
67
+
68
+ ### Data Instances
69
+
70
+ [More Information Needed]
71
+
72
+ ### Data Fields
73
+
74
+ [More Information Needed]
75
+
76
+ ### Data Splits
77
+
78
+ [More Information Needed]
79
+
80
+ ## Dataset Creation
81
+
82
+ ### Curation Rationale
83
+
84
+ [More Information Needed]
85
+
86
+ ### Source Data
87
+
88
+ #### Initial Data Collection and Normalization
89
+
90
+ [More Information Needed]
91
+
92
+ #### Who are the source language producers?
93
+
94
+ [More Information Needed]
95
+
96
+ ### Annotations
97
+
98
+ #### Annotation process
99
+
100
+ [More Information Needed]
101
+
102
+ #### Who are the annotators?
103
+
104
+ [More Information Needed]
105
+
106
+ ### Personal and Sensitive Information
107
+
108
+ [More Information Needed]
109
+
110
+ ## Considerations for Using the Data
111
+
112
+ ### Social Impact of Dataset
113
+
114
+ [More Information Needed]
115
+
116
+ ### Discussion of Biases
117
+
118
+ [More Information Needed]
119
+
120
+ ### Other Known Limitations
121
+
122
+ [More Information Needed]
123
+
124
+ ## Additional Information
125
+
126
+ ### Dataset Curators
127
+
128
+ [More Information Needed]
129
+
130
+ ### Licensing Information
131
+
132
+ [More Information Needed]
133
+
134
+ ### Citation Information
135
+
136
+ [More Information Needed]
cail2018.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import absolute_import, division, print_function

import json
import os

import datasets


# BibTeX entry for the CAIL2018 paper (arXiv:1807.02478); exposed through
# DatasetInfo.citation in Cail2018._info below.
_CITATION = """\
@misc{xiao2018cail2018,
    title={CAIL2018: A Large-Scale Legal Dataset for Judgment Prediction},
    author={Chaojun Xiao and Haoxi Zhong and Zhipeng Guo and Cunchao Tu and Zhiyuan Liu and Maosong Sun and Yansong Feng and Xianpei Han and Zhen Hu and Heng Wang and Jianfeng Xu},
    year={2018},
    eprint={1807.02478},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

# Human-readable dataset summary; exposed through DatasetInfo.description.
_DESCRIPTION = """\
In this paper, we introduce Chinese AI and Law challenge dataset (CAIL2018),
the first large-scale Chinese legal dataset for judgment prediction. CAIL contains more than 2.6 million
criminal cases published by the Supreme People's Court of China, which are several times larger than other
datasets in existing works on judgment prediction. Moreover, the annotations of judgment results are more
detailed and rich. It consists of applicable law articles, charges, and prison terms, which are expected
to be inferred according to the fact descriptions of cases. For comparison, we implement several conventional
text classification baselines for judgment prediction and experimental results show that it is still a
challenge for current models to predict the judgment results of legal cases, especially on prison terms.
To help the researchers make improvements on legal judgment prediction.
"""
# Single archive containing every split; downloaded and extracted in
# Cail2018._split_generators.
_URL = "https://cail.oss-cn-qingdao.aliyuncs.com/CAIL2018_ALL_DATA.zip"
32
+
33
+
34
class Cail2018(datasets.GeneratorBasedBuilder):
    """Dataset builder for CAIL2018, a Chinese legal judgment-prediction corpus.

    Each example is one criminal case: a free-text fact description plus the
    judgment annotations (law articles, accusations, fine, defendants, and
    term-of-imprisonment fields) read from JSON-lines files in the archive.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the example schema and dataset-level metadata."""
        example_features = datasets.Features(
            {
                "fact": datasets.Value("string"),
                "relevant_articles": datasets.Sequence(datasets.Value("int32")),
                "accusation": datasets.Sequence(datasets.Value("string")),
                "punish_of_money": datasets.Value("float"),
                "criminals": datasets.Sequence(datasets.Value("string")),
                "death_penalty": datasets.Value("bool"),
                "imprisonment": datasets.Value("float"),
                "life_imprisonment": datasets.Value("bool"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=example_features,
            homepage="https://arxiv.org/abs/1807.02478",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare one generator per split."""
        extracted_dir = dl_manager.download_and_extract(_URL)

        # (split name, JSON-lines file location inside the extracted archive)
        split_files = [
            ("exercise_contest_train", "final_all_data/exercise_contest/data_train.json"),
            ("exercise_contest_valid", "final_all_data/exercise_contest/data_valid.json"),
            ("exercise_contest_test", "final_all_data/exercise_contest/data_test.json"),
            ("first_stage_train", "final_all_data/first_stage/train.json"),
            ("first_stage_test", "final_all_data/first_stage/test.json"),
            ("final_test", "final_all_data/final_test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split(split_name),
                gen_kwargs={
                    "filepath": os.path.join(extracted_dir, relative_path),
                    "split": split_name,
                },
            )
            for split_name, relative_path in split_files
        ]

    def _generate_examples(self, filepath, split):
        """Yield (index, example) pairs; each input line is one JSON case record."""
        with open(filepath, encoding="utf-8") as handle:
            for index, line in enumerate(handle):
                record = json.loads(line)
                meta = record["meta"]
                term = meta["term_of_imprisonment"]
                yield index, {
                    "fact": record["fact"],
                    "relevant_articles": meta["relevant_articles"],
                    "accusation": meta["accusation"],
                    "punish_of_money": meta["punish_of_money"],
                    "criminals": meta["criminals"],
                    "death_penalty": term["death_penalty"],
                    "imprisonment": term["imprisonment"],
                    "life_imprisonment": term["life_imprisonment"],
                }
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "In this paper, we introduce Chinese AI and Law challenge dataset (CAIL2018),\nthe first large-scale Chinese legal dataset for judgment prediction. CAIL contains more than 2.6 million\ncriminal cases published by the Supreme People's Court of China, which are several times larger than other\ndatasets in existing works on judgment prediction. Moreover, the annotations of judgment results are more\ndetailed and rich. It consists of applicable law articles, charges, and prison terms, which are expected\nto be inferred according to the fact descriptions of cases. For comparison, we implement several conventional\ntext classification baselines for judgment prediction and experimental results show that it is still a\nchallenge for current models to predict the judgment results of legal cases, especially on prison terms.\nTo help the researchers make improvements on legal judgment prediction.\n", "citation": "@misc{xiao2018cail2018,\n title={CAIL2018: A Large-Scale Legal Dataset for Judgment Prediction}, \n author={Chaojun Xiao and Haoxi Zhong and Zhipeng Guo and Cunchao Tu and Zhiyuan Liu and Maosong Sun and Yansong Feng and Xianpei Han and Zhen Hu and Heng Wang and Jianfeng Xu},\n year={2018},\n eprint={1807.02478},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://arxiv.org/abs/1807.02478", "license": "", "features": {"fact": {"dtype": "string", "id": null, "_type": "Value"}, "relevant_articles": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "accusation": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "punish_of_money": {"dtype": "float32", "id": null, "_type": "Value"}, "criminals": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "death_penalty": {"dtype": "bool", "id": null, "_type": "Value"}, "imprisonment": {"dtype": 
"float32", "id": null, "_type": "Value"}, "life_imprisonment": {"dtype": "bool", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "cail2018", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"exercise_contest_train": {"name": "exercise_contest_train", "num_bytes": 220112732, "num_examples": 154592, "dataset_name": "cail2018"}, "exercise_contest_valid": {"name": "exercise_contest_valid", "num_bytes": 21702157, "num_examples": 17131, "dataset_name": "cail2018"}, "exercise_contest_test": {"name": "exercise_contest_test", "num_bytes": 41057634, "num_examples": 32508, "dataset_name": "cail2018"}, "first_stage_train": {"name": "first_stage_train", "num_bytes": 1779657510, "num_examples": 1710856, "dataset_name": "cail2018"}, "first_stage_test": {"name": "first_stage_test", "num_bytes": 244335194, "num_examples": 217016, "dataset_name": "cail2018"}, "final_test": {"name": "final_test", "num_bytes": 44194707, "num_examples": 35922, "dataset_name": "cail2018"}}, "download_checksums": {"https://cail.oss-cn-qingdao.aliyuncs.com/CAIL2018_ALL_DATA.zip": {"num_bytes": 984551626, "checksum": "3c05dfdade742f8b0d5e782d174475e7769448a5f407bfb7f14f0aed72d61d4a"}}, "download_size": 984551626, "post_processing_size": null, "dataset_size": 2351059934, "size_in_bytes": 3335611560}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67462601f56c71a49ba0a716690c8ca081621acbd2d53af131ba6ed9f30ffa9d
3
+ size 14736